ID (string, length 36) | Language (string, 1 class) | Repository Name (string, 13 classes) | File Name (string, 2 to 44 chars) | File Path in Repository (string, 11 to 111 chars) | File Path for Unit Test (string, 16 to 116 chars) | Code (string, 0 to 278k chars) | Unit Test - (Ground Truth) (string, 127 to 663k chars) | Code Url (string, 91 to 198 chars) | Test Code Url (string, 96 to 203 chars) | Commit Hash (string, 13 classes)
---|---|---|---|---|---|---|---|---|---|---|
fc2c582a-cd25-44b2-88af-7fa487bdb013 | cpp | tensorflow/tensorflow | ring_reducer | tensorflow/core/common_runtime/ring_reducer.cc | tensorflow/core/common_runtime/ring_reducer_test.cc | #include "tensorflow/core/common_runtime/ring_reducer.h"
#include <stdlib.h>
#include <atomic>
#include <functional>
#include <utility>
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_util.h"
#include "tensorflow/core/common_runtime/copy_tensor.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
RingReducer::~RingReducer() { group_size_tensor_ready_.WaitForNotification(); }
Status RingReducer::InitializeCollectiveParams(CollectiveParams* col_params) {
CHECK_EQ(col_params->instance.type, REDUCTION_COLLECTIVE);
CHECK_EQ(col_params->instance.impl_details.collective_name, "RingReduce");
return RingAlg::InitializeCollectiveParams(col_params);
}
void RingReducer::Run(StatusCallback done) {
CHECK(col_ctx_);
CHECK(col_params_);
col_ctx_->col_exec->UnblockDependencies(*col_params_);
done_ = std::move(done);
group_size_ = col_params_->group.group_size;
num_subdivs_ = static_cast<int>(
col_params_->instance.impl_details.subdiv_permutations.size());
CHECK_GT(num_subdivs_, 0);
if (VLOG_IS_ON(1)) {
string buf;
for (int r = 0; r < col_params_->group.members.size(); ++r) {
strings::StrAppend(&buf, "dev ", r, " : ",
col_params_->group.members[r].device.name(), "\n");
}
for (int sd = 0;
sd < col_params_->instance.impl_details.subdiv_permutations.size();
++sd) {
strings::StrAppend(&buf, "\nsubdiv ", sd, " perm: ");
for (auto x :
col_params_->instance.impl_details.subdiv_permutations[sd]) {
strings::StrAppend(&buf, x, ", ");
}
}
VLOG(1) << "RingReducer::Run for device " << col_ctx_->device_name
<< " default_rank " << col_params_->default_rank << "\n"
<< buf;
}
if ((col_ctx_->input != col_ctx_->output) &&
(DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) {
Notification note;
Status status;
tsl::profiler::TraceMe activity("MemCpyAsync",
tsl::profiler::TraceMeLevel::kInfo);
CollectiveRemoteAccessLocal::MemCpyAsync(
col_ctx_->op_ctx->op_device_context(),
col_ctx_->op_ctx->op_device_context(), col_ctx_->device,
col_ctx_->device, col_ctx_->op_ctx->input_alloc_attr(0),
col_ctx_->op_ctx->output_alloc_attr(0), col_ctx_->input,
col_ctx_->output, 0,
[&note, &status](const Status& s) {
status.Update(s);
note.Notify();
});
note.WaitForNotification();
if (!status.ok()) {
done_(status);
return;
}
}
ContinueAfterInputCopy();
}
void RingReducer::ContinueAfterInputCopy() {
AllocatorAttributes attr = col_ctx_->op_ctx->output_alloc_attr(0);
ca_.reset(MakeCollectiveAdapter(col_ctx_->output, group_size_ * num_subdivs_,
col_ctx_->device->GetAllocator(attr)));
if (col_params_->final_op) {
Tensor group_size_val = ca_->Scalar(group_size_);
if (col_params_->group.device_type != "CPU") {
uint64 safe_alloc_frontier = col_ctx_->device->SafeAllocFrontier(0);
AllocationAttributes aa;
std::function<uint64()> freed_by_func = [this, &safe_alloc_frontier]() {
safe_alloc_frontier =
col_ctx_->device->SafeAllocFrontier(safe_alloc_frontier);
return safe_alloc_frontier;
};
if (safe_alloc_frontier > 0) {
aa.freed_by_func = &freed_by_func;
}
group_size_tensor_ = ca_->Scalar(
col_ctx_->device->GetAllocator(col_ctx_->op_ctx->input_alloc_attr(0)),
aa);
DeviceContext* op_dev_ctx = col_ctx_->op_ctx->op_device_context();
op_dev_ctx->CopyCPUTensorToDevice(
&group_size_val, col_ctx_->device, &group_size_tensor_,
[this](const Status& s) {
if (!s.ok()) {
StartAbort(s);
}
group_size_tensor_ready_.Notify();
},
(safe_alloc_frontier == 0));
} else {
group_size_tensor_ = group_size_val;
group_size_tensor_ready_.Notify();
}
} else {
group_size_tensor_ready_.Notify();
}
Finish(RunAsyncParts());
}
void RingReducer::InitRingField(RingField* rf, int chunk_idx, int subdiv_idx,
int field_idx) {
RingAlg::InitRingField(rf, chunk_idx, subdiv_idx, field_idx);
if (rf->do_recv) {
rf->tmp_chunk = ca_->TempChunk(rf->sc_idx);
}
}
bool RingReducer::RunAsyncParts() {
rfv_.clear();
rfv_.resize(group_size_ * num_subdivs_);
PCQueue ready_queue;
for (int chunk_idx = 0; chunk_idx < group_size_; ++chunk_idx) {
for (int subdiv_idx = 0; subdiv_idx < num_subdivs_; ++subdiv_idx) {
int rf_index = (chunk_idx * num_subdivs_) + subdiv_idx;
InitRingField(&rfv_[rf_index], chunk_idx, subdiv_idx, rf_index);
ready_queue.Enqueue(&rfv_[rf_index]);
}
}
const DeviceBase::AcceleratorDeviceInfo* gpu_info =
col_ctx_->device->tensorflow_accelerator_device_info();
if (gpu_info) {
tsl::profiler::TraceMe activity("WaitForQueuedEvents",
tsl::profiler::TraceMeLevel::kInfo);
Notification note;
Status s = gpu_info->default_context->ThenExecute(
col_ctx_->device, gpu_info->stream, [&note]() { note.Notify(); });
if (s.ok()) {
note.WaitForNotification();
} else {
mutex_lock l(status_mu_);
status_ =
errors::Internal("Failed to dispatch ThenExecute in RingReducer");
return false;
}
}
int field_done_count = 0;
int send_pending_count = 0;
int recv_pending_count = 0;
std::atomic<bool> aborted(false);
{
tsl::profiler::TraceMe activity("Loop", tsl::profiler::TraceMeLevel::kInfo);
while (field_done_count < rfv_.size()) {
VLOG(4) << FieldState();
RingField* rf = ready_queue.Dequeue();
bool dispatched = false;
do {
if (aborted) {
ready_queue.Enqueue(rf);
break;
}
switch (rf->action) {
case RF_INIT:
if (rf->do_recv) {
rf->action = RF_RECV;
auto requeue = [this, rf, &ready_queue, &aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchRecv(rf, requeue);
dispatched = true;
++recv_pending_count;
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_RECV:
CHECK_GT(recv_pending_count, 0);
--recv_pending_count;
if (!rf->second_pass) {
rf->action = RF_REDUCE;
Status s = collective_util::ComputeBinOp(
col_ctx_->op_ctx, col_ctx_->op_params, col_ctx_->device,
col_params_->merge_op, &rf->chunk, &rf->tmp_chunk);
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_REDUCE:
if (!rf->second_pass && col_params_->final_op && rf->is_final) {
rf->action = RF_FINALIZE;
group_size_tensor_ready_.WaitForNotification();
Status s = collective_util::ComputeBinOp(
col_ctx_->op_ctx, col_ctx_->op_params, col_ctx_->device,
col_params_->final_op, &rf->chunk, &group_size_tensor_);
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
} else {
rf->action = RF_SEND_READY;
}
break;
case RF_FINALIZE:
rf->action = RF_DONE;
break;
case RF_SEND_READY:
if (rf->do_send) {
rf->action = RF_SEND;
auto send_complete = [this, rf, &ready_queue,
&aborted](Status s) {
if (!s.ok()) {
aborted = true;
StartAbort(s);
}
ready_queue.Enqueue(rf);
};
DispatchSend(rf, send_complete);
dispatched = true;
++send_pending_count;
} else {
rf->action = RF_DONE;
}
break;
case RF_SEND:
CHECK_GT(send_pending_count, 0);
--send_pending_count;
rf->action = RF_DONE;
break;
case RF_DONE:
break;
}
if (rf->action == RF_DONE) {
if (rf->second_pass) {
++field_done_count;
break;
} else {
AdvanceToSecondPass(rf);
}
}
} while (!dispatched);
if (aborted) break;
}
if (aborted) {
while ((send_pending_count > 0) || (recv_pending_count > 0)) {
RingField* rf = ready_queue.Dequeue();
switch (rf->action) {
case RF_RECV:
--recv_pending_count;
break;
case RF_SEND:
--send_pending_count;
break;
default: {
}
}
}
}
}
CHECK_EQ(send_pending_count, 0);
CHECK_EQ(recv_pending_count, 0);
VLOG(2) << this << " device=" << col_ctx_->device_name << " finish;"
<< " final value " << TensorDebugString(ca_->Value());
return !aborted;
}
namespace {
REGISTER_COLLECTIVE(RingReduce, RingReducer);
}
} | #include "tensorflow/core/common_runtime/ring_reducer.h"
#include <algorithm>
#include "absl/memory/memory.h"
#include "tensorflow/core/common_runtime/base_collective_executor.h"
#include "tensorflow/core/common_runtime/collective_rma_local.h"
#include "tensorflow/core/common_runtime/collective_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_resolver_local.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/test_collective_executor_mgr.h"
#include "tensorflow/core/common_runtime/threadpool_device.h"
#include "tensorflow/core/framework/cancellation.h"
#include "tensorflow/core/framework/collective.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
std::unique_ptr<OpKernel> GetKernel(const NodeDef& node,
const DeviceType& device_type,
DeviceBase* device) {
Status status;
std::unique_ptr<OpKernel> k = CreateOpKernel(
device_type, device, device->GetAllocator(AllocatorAttributes()), node,
TF_GRAPH_DEF_VERSION, &status);
if (!status.ok()) {
LOG(FATAL) << status;
}
return k;
}
std::unique_ptr<OpKernel> GetAdd(DataType dtype, const DeviceType& device_type,
DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Add");
TF_CHECK_OK(builder.Attr("T", dtype)
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Finalize(&node_def));
return GetKernel(node_def, device_type, device);
}
std::unique_ptr<OpKernel> GetDiv(DataType dtype, const DeviceType& device_type,
DeviceBase* device) {
NodeDef node_def;
NodeDefBuilder builder("add_node", "Div");
TF_CHECK_OK(builder.Attr("T", dtype)
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Finalize(&node_def));
return GetKernel(node_def, device_type, device);
}
class RingReducerTest : public ::testing::Test {
protected:
void Init(int num_workers, int num_devices, DataType dtype,
const TensorShape& shape, const DeviceType& device_type,
int num_subdivs, int fail_after) {
test_env_ = CreateCollectiveTestEnv(num_workers, num_devices, device_type);
test_env_->remote_access->set_fail_after(fail_after);
for (int wi = 0; wi < num_workers; ++wi) {
for (int di = 0; di < num_devices; ++di) {
int rank = wi * num_devices + di;
instances_.push_back(std::make_unique<DeviceInstance>(
rank, num_subdivs, dtype, shape, test_env_.get()));
}
}
}
void Reduce(int fail_after) {
std::atomic<int> done(0);
for (auto& di : instances_) {
SchedClosure([&di, &done] {
di->DoReduce();
++done;
});
if (fail_after > 0) {
Env::Default()->SleepForMicroseconds(100);
}
}
while (done < static_cast<int>(instances_.size())) {
Env::Default()->SleepForMicroseconds(1000);
}
}
template <typename T>
void RunTest(DataType dtype, const DeviceType& device_type, int num_workers,
int num_devices, int num_subdivs, int tensor_len,
int fail_after) {
Init(num_workers, num_devices, dtype, TensorShape({tensor_len}),
device_type, num_subdivs, fail_after);
std::vector<T> expected(tensor_len);
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
instances_[di]->InitTensor([&expected, dtype, di](Tensor* t) {
for (size_t i = 0; i < t->NumElements(); ++i) {
float value = pow(10, static_cast<double>(di)) * i;
if (dtype == DT_INT32 || dtype == DT_INT64) {
value = di * 10 + i;
}
t->flat<T>()(i) = static_cast<T>(value);
expected[i] += static_cast<T>(value);
}
});
}
Reduce(fail_after);
if (fail_after > 0) {
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
EXPECT_NE(instances_[di]->status_.message().find("Deliberate failure"),
string::npos);
}
} else {
for (int i = 0; i < tensor_len; ++i) {
expected[i] /= static_cast<T>(num_workers * num_devices);
}
for (int di = 0; di < static_cast<int>(instances_.size()); ++di) {
TF_EXPECT_OK(instances_[di]->status_);
test::ExpectTensorEqual<T>(test::AsTensor<T>(expected),
instances_[di]->tensor());
}
}
}
class DeviceInstance {
public:
DeviceInstance(int rank, int num_subdivs, DataType dtype,
const TensorShape& shape, CollectiveTestEnv* test_env)
: test_env_(test_env), tensor_(dtype, shape) {
col_params_ = CreateCollectiveParams(*test_env_, rank, "RingReduce",
REDUCTION_COLLECTIVE, dtype, shape);
if (num_subdivs > 0) {
col_params_->instance.impl_details.subdiv_offsets =
GenerateEvenSubdivOffsets(test_env->num_devices_per_worker,
num_subdivs);
}
string dev_name = col_params_->group.members[rank].device.name();
TF_CHECK_OK(test_env_->device_mgr->LookupDevice(dev_name, &device_))
<< "Couldn't find device " << dev_name
<< " existing devices: " << test_env_->device_mgr->DebugString();
merge_op_ = GetAdd(col_params_->instance.data_type,
test_env_->device_type, device_);
final_op_ = GetDiv(col_params_->instance.data_type,
test_env_->device_type, device_);
col_params_->merge_op = merge_op_.get();
col_params_->final_op = final_op_.get();
}
void InitTensor(const std::function<void(Tensor*)>& init_f) {
init_f(&tensor_);
}
void DoReduce() {
status_ = RunCollective(test_env_, col_params_.get(), device_, &tensor_,
&tensor_);
}
const Tensor& tensor() { return tensor_; }
CollectiveTestEnv* test_env_;
Tensor tensor_;
Device* device_;
core::RefCountPtr<CollectiveParams> col_params_;
std::unique_ptr<OpKernel> merge_op_;
std::unique_ptr<OpKernel> final_op_;
Status status_;
};
std::unique_ptr<CollectiveTestEnv> test_env_;
std::vector<std::unique_ptr<DeviceInstance>> instances_;
mutex mu_;
int32 reduce_counter_ TF_GUARDED_BY(mu_) = 0;
};
class RingReducerInitParamsTest : public ::testing::Test {
protected:
void RunSubdivPermsTest(
CollectiveParams* cp,
const std::vector<std::vector<int>>& expected_subdiv_perms,
const std::vector<int>& expected_subdiv_rank) {
cp->instance.impl_details.subdiv_permutations.clear();
cp->subdiv_rank.clear();
core::RefCountPtr<RingReducer> reducer(new RingReducer());
TF_CHECK_OK(reducer->InitializeCollectiveParams(cp));
EXPECT_EQ(expected_subdiv_perms,
cp->instance.impl_details.subdiv_permutations);
EXPECT_EQ(expected_subdiv_rank, cp->subdiv_rank);
reducer->group_size_tensor_ready_.Notify();
}
};
TEST_F(RingReducerInitParamsTest, SpecifiedSubdivs) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 3;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets = {0, 4};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
8, 9, 10, 11, 20, 21, 22, 23, 16, 17, 18, 19}},
{0, 4});
cp->instance.impl_details.subdiv_offsets = {0, -4};
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8,
15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20}},
{0, 3});
cp->default_rank = 3;
cp->instance.impl_details.subdiv_offsets = {3, -3};
RunSubdivPermsTest(cp.get(),
{{3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14,
15, 8, 9, 10, 19, 20, 21, 22, 23, 16, 17, 18},
{4, 3, 2, 1, 0, 7, 6, 5, 12, 11, 10, 9,
8, 15, 14, 13, 20, 19, 18, 17, 16, 23, 22, 21}},
{0, 1});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivs) {
const int kNumDevsPerWorker = 8;
const int kNumWorkers = 3;
const int kNumDevs = kNumDevsPerWorker * kNumWorkers;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}},
{0});
{
int num_subdivs = 2;
int num_chunks = kNumDevs * num_subdivs;
size_t chunk_size = 3 * 1048576;
size_t tensor_size = chunk_size * num_chunks;
cp->instance.shape = TensorShape(
{static_cast<int64_t>(tensor_size / DataTypeSize(DT_FLOAT))});
}
cp->instance.impl_details.subdiv_offsets.clear();
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8,
15, 14, 13, 12, 19, 18, 17, 16, 23, 22, 21, 20}},
{0, 3});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivUpperBound) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}, {0, 1, 2, 3}}, {0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivIgnoresMaxNumSubdivs) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.max_subdivs_per_device = 4;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}}, {0});
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 4;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(),
{{0, 1, 2, 3}, {0, 1, 2, 3}, {0, 1, 2, 3}, {0, 1, 2, 3}},
{0, 0, 0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivUsesDefault) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = 0;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}, {0, 1, 2, 3}}, {0, 0});
}
TEST_F(RingReducerInitParamsTest, AutomaticSubdivDisabled) {
const int kNumDevsPerWorker = 1;
const int kNumWorkers = 4;
auto test_env =
CreateCollectiveTestEnv(kNumWorkers, kNumDevsPerWorker, DEVICE_CPU);
auto cp =
CreateCollectiveParams(*test_env, 0, "RingReduce",
REDUCTION_COLLECTIVE, DT_FLOAT, TensorShape({1}));
cp->default_rank = 0;
cp->instance.impl_details.subdiv_offsets.clear();
cp->instance.impl_details.max_subdivs_per_device = -1;
cp->instance.shape = TensorShape({104857600 / DataTypeSize(DT_FLOAT)});
RunSubdivPermsTest(cp.get(), {{0, 1, 2, 3}}, {0});
}
#define DEF_TEST(B, T, W, D, S, L, A) \
TEST_F(RingReducerTest, \
DaTy##B##_DevTy##T##_Wkr##W##_Dev##D##_Sdiv##S##_Len##L##_Abrt##A) { \
DataType dtype = DT_##B; \
switch (dtype) { \
case DT_FLOAT: { \
RunTest<float>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_DOUBLE: { \
RunTest<double>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_BFLOAT16: { \
RunTest<tensorflow::bfloat16>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT32: { \
RunTest<int32>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
case DT_INT64: { \
RunTest<int64_t>(dtype, DEVICE_##T, W, D, S, L, A); \
} break; \
default: \
LOG(FATAL) << "Unimplemented"; \
} \
}
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 4, 1, 128, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 1001, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 4096, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 3, 1045991, 0)
DEF_TEST(FLOAT, CPU, 4, 4, 4, 1045991, 0)
DEF_TEST(DOUBLE, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(DOUBLE, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(BFLOAT16, CPU, 1, 2, 1, 8, 0)
DEF_TEST(BFLOAT16, CPU, 2, 8, 3, 16, 0)
DEF_TEST(INT32, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT32, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(INT64, CPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, CPU, 2, 8, 3, 4095, 0)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 1)
DEF_TEST(FLOAT, CPU, 2, 8, 1, 9408, 7)
DEF_TEST(FLOAT, CPU, 2, 8, 2, 9408, 11)
#endif
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 2, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 8, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 16, 0)
DEF_TEST(FLOAT, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 4096, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 3, 4095, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 3, 1045991, 0)
DEF_TEST(FLOAT, GPU, 1, 4, 4, 1045991, 0)
DEF_TEST(DOUBLE, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(INT64, GPU, 1, 2, 1, 1001, 0)
DEF_TEST(FLOAT, GPU, 1, 8, 1, 9408, 2)
DEF_TEST(FLOAT, GPU, 1, 8, 2, 9408, 5)
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_reducer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/ring_reducer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc4bf973-daf3-4afb-ac93-1887b4320c70 | cpp | tensorflow/tensorflow | tf_driver | tensorflow/lite/testing/tf_driver.cc | tensorflow/lite/testing/tf_driver_test.cc | #include "tensorflow/lite/testing/tf_driver.h"
#include <fstream>
#include <iostream>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
namespace tflite {
namespace testing {
namespace {
tensorflow::Tensor CreateTensor(const tensorflow::DataType type,
const std::vector<int64_t>& dim) {
tensorflow::TensorShape shape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(dim.data()), dim.size()}};
return {type, shape};
}
template <typename T>
int FillTensorWithData(tensorflow::Tensor* tensor,
const string& values_as_string) {
const auto& values = testing::Split<T>(values_as_string, ",");
if (values.size() == tensor->NumElements()) {
auto data = tensor->flat<T>();
for (int i = 0; i < values.size(); i++) {
data(i) = values[i];
}
}
return values.size();
}
int FillTensorWithTfLiteHexString(tensorflow::Tensor* tensor,
const string& values_as_string) {
string s = absl::HexStringToBytes(values_as_string);
int num_strings = values_as_string.empty() ? 0 : GetStringCount(s.data());
if (num_strings == tensor->NumElements()) {
auto data = tensor->flat<tensorflow::tstring>();
for (size_t i = 0; i < num_strings; ++i) {
auto ref = GetString(s.data(), i);
data(i).assign(ref.str, ref.len);
}
}
return num_strings;
}
template <typename T>
void FillTensorWithZeros(tensorflow::Tensor* tensor) {
auto data = tensor->flat<T>();
for (int i = 0; i < tensor->NumElements(); i++) {
data(i) = 0;
}
}
template <typename T>
string TensorDataToCsvString(const tensorflow::Tensor& tensor) {
const auto& data = tensor.flat<T>();
return Join(data.data(), data.size(), ",");
}
string TensorDataToTfLiteHexString(const tensorflow::Tensor& tensor) {
DynamicBuffer dynamic_buffer;
auto data = tensor.flat<tensorflow::tstring>();
for (int i = 0; i < tensor.NumElements(); ++i) {
dynamic_buffer.AddString(data(i).data(), data(i).size());
}
char* char_buffer = nullptr;
size_t size = dynamic_buffer.WriteToBuffer(&char_buffer);
string s = absl::BytesToHexString({char_buffer, size});
free(char_buffer);
return s;
}
}
TfDriver::TfDriver(const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer)
: input_names_(input_layer), output_names_(output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
input_ids_.resize(input_layer.size());
input_tensors_.reserve(input_layer.size());
input_types_.resize(input_layer.size());
input_shapes_.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
input_ids_[i] = i;
input_tensors_[input_layer[i]] = {};
CHECK(DataTypeFromString(input_layer_type[i], &input_types_[i]));
input_shapes_[i] = Split<int64_t>(input_layer_shape[i], ",");
input_name_to_id_[input_layer[i]] = i;
}
output_ids_.resize(output_layer.size());
output_tensors_.reserve(output_layer.size());
for (int i = 0; i < output_layer.size(); i++) {
output_ids_[i] = i;
output_name_to_id_[output_layer[i]] = i;
}
}
void TfDriver::LoadModel(const string& bin_file_path) {
if (!IsValid()) return;
std::ifstream model(bin_file_path);
if (model.fail()) {
Invalidate("Failed to find the model " + bin_file_path);
return;
}
tensorflow::GraphDef graphdef;
if (!graphdef.ParseFromIstream(&model)) {
Invalidate("Failed to parse tensorflow graphdef");
return;
}
tensorflow::SessionOptions options;
session_.reset(tensorflow::NewSession(options));
auto status = session_->Create(graphdef);
if (!status.ok()) {
Invalidate(absl::StrCat("Failed to create session. ", status.message()));
}
}
void TfDriver::ReshapeTensor(const string& name, const string& csv_values) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
input_shapes_[id] = Split<int64_t>(csv_values, ",");
input_tensors_[input_names_[id]] =
CreateTensor(input_types_[id], input_shapes_[id]);
ResetTensor(name);
}
void TfDriver::ResetTensor(const std::string& name) {
if (!IsValid()) return;
int id = input_name_to_id_[name];
auto tensor = input_tensors_[input_names_[id]];
switch (input_types_[id]) {
case tensorflow::DT_FLOAT: {
FillTensorWithZeros<float>(&tensor);
break;
}
case tensorflow::DT_INT32: {
FillTensorWithZeros<int32_t>(&tensor);
break;
}
default:
Invalidate(absl::StrCat("Unsupported tensor type ", input_types_[id],
tensorflow::DataType_Name(input_types_[id]),
" in ResetInput"));
return;
}
}
string TfDriver::ReadOutput(const string& name) {
if (!IsValid()) return "";
return ReadOutput(output_tensors_[output_name_to_id_[name]]);
}
void TfDriver::Invoke(const std::vector<std::pair<string, string>>& inputs) {
if (!IsValid()) return;
for (const auto& input : inputs) {
auto id = input_name_to_id_[input.first];
auto tensor = CreateTensor(input_types_[id], input_shapes_[id]);
SetInput(input.second, &tensor);
input_tensors_[input_names_[id]] = tensor;
}
auto status = session_->Run({input_tensors_.begin(), input_tensors_.end()},
output_names_, {}, &output_tensors_);
if (!status.ok()) {
Invalidate(
absl::StrCat("TensorFlow failed to run graph:", status.message()));
}
}
void TfDriver::SetInput(const string& values_as_string,
tensorflow::Tensor* tensor) {
int num_values_available = 0;
switch (tensor->dtype()) {
case tensorflow::DT_FLOAT:
num_values_available =
FillTensorWithData<float>(tensor, values_as_string);
break;
case tensorflow::DT_INT32:
num_values_available =
FillTensorWithData<int32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT32:
num_values_available =
FillTensorWithData<uint32_t>(tensor, values_as_string);
break;
case tensorflow::DT_UINT8:
num_values_available =
FillTensorWithData<uint8_t>(tensor, values_as_string);
break;
case tensorflow::DT_STRING:
num_values_available =
FillTensorWithTfLiteHexString(tensor, values_as_string);
break;
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor->dtype()),
" in SetInput"));
return;
}
if (tensor->NumElements() != num_values_available) {
Invalidate(absl::StrCat("Needed ", tensor->NumElements(),
" values for input tensor, but was given ",
num_values_available, " instead."));
}
}
string TfDriver::ReadOutput(const tensorflow::Tensor& tensor) {
switch (tensor.dtype()) {
case tensorflow::DT_FLOAT:
return TensorDataToCsvString<float>(tensor);
case tensorflow::DT_INT32:
return TensorDataToCsvString<int32_t>(tensor);
case tensorflow::DT_UINT32:
return TensorDataToCsvString<uint32_t>(tensor);
case tensorflow::DT_INT64:
return TensorDataToCsvString<int64_t>(tensor);
case tensorflow::DT_UINT8:
return TensorDataToCsvString<uint8_t>(tensor);
case tensorflow::DT_STRING:
return TensorDataToTfLiteHexString(tensor);
case tensorflow::DT_BOOL:
return TensorDataToCsvString<bool>(tensor);
default:
Invalidate(absl::StrCat("Unsupported tensor type ",
tensorflow::DataType_Name(tensor.dtype()),
" in ReadOutput"));
return "";
}
}
}
} | #include "tensorflow/lite/testing/tf_driver.h"
#include <algorithm>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace testing {
namespace {
class TestDriver : public TfDriver {
public:
TestDriver() : TfDriver({}, {}, {}, {}) {}
string WriteAndReadBack(tensorflow::DataType type,
const std::vector<int64_t>& shape,
const string& values) {
tensorflow::Tensor t = {
type,
tensorflow::TensorShape{absl::Span<const int64_t>{
reinterpret_cast<const int64_t*>(shape.data()), shape.size()}}};
SetInput(values, &t);
return ReadOutput(t);
}
};
TEST(TfDriverTest, ReadingAndWritingValues) {
TestDriver driver;
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_FLOAT, {1, 2, 2},
"0.10,0.20,0.30,0.40"),
"0.100000001,0.200000003,0.300000012,0.400000006");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_INT32, {1, 2, 2},
"10,40,100,-100"),
"10,40,100,-100");
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_UINT8, {1, 2, 2},
"48,49,121, 122"),
"0,1,y,z");
}
TEST(TfDriverTest, ReadingAndWritingValuesStrings) {
TestDriver driver;
auto set_buffer = [](const std::vector<string>& values, string* buffer) {
DynamicBuffer dynamic_buffer;
for (const string& s : values) {
dynamic_buffer.AddString(s.data(), s.size());
}
char* char_b = nullptr;
int size = dynamic_buffer.WriteToBuffer(&char_b);
*buffer = absl::BytesToHexString(absl::string_view(char_b, size));
free(char_b);
};
string buffer;
set_buffer({"", "", "", ""}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, ""),
buffer);
set_buffer({"AB", "ABC", "X", "YZ"}, &buffer);
ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_STRING, {1, 2, 2}, buffer),
buffer);
}
TEST(TfDriverTest, SimpleTest) {
std::unique_ptr<TfDriver> runner(
new TfDriver({"a", "b", "c", "d"}, {"float", "float", "float", "float"},
{"1,8,8,3", "1,8,8,3", "1,8,8,3", "1,8,8,3"}, {"x", "y"}));
runner->LoadModel("tensorflow/lite/testdata/multi_add.pb");
EXPECT_TRUE(runner->IsValid()) << runner->GetErrorMessage();
for (const auto& i : {"a", "b", "c", "d"}) {
runner->ReshapeTensor(i, "1,2,2,1");
}
ASSERT_TRUE(runner->IsValid());
runner->ResetTensor("c");
runner->Invoke({{"a", "0.1,0.2,0.3,0.4"},
{"b", "0.001,0.002,0.003,0.004"},
{"d", "0.01,0.02,0.03,0.04"}});
ASSERT_EQ(runner->ReadOutput("x"),
"0.101000004,0.202000007,0.303000003,0.404000014");
ASSERT_EQ(runner->ReadOutput("y"),
"0.0109999999,0.0219999999,0.0329999998,0.0439999998");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tf_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tf_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13773e58-3b6b-4bb8-bc58-34d395c25d1b | cpp | tensorflow/tensorflow | decode_wav_op | tensorflow/core/kernels/decode_wav_op.cc | tensorflow/core/kernels/decode_wav_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/wav/wav_io.h"
namespace tensorflow {
class DecodeWavOp : public OpKernel {
public:
explicit DecodeWavOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr("desired_channels", &desired_channels_));
OP_REQUIRES_OK(context,
context->GetAttr("desired_samples", &desired_samples_));
}
void Compute(OpKernelContext* context) override {
const Tensor& contents = context->input(0);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()),
errors::InvalidArgument("contents must be scalar, got shape ",
contents.shape().DebugString()));
const string& wav_string = contents.scalar<tstring>()();
OP_REQUIRES(context, wav_string.size() <= std::numeric_limits<int>::max(),
errors::InvalidArgument("WAV contents are too large for int: ",
wav_string.size()));
std::vector<float> decoded_samples;
uint32 decoded_sample_count;
uint16 decoded_channel_count;
uint32 decoded_sample_rate;
OP_REQUIRES_OK(context,
wav::DecodeLin16WaveAsFloatVector(
wav_string, &decoded_samples, &decoded_sample_count,
&decoded_channel_count, &decoded_sample_rate));
int32_t output_sample_count;
if (desired_samples_ == -1) {
output_sample_count = decoded_sample_count;
} else {
output_sample_count = desired_samples_;
}
int32_t output_channel_count;
if (desired_channels_ == -1) {
output_channel_count = decoded_channel_count;
} else {
output_channel_count = desired_channels_;
}
Tensor* output = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({output_sample_count, output_channel_count}),
&output));
auto output_matrix = output->matrix<float>();
for (int sample = 0; sample < output_sample_count; ++sample) {
for (int channel = 0; channel < output_channel_count; ++channel) {
float output_value;
if (sample >= decoded_sample_count) {
output_value = 0.0f;
} else {
int source_channel;
if (channel < decoded_channel_count) {
source_channel = channel;
} else {
source_channel = decoded_channel_count - 1;
}
const int decoded_index =
(sample * decoded_channel_count) + source_channel;
output_value = decoded_samples[decoded_index];
}
output_matrix(sample, channel) = output_value;
}
}
Tensor* sample_rate_output = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, TensorShape({}),
&sample_rate_output));
sample_rate_output->flat<int32>()(0) = decoded_sample_rate;
}
private:
int32 desired_channels_;
int32 desired_samples_;
};
REGISTER_KERNEL_BUILDER(Name("DecodeWav").Device(DEVICE_CPU), DecodeWavOp);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace ops {
namespace {
TEST(DecodeWavOpTest, DecodeWavTest) {
Scope root = Scope::NewRootScope();
std::vector<uint8> wav_data = {
'R', 'I', 'F', 'F', 44, 0, 0, 0,
'W', 'A', 'V', 'E', 'f', 'm', 't', ' ', 16, 0, 0,
0,
1, 0,
1, 0,
0x13, 0x37, 0, 0,
0x26, 0x6e, 0, 0,
2, 0,
16, 0,
'd', 'a', 't', 'a', 8, 0, 0, 0,
0, 0,
0xff, 0x3f,
0xff, 0x7f,
0x00, 0x80,
};
Tensor content_tensor =
test::AsScalar<tstring>(string(wav_data.begin(), wav_data.end()));
Output content_op =
Const(root.WithOpName("content_op"), Input::Initializer(content_tensor));
DecodeWav decode_wav_op =
DecodeWav(root.WithOpName("decode_wav_op"), content_op);
TF_ASSERT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
{decode_wav_op.audio, decode_wav_op.sample_rate},
&outputs));
const Tensor& audio = outputs[0];
const int sample_rate = outputs[1].flat<int32>()(0);
EXPECT_EQ(2, audio.dims());
EXPECT_EQ(1, audio.dim_size(1));
EXPECT_EQ(4, audio.dim_size(0));
EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f);
EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f);
EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f);
EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f);
EXPECT_EQ(14099, sample_rate);
}
TEST(DecodeWavOpTest, DecodeWav_ShapeFn) {
ShapeInferenceTestOp op("DecodeWav");
INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[1]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav")
.Input({"a", 0, DT_STRING})
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,?];[]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav")
.Input({"a", 0, DT_STRING})
.Attr("desired_samples", 42)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[42,?];[]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav")
.Input({"a", 0, DT_STRING})
.Attr("desired_samples", -2)
.Finalize(&op.node_def));
INFER_ERROR("samples must be non-negative, got -2", op, "[]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav")
.Input({"a", 0, DT_STRING})
.Attr("desired_channels", 2)
.Finalize(&op.node_def));
INFER_OK(op, "[]", "[?,2];[]");
TF_ASSERT_OK(NodeDefBuilder("test", "DecodeWav")
.Input({"a", 0, DT_STRING})
.Attr("desired_channels", -2)
.Finalize(&op.node_def));
INFER_ERROR("channels must be non-negative, got -2", op, "[]");
}
TEST(DecodeWavOpTest, DecodeWavWithJunkChunkTest) {
Scope root = Scope::NewRootScope();
std::vector<uint8> wav_data = {
'R', 'I', 'F', 'F', 76, 0, 0, 0,
'W', 'A', 'V', 'E', 'J', 'U', 'N', 'K',
28, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 'f', 'm', 't', ' ',
16, 0, 0, 0,
1, 0,
1, 0,
0x13, 0x37, 0, 0,
0x26, 0x6e, 0, 0,
2, 0,
16, 0,
'd', 'a', 't', 'a', 8, 0, 0, 0,
0, 0,
0xff, 0x3f,
0xff, 0x7f,
0x00, 0x80,
};
Tensor content_tensor =
test::AsScalar<tstring>(string(wav_data.begin(), wav_data.end()));
Output content_op =
Const(root.WithOpName("content_op"), Input::Initializer(content_tensor));
DecodeWav decode_wav_op =
DecodeWav(root.WithOpName("decode_wav_op"), content_op);
TF_ASSERT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
{decode_wav_op.audio, decode_wav_op.sample_rate},
&outputs));
const Tensor& audio = outputs[0];
const int sample_rate = outputs[1].flat<int32>()(0);
EXPECT_EQ(2, audio.dims());
EXPECT_EQ(1, audio.dim_size(1));
EXPECT_EQ(4, audio.dim_size(0));
EXPECT_NEAR(0.0f, audio.flat<float>()(0), 1e-4f);
EXPECT_NEAR(0.5f, audio.flat<float>()(1), 1e-4f);
EXPECT_NEAR(1.0f, audio.flat<float>()(2), 1e-4f);
EXPECT_NEAR(-1.0f, audio.flat<float>()(3), 1e-4f);
EXPECT_EQ(14099, sample_rate);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/decode_wav_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/decode_wav_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d124380-5a97-495c-86d2-35ce131e46d1 | cpp | tensorflow/tensorflow | xla_compile_util | tensorflow/compiler/jit/xla_compile_util.cc | tensorflow/compiler/jit/xla_compile_util_test.cc | #include "tensorflow/compiler/jit/xla_compile_util.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
namespace {
constexpr const char* kPjRtDeviceCompilerResourceName = "pjrt_device_compiler";
constexpr const char* kPjRtDeviceCompilationProfilerResourceName =
"pjrt_device_compilation_profiler";
}
absl::StatusOr<std::unique_ptr<Graph>> CreateSingleOpGraph(
const NodeDef& node_def, absl::Span<const XlaArgument> args,
absl::Span<const DataType> result_types) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def));
for (int64_t i = 0, end = args.size(); i < end; ++i) {
Node* node;
string arg_name = absl::StrCat("_arg", i);
Status status =
NodeBuilder(arg_name, FunctionLibraryDefinition::kArgOp)
.ControlInput(graph->source_node())
.Attr("T", args[i].kind == XlaArgument::kResource ? DT_RESOURCE
: args[i].type)
.Attr("index", i)
.Finalize(graph.get(), &node);
TF_RETURN_IF_ERROR(status);
graph->AddEdge(node, 0, main_node, i);
}
for (int64_t i = 0, end = result_types.size(); i < end; ++i) {
Node* node;
string retval_name = absl::StrCat("_retval", i);
Status status = NodeBuilder(retval_name, FunctionLibraryDefinition::kRetOp)
.Input(main_node, i)
.Attr("T", result_types[i])
.Attr("index", i)
.Finalize(graph.get(), &node);
TF_RETURN_IF_ERROR(status);
}
FixupSourceAndSinkEdges(graph.get());
return graph;
}
bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type) {
const auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
return rollout_config.IsEnabledInXlaLaunchForDevice(device_type) ||
rollout_config.IsEnabledInXlaCompileOnDemandForDevice(device_type) ||
rollout_config.IsEnabledInXlaCompileAndRunForDevice(device_type);
}
std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type) {
return absl::StrCat(kPjRtDeviceCompilerResourceName, "_",
device_type.type_string());
}
std::string GetPjRtDeviceCompilationProfilerResourceName(
const DeviceType& device_type) {
return absl::StrCat(kPjRtDeviceCompilationProfilerResourceName, "_",
device_type.type_string());
}
absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler(
const OpKernelContext& ctx, const DeviceType& device_type) {
ResourceMgr* rm = nullptr;
if (device_type == DEVICE_TPU) {
rm = tfrt_global::GetTFGlobalResourceMgr();
} else {
rm = ctx.resource_manager();
}
if (!rm) {
return absl::InternalError("No resource manager found.");
}
return rm;
}
} | #include "tensorflow/compiler/jit/xla_compile_util.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
TEST_F(OpsTestBase, CreateSingleOpGraph) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2});
TF_EXPECT_OK(RunOpKernel());
XlaCompiler::SingleOpCompileArgument single_op_arg(*context_);
std::vector<XlaArgument> args(1);
args[0].kind = XlaArgument::kConstant;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({1, 2});
args[0].constant_value = GetInput(0);
args[0].initialized = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph,
CreateSingleOpGraph(*node_def(), args, single_op_arg.output_dtypes));
const auto& node_name_index = graph->BuildNodeNameIndex();
const Node* identity_node = node_name_index.at("identity_op");
EXPECT_EQ(identity_node->op_def().name(), "Identity");
EXPECT_EQ(identity_node->attrs().FindByString("T")->type(), DT_FLOAT);
EXPECT_EQ(identity_node->num_inputs(), 1);
const Node* identity_input_node = nullptr;
TF_EXPECT_OK(identity_node->input_node(0, &identity_input_node));
EXPECT_EQ(identity_input_node->name(), "_arg0");
const Node* arg_node = node_name_index.at("_arg0");
EXPECT_EQ(arg_node->op_def().name(), "_Arg");
EXPECT_EQ(arg_node->attrs().FindByString("T")->type(), DT_FLOAT);
const Node* retval_node = node_name_index.at("_retval0");
EXPECT_EQ(retval_node->op_def().name(), "_Retval");
EXPECT_EQ(retval_node->attrs().FindByString("T")->type(), DT_FLOAT);
EXPECT_EQ(identity_node->num_outputs(), 1);
EXPECT_EQ(retval_node->num_inputs(), 1);
const Node* retval_input_node = nullptr;
TF_EXPECT_OK(retval_node->input_node(0, &retval_input_node));
EXPECT_EQ(retval_input_node->name(), "identity_op");
}
TEST(XlaCompileUtilTest, PjRtXlaLaunchFlagTest) {
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_xla_launch_ = true;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_GPU));
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_CPU));
EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.enabled_for_xla_launch_ = false;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
}
TEST(XlaCompileUtilTest, PjRtXlaCompileOnDemandFlagTest) {
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_compile_on_demand_ = true;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_GPU));
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_CPU));
EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.enabled_for_compile_on_demand_ = false;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
}
TEST(XlaCompileUtilTest, PjRtDeviceCompilerResourceName) {
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU)),
"pjrt_device_compiler_TPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU_NODE)),
"pjrt_device_compiler_TPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_CPU)),
"pjrt_device_compiler_CPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_GPU)),
"pjrt_device_compiler_GPU");
}
TEST(XlaCompileUtilTest, PjRtDeviceCompilationProfilerResourceName) {
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU)),
"pjrt_device_compilation_profiler_TPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU_NODE)),
"pjrt_device_compilation_profiler_TPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_CPU)),
"pjrt_device_compilation_profiler_CPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_GPU)),
"pjrt_device_compilation_profiler_GPU");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compile_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compile_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94567bfa-b355-4273-8c93-21320569b490 | cpp | google/tensorstore | intrusive_ptr | tensorstore/internal/intrusive_ptr.h | tensorstore/internal/intrusive_ptr_test.cc | #ifndef TENSORSTORE_INTERNAL_INTRUSIVE_PTR_H_
#define TENSORSTORE_INTERNAL_INTRUSIVE_PTR_H_
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <type_traits>
#include <utility>
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/type_traits.h"
namespace tensorstore {
namespace internal {
template <typename Derived>
class AtomicReferenceCount {
public:
AtomicReferenceCount() = default;
AtomicReferenceCount(size_t initial_ref_count)
: ref_count_(initial_ref_count) {}
AtomicReferenceCount(const AtomicReferenceCount&) noexcept {}
AtomicReferenceCount& operator=(const AtomicReferenceCount&) noexcept {
return *this;
}
uint32_t use_count() const noexcept {
return ref_count_.load(std::memory_order_acquire);
}
template <typename D>
friend bool IncrementReferenceCountIfNonZero(
const AtomicReferenceCount<D>& base);
template <typename D>
friend bool DecrementReferenceCount(const AtomicReferenceCount<D>& base);
friend void intrusive_ptr_increment(const AtomicReferenceCount* p) noexcept {
p->ref_count_.fetch_add(1, std::memory_order_acq_rel);
}
friend void intrusive_ptr_decrement(const AtomicReferenceCount* p) noexcept {
if (DecrementReferenceCount(*p)) {
delete static_cast<const Derived*>(p);
}
}
private:
mutable std::atomic<uint32_t> ref_count_{0};
};
template <typename Derived>
inline bool IncrementReferenceCountIfNonZero(
const AtomicReferenceCount<Derived>& base) {
uint32_t count = base.ref_count_.load(std::memory_order_relaxed);
do {
if (count == 0) return false;
} while (!base.ref_count_.compare_exchange_weak(count, count + 1,
std::memory_order_acq_rel));
return true;
}
template <typename Derived>
inline bool DecrementReferenceCount(const AtomicReferenceCount<Derived>& base) {
return base.ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1;
}
template <typename T>
bool DecrementReferenceCountIfGreaterThanOne(std::atomic<T>& reference_count) {
auto count = reference_count.load(std::memory_order_relaxed);
while (true) {
if (count == 1) return false;
if (reference_count.compare_exchange_weak(count, count - 1,
std::memory_order_acq_rel)) {
return true;
}
}
}
struct DefaultIntrusivePtrTraits {
template <typename U>
using pointer = U*;
template <typename Pointer>
static void increment(Pointer p) noexcept {
intrusive_ptr_increment(p);
}
template <typename Pointer>
static void decrement(Pointer p) noexcept {
intrusive_ptr_decrement(p);
}
};
struct acquire_object_ref_t {
explicit constexpr acquire_object_ref_t() = default;
};
struct adopt_object_ref_t {
explicit constexpr adopt_object_ref_t() = default;
};
constexpr acquire_object_ref_t acquire_object_ref{};
constexpr adopt_object_ref_t adopt_object_ref{};
template <typename T, typename R>
class IntrusivePtr;
template <typename T>
struct IsIntrusivePtr : public std::false_type {};
template <typename T, typename R>
struct IsIntrusivePtr<IntrusivePtr<T, R>> : public std::true_type {};
template <typename T, typename R = DefaultIntrusivePtrTraits>
class IntrusivePtr {
public:
using element_type = T;
using traits_type = R;
using pointer = typename R::template pointer<T>;
~IntrusivePtr() {
if (pointer p = get()) R::decrement(p);
}
constexpr IntrusivePtr() noexcept : ptr_(nullptr) {}
constexpr IntrusivePtr(std::nullptr_t) noexcept : ptr_(nullptr) {}
explicit IntrusivePtr(pointer p) noexcept : ptr_(p) {
if (ptr_) R::increment(ptr_);
}
explicit IntrusivePtr(pointer p, acquire_object_ref_t) noexcept : ptr_(p) {
if (ptr_) R::increment(ptr_);
}
constexpr explicit IntrusivePtr(pointer p, adopt_object_ref_t) noexcept
: ptr_(p) {}
IntrusivePtr(const IntrusivePtr& rhs) noexcept
: IntrusivePtr(rhs.get(), acquire_object_ref) {}
IntrusivePtr& operator=(const IntrusivePtr& rhs) noexcept {
IntrusivePtr(rhs).swap(*this);
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>* = nullptr>
IntrusivePtr(const IntrusivePtr<U, R>& rhs) noexcept
: IntrusivePtr(rhs.get(), acquire_object_ref) {}
template <typename U, typename = std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>>
IntrusivePtr& operator=(const IntrusivePtr<U, R>& rhs) noexcept {
IntrusivePtr(rhs).swap(*this);
return *this;
}
constexpr IntrusivePtr(IntrusivePtr&& rhs) noexcept
: IntrusivePtr(rhs.release(), adopt_object_ref) {}
constexpr IntrusivePtr& operator=(IntrusivePtr&& rhs) noexcept {
IntrusivePtr(std::move(rhs)).swap(*this);
return *this;
}
template <typename U,
std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>* = nullptr>
constexpr IntrusivePtr(IntrusivePtr<U, R>&& rhs) noexcept
: IntrusivePtr(rhs.release(), adopt_object_ref) {}
template <typename U, typename = std::enable_if_t<std::is_convertible_v<
typename R::template pointer<U>, pointer>>>
constexpr IntrusivePtr& operator=(IntrusivePtr<U, R>&& rhs) noexcept {
IntrusivePtr(std::move(rhs)).swap(*this);
return *this;
}
void reset() noexcept { IntrusivePtr().swap(*this); }
void reset(std::nullptr_t) noexcept { IntrusivePtr().swap(*this); }
void reset(pointer rhs) { IntrusivePtr(rhs, acquire_object_ref).swap(*this); }
void reset(pointer rhs, acquire_object_ref_t) {
IntrusivePtr(rhs, acquire_object_ref).swap(*this);
}
void reset(pointer rhs, adopt_object_ref_t) {
IntrusivePtr(rhs, adopt_object_ref).swap(*this);
}
constexpr explicit operator bool() const { return static_cast<bool>(ptr_); }
constexpr pointer get() const noexcept { return ptr_; }
constexpr pointer operator->() const {
pointer ptr = get();
assert(static_cast<bool>(ptr));
return ptr;
}
constexpr element_type& operator*() const {
pointer ptr = get();
assert(static_cast<bool>(ptr));
return *ptr;
}
constexpr pointer release() noexcept {
pointer ptr = get();
ptr_ = pointer{};
return ptr;
}
void swap(IntrusivePtr& rhs) noexcept {
std::swap(ptr_, rhs.ptr_);
}
template <typename H>
friend H AbslHashValue(H h, const IntrusivePtr& x) {
return H::combine(std::move(h), x.get());
}
friend bool operator==(const IntrusivePtr& p, std::nullptr_t) { return !p; }
friend bool operator!=(const IntrusivePtr& p, std::nullptr_t) {
return static_cast<bool>(p);
}
friend bool operator==(std::nullptr_t, const IntrusivePtr& p) { return !p; }
friend bool operator!=(std::nullptr_t, const IntrusivePtr& p) {
return static_cast<bool>(p);
}
private:
pointer ptr_;
};
template <typename T, typename R>
inline T* to_address(const IntrusivePtr<T, R>& p) {
return to_address(p.get());
}
template <typename T, typename U, typename R>
inline std::enable_if_t<IsEqualityComparable<typename R::template pointer<T>,
typename R::template pointer<U>>,
bool>
operator==(const IntrusivePtr<T, R>& x, const IntrusivePtr<U, R>& y) {
return x.get() == y.get();
}
template <typename T, typename U, typename R>
inline std::enable_if_t<IsEqualityComparable<typename R::template pointer<T>,
typename R::template pointer<U>>,
bool>
operator!=(const IntrusivePtr<T, R>& x, const IntrusivePtr<U, R>& y) {
return x.get() != y.get();
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> static_pointer_cast(IntrusivePtr<U, R> p) {
return IntrusivePtr<T, R>(static_pointer_cast<T>(p.release()),
adopt_object_ref);
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> const_pointer_cast(IntrusivePtr<U, R> p) {
return IntrusivePtr<T, R>(const_pointer_cast<T>(p.release()),
adopt_object_ref);
}
template <typename T, typename U, typename R>
inline IntrusivePtr<T, R> dynamic_pointer_cast(IntrusivePtr<U, R> p) {
if (auto new_pointer = dynamic_pointer_cast<T>(p.get())) {
p.release();
return IntrusivePtr<T, R>(std::move(new_pointer), adopt_object_ref);
} else {
return IntrusivePtr<T, R>(std::move(new_pointer), adopt_object_ref);
}
}
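// Converts an IntrusivePtr into a std::shared_ptr without giving up the
// intrusive reference: the shared_ptr's control block owns a copy of the
// IntrusivePtr, and the aliasing constructor points the result at the same
// object.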
template <typename T, typename Traits>
std::shared_ptr<T> IntrusiveToShared(internal::IntrusivePtr<T, Traits> p) {
auto* ptr = p.get();
return std::shared_ptr<T>(
std::make_shared<internal::IntrusivePtr<T, Traits>>(std::move(p)), ptr);
}
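// Constructs a T from `args` and wraps it in an IntrusivePtr, acquiring the
// initial reference through the default (acquire_object_ref) constructor.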
template <typename T, typename R = DefaultIntrusivePtrTraits, typename... Args>
inline IntrusivePtr<T, R> MakeIntrusivePtr(Args&&... args) {
return IntrusivePtr<T, R>(new T(std::forward<Args>(args)...));
}
}
}
#endif | #include "tensorstore/internal/intrusive_ptr.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "tensorstore/internal/memory.h"
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal::AtomicReferenceCount;
using ::tensorstore::internal::const_pointer_cast;
using ::tensorstore::internal::dynamic_pointer_cast;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::static_pointer_cast;
namespace default_behavior {
struct X : public AtomicReferenceCount<X> {
virtual ~X() = default;
};
struct Y : public X {
virtual ~Y() = default;
};
TEST(IntrusivePtrTest, DefaultConstructor) {
IntrusivePtr<X> p;
EXPECT_EQ(p.get(), nullptr);
EXPECT_EQ(p, p);
EXPECT_EQ(p.get(), static_cast<X*>(nullptr));
EXPECT_EQ(p, nullptr);
EXPECT_EQ(nullptr, p);
}
TEST(IntrusivePtrTest, PointerConstructor) {
X* x = new X;
IntrusivePtr<X> p(x, acquire_object_ref);
EXPECT_EQ(p.get(), x);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(p, p);
EXPECT_NE(p, nullptr);
EXPECT_NE(nullptr, p);
EXPECT_EQ(x, p.operator->());
EXPECT_EQ(x, &*p);
}
TEST(IntrusivePtrTest, ConstructFromDerivedPointer) {
IntrusivePtr<X> p(new Y);
}
TEST(IntrusivePtrTest, PointerConstructorNoAddRef) {
X* x = new X;
intrusive_ptr_increment(x);
EXPECT_EQ(1, x->use_count());
IntrusivePtr<X> p(x, adopt_object_ref);
EXPECT_EQ(p.get(), x);
EXPECT_EQ(1, x->use_count());
}
TEST(IntrusivePtrTest, CopyConstructorNonNull) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.get());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, CopyConstructorNull) {
IntrusivePtr<X> p;
IntrusivePtr<X> p2(p);
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, MoveConstructorNonNull) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, MoveConstructorNull) {
IntrusivePtr<X> p;
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, ConvertingCopyConstructorNonNull) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
IntrusivePtr<X> p2(p);
EXPECT_EQ(2, y->use_count());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(y, p.get());
}
TEST(IntrusivePtrTest, ConvertingMoveConstructorNonNull) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(1, y->use_count());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ConvertingCopyConstructorNull) {
IntrusivePtr<Y> p;
IntrusivePtr<X> p2(p);
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ConvertingMoveConstructorNull) {
IntrusivePtr<Y> p;
IntrusivePtr<X> p2(std::move(p));
EXPECT_EQ(nullptr, p2.get());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, CopyAssignment) {
X* x = new X;
IntrusivePtr<X> p(x);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = p;
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.get());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, CopyAssignmentSelf) {
X* x = new X;
IntrusivePtr<X> p(x);
auto& p_ref = p;
p = p_ref;
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p.get());
}
TEST(IntrusivePtrTest, MoveAssignment) {
X* x = new X;
IntrusivePtr<X> p(x);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = std::move(p);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(x, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, MoveAssignmentSelf) {
X* x = new X;
IntrusivePtr<X> p(x);
auto& p_ref = p;
p = std::move(p_ref);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(x, p.get());
}
TEST(IntrusivePtrTest, ConvertingCopyAssignment) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = p;
EXPECT_EQ(2, y->use_count());
EXPECT_EQ(y, p.get());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, ConvertingMoveAssignment) {
Y* y = new Y;
IntrusivePtr<Y> p(y);
X* x2 = new X;
IntrusivePtr<X> p3(x2);
IntrusivePtr<X> p2(x2);
p2 = std::move(p);
EXPECT_EQ(1, y->use_count());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(y, p2.get());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, Swap) {
X* x = new X;
X* x2 = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x2);
p.swap(p2);
EXPECT_EQ(x, p2.get());
EXPECT_EQ(x2, p.get());
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(1, x2->use_count());
}
TEST(IntrusivePtrTest, BoolConversion) {
IntrusivePtr<X> p;
EXPECT_FALSE(static_cast<bool>(p));
IntrusivePtr<X> p2(new X);
EXPECT_TRUE(static_cast<bool>(p2));
}
TEST(IntrusivePtrTest, Detach) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p.release());
EXPECT_EQ(nullptr, p.get());
EXPECT_EQ(2, x->use_count());
p.reset(x, adopt_object_ref);
}
TEST(IntrusivePtrTest, ResetNoArg) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
p.reset();
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ResetNullptr) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
EXPECT_EQ(2, x->use_count());
p.reset(static_cast<X*>(nullptr));
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p.get());
}
TEST(IntrusivePtrTest, ResetPointerAddRef) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
IntrusivePtr<X> p3(new X);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(1, p3->use_count());
p.reset(p3.get());
EXPECT_EQ(2, p3->use_count());
EXPECT_EQ(p3.get(), p.get());
EXPECT_EQ(1, x->use_count());
}
TEST(IntrusivePtrTest, ResetPointerNoAddRef) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x);
IntrusivePtr<X> p3(new X);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(1, p3->use_count());
p.reset(p3.get(), adopt_object_ref);
EXPECT_EQ(1, p3->use_count());
EXPECT_EQ(p3.get(), p.get());
EXPECT_EQ(1, x->use_count());
p.release();
}
TEST(IntrusivePtrTest, Comparison) {
X* x = new X;
X* x2 = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<X> p2(x2);
EXPECT_EQ(p, p);
EXPECT_NE(p, p2);
EXPECT_NE(p, nullptr);
EXPECT_NE(nullptr, p);
}
TEST(IntrusivePtrTest, StaticPointerCast) {
X* x = new Y;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = static_pointer_cast<Y>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, ConstPointerCast) {
X* x = new X;
IntrusivePtr<const X> p(x);
IntrusivePtr<X> p2 = const_pointer_cast<X>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, DynamicPointerCastSuccess) {
X* x = new Y;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(2, x->use_count());
EXPECT_EQ(x, p2.get());
}
TEST(IntrusivePtrTest, DynamicPointerCastFailure) {
X* x = new X;
IntrusivePtr<X> p(x);
IntrusivePtr<Y> p2 = dynamic_pointer_cast<Y>(p);
EXPECT_EQ(1, x->use_count());
EXPECT_EQ(nullptr, p2.get());
}
TEST(IntrusivePtrTest, MakeIntrusive) {
auto x = tensorstore::internal::MakeIntrusivePtr<X>();
EXPECT_EQ(1, x->use_count());
EXPECT_NE(nullptr, x.get());
}
}
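// The following tests exercise types that provide their own
// intrusive_ptr_increment / intrusive_ptr_decrement free functions, found via
// ADL instead of inheriting from AtomicReferenceCount.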
namespace custom_increment_decrement_functions {
class X {
public:
X(int v) : v_(v) {}
virtual ~X() = default;
friend void intrusive_ptr_increment(X* p) { ++p->ref_count_; }
friend void intrusive_ptr_decrement(X* p) {
if (--p->ref_count_ == 0) {
delete p;
}
}
uint32_t ref_count_{0};
int v_{0};
};
class Y : public X {
public:
using X::X;
};
TEST(IntrusivePtrTest, CustomIncrementDecrementFunctions) {
IntrusivePtr<X> x1(new X(1));
EXPECT_EQ(1, x1->ref_count_);
IntrusivePtr<X> x2 = x1;
EXPECT_EQ(2, x2->ref_count_);
IntrusivePtr<Y> y1(new Y(2));
IntrusivePtr<X> y2 = y1;
IntrusivePtr<Y> y3 = dynamic_pointer_cast<Y>(y2);
EXPECT_EQ(y2, y1);
EXPECT_EQ(y3, y1);
}
TEST(IntrusivePtrTest, MakeIntrusiveWithCustomIncrementDecrement) {
auto x = tensorstore::internal::MakeIntrusivePtr<X>(1);
EXPECT_EQ(1, x->ref_count_);
EXPECT_NE(nullptr, x.get());
EXPECT_EQ(1, x->v_);
auto y = tensorstore::internal::MakeIntrusivePtr<Y>(2);
EXPECT_EQ(1, y->ref_count_);
EXPECT_NE(nullptr, y.get());
EXPECT_EQ(2, y->v_);
}
}
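// The following tests supply an explicit traits type (XTraits) so reference
// counting is controlled by IntrusivePtr's second template parameter rather
// than by ADL-found free functions.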
namespace custom_traits {
class X {
public:
X(int v) : v_(v) {}
virtual ~X() = default;
uint32_t ref_count_{0};
int v_{0};
};
class Y : public X {
public:
using X::X;
};
struct XTraits {
template <typename U>
using pointer = U*;
static void increment(X* p) noexcept { ++p->ref_count_; }
static void decrement(X* p) noexcept {
if (--p->ref_count_ == 0) delete p;
}
};
TEST(IntrusivePtrTest, CustomTraits) {
IntrusivePtr<X, XTraits> x1(new X(2));
EXPECT_EQ(1, x1->ref_count_);
IntrusivePtr<X, XTraits> x2 = x1;
EXPECT_EQ(2, x2->ref_count_);
IntrusivePtr<Y, XTraits> y1(new Y(3));
IntrusivePtr<X, XTraits> y2 = y1;
IntrusivePtr<Y, XTraits> y3 = dynamic_pointer_cast<Y>(y2);
EXPECT_EQ(y2, y1);
EXPECT_EQ(y3, y1);
}
TEST(IntrusivePtrTest, MakeIntrusiveWithCustomTraits) {
auto x = tensorstore::internal::MakeIntrusivePtr<X, XTraits>(2);
EXPECT_EQ(1, x->ref_count_);
EXPECT_NE(nullptr, x.get());
EXPECT_EQ(2, x->v_);
auto y = tensorstore::internal::MakeIntrusivePtr<Y, XTraits>(3);
EXPECT_EQ(1, y->ref_count_);
EXPECT_NE(nullptr, y.get());
EXPECT_EQ(3, y->v_);
}
struct InvokeInDestructorType
: public AtomicReferenceCount<InvokeInDestructorType> {
std::function<void()> invoke_in_destructor;
~InvokeInDestructorType() { invoke_in_destructor(); }
};
TEST(AtomicReferenceCountTest, IncrementReferenceCountIfNonZero) {
AtomicReferenceCount<int> x;
EXPECT_FALSE(IncrementReferenceCountIfNonZero(x));
EXPECT_EQ(0, x.use_count());
intrusive_ptr_increment(&x);
EXPECT_TRUE(IncrementReferenceCountIfNonZero(x));
EXPECT_EQ(2, x.use_count());
}
TEST(AtomicReferenceCountTest,
IncrementReferenceCountIfNonZeroDuringDestructor) {
IntrusivePtr<InvokeInDestructorType> ptr(new InvokeInDestructorType);
{
ASSERT_TRUE(tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr));
IntrusivePtr<InvokeInDestructorType> ptr2(ptr.get(), adopt_object_ref);
ASSERT_TRUE(tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr));
IntrusivePtr<InvokeInDestructorType> ptr3(ptr.get(), adopt_object_ref);
}
bool test_ran = false;
bool could_acquire = false;
ptr->invoke_in_destructor = [&, ptr_copy = ptr.get()] {
test_ran = true;
could_acquire =
tensorstore::internal::IncrementReferenceCountIfNonZero(*ptr_copy);
};
ptr.reset();
EXPECT_TRUE(test_ran);
EXPECT_FALSE(could_acquire);
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/intrusive_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/intrusive_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0ca8dc00-99f1-42ca-a3dd-296a910d479f | cpp | abseil/abseil-cpp | span | absl/types/internal/span.h | absl/types/span_test.cc | #ifndef ABSL_TYPES_INTERNAL_SPAN_H_
#define ABSL_TYPES_INTERNAL_SPAN_H_
#include <algorithm>
#include <cstddef>
#include <string>
#include <type_traits>
#include "absl/algorithm/algorithm.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename T>
class Span;
namespace span_internal {
template <typename C>
constexpr auto GetDataImpl(C& c, char) noexcept
-> decltype(c.data()) {
return c.data();
}
inline char* GetDataImpl(std::string& s,
int) noexcept {
return &s[0];
}
template <typename C>
constexpr auto GetData(C& c) noexcept
-> decltype(GetDataImpl(c, 0)) {
return GetDataImpl(c, 0);
}
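// Detection helpers that constrain Span's container constructor: HasSize
// requires an integral size(), and HasData requires that the container's
// data() pointer is convertible to T*.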
template <typename C>
using HasSize =
std::is_integral<absl::decay_t<decltype(std::declval<C&>().size())>>;
template <typename T, typename C>
using HasData =
std::is_convertible<absl::decay_t<decltype(GetData(std::declval<C&>()))>*,
T* const*>;
template <typename C>
struct ElementType {
using type = typename absl::remove_reference_t<C>::value_type;
};
template <typename T, size_t N>
struct ElementType<T (&)[N]> {
using type = T;
};
template <typename C>
using ElementT = typename ElementType<C>::type;
template <typename T>
using EnableIfMutable =
typename std::enable_if<!std::is_const<T>::value, int>::type;
template <template <typename> class SpanT, typename T>
bool EqualImpl(SpanT<T> a, SpanT<T> b) {
static_assert(std::is_const<T>::value, "");
return std::equal(a.begin(), a.end(), b.begin(), b.end());
}
template <template <typename> class SpanT, typename T>
bool LessThanImpl(SpanT<T> a, SpanT<T> b) {
static_assert(std::is_const<T>::value, "");
return std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end());
}
template <typename From, typename To>
using EnableIfConvertibleTo =
typename std::enable_if<std::is_convertible<From, To>::value>::type;
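// A type is treated as a "view" when GetData() yields the same pointer type
// for const and non-const access (as with Span itself), i.e. the container's
// constness does not propagate to its elements.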
template <typename T, typename = void, typename = void>
struct IsView {
static constexpr bool value = false;
};
template <typename T>
struct IsView<
T, absl::void_t<decltype(span_internal::GetData(std::declval<const T&>()))>,
absl::void_t<decltype(span_internal::GetData(std::declval<T&>()))>> {
private:
using Container = std::remove_const_t<T>;
using ConstData =
decltype(span_internal::GetData(std::declval<const Container&>()));
using MutData = decltype(span_internal::GetData(std::declval<Container&>()));
public:
static constexpr bool value = std::is_same<ConstData, MutData>::value;
};
template <typename T>
using EnableIfIsView = std::enable_if_t<IsView<T>::value, int>;
template <typename T>
using EnableIfNotIsView = std::enable_if_t<!IsView<T>::value, int>;
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/types/span.h"
#include <array>
#include <initializer_list>
#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/base/options.h"
#include "absl/container/fixed_array.h"
#include "absl/container/inlined_vector.h"
#include "absl/hash/hash_testing.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
namespace {
static_assert(!absl::type_traits_internal::IsOwner<absl::Span<int>>::value &&
absl::type_traits_internal::IsView<absl::Span<int>>::value,
"Span is a view, not an owner");
MATCHER_P(DataIs, data,
absl::StrCat("data() ", negation ? "isn't " : "is ",
testing::PrintToString(data))) {
return arg.data() == data;
}
template <typename T>
auto SpanIs(T data, size_t size)
-> decltype(testing::AllOf(DataIs(data), testing::SizeIs(size))) {
return testing::AllOf(DataIs(data), testing::SizeIs(size));
}
template <typename Container>
auto SpanIs(const Container& c) -> decltype(SpanIs(c.data(), c.size())) {
return SpanIs(c.data(), c.size());
}
std::vector<int> MakeRamp(int len, int offset = 0) {
std::vector<int> v(len);
std::iota(v.begin(), v.end(), offset);
return v;
}
TEST(IntSpan, EmptyCtors) {
absl::Span<int> s;
EXPECT_THAT(s, SpanIs(nullptr, 0));
}
TEST(IntSpan, PtrLenCtor) {
int a[] = {1, 2, 3};
absl::Span<int> s(&a[0], 2);
EXPECT_THAT(s, SpanIs(a, 2));
}
TEST(IntSpan, ArrayCtor) {
int a[] = {1, 2, 3};
absl::Span<int> s(a);
EXPECT_THAT(s, SpanIs(a, 3));
EXPECT_TRUE((std::is_constructible<absl::Span<const int>, int[3]>::value));
EXPECT_TRUE(
(std::is_constructible<absl::Span<const int>, const int[3]>::value));
EXPECT_FALSE((std::is_constructible<absl::Span<int>, const int[3]>::value));
EXPECT_TRUE((std::is_convertible<int[3], absl::Span<const int>>::value));
EXPECT_TRUE(
(std::is_convertible<const int[3], absl::Span<const int>>::value));
}
template <typename T>
void TakesGenericSpan(absl::Span<T>) {}
TEST(IntSpan, ContainerCtor) {
std::vector<int> empty;
absl::Span<int> s_empty(empty);
EXPECT_THAT(s_empty, SpanIs(empty));
std::vector<int> filled{1, 2, 3};
absl::Span<int> s_filled(filled);
EXPECT_THAT(s_filled, SpanIs(filled));
absl::Span<int> s_from_span(filled);
EXPECT_THAT(s_from_span, SpanIs(s_filled));
absl::Span<const int> const_filled = filled;
EXPECT_THAT(const_filled, SpanIs(filled));
absl::Span<const int> const_from_span = s_filled;
EXPECT_THAT(const_from_span, SpanIs(s_filled));
EXPECT_TRUE(
(std::is_convertible<std::vector<int>&, absl::Span<const int>>::value));
EXPECT_TRUE(
(std::is_convertible<absl::Span<int>&, absl::Span<const int>>::value));
TakesGenericSpan(absl::Span<int>(filled));
}
struct ContainerWithShallowConstData {
std::vector<int> storage;
int* data() const { return const_cast<int*>(storage.data()); }
int size() const { return storage.size(); }
};
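// A const container whose data() still returns a mutable pointer can back a
// mutable Span: constness here is "shallow" and does not protect the elements.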
TEST(IntSpan, ShallowConstness) {
const ContainerWithShallowConstData c{MakeRamp(20)};
  absl::Span<int> s(c);
s[0] = -1;
EXPECT_EQ(c.storage[0], -1);
}
TEST(CharSpan, StringCtor) {
std::string empty = "";
absl::Span<char> s_empty(empty);
EXPECT_THAT(s_empty, SpanIs(empty));
std::string abc = "abc";
absl::Span<char> s_abc(abc);
EXPECT_THAT(s_abc, SpanIs(abc));
absl::Span<const char> s_const_abc = abc;
EXPECT_THAT(s_const_abc, SpanIs(abc));
EXPECT_FALSE((std::is_constructible<absl::Span<int>, std::string>::value));
EXPECT_FALSE(
(std::is_constructible<absl::Span<const int>, std::string>::value));
EXPECT_TRUE(
(std::is_convertible<std::string, absl::Span<const char>>::value));
}
TEST(IntSpan, FromConstPointer) {
EXPECT_TRUE((std::is_constructible<absl::Span<const int* const>,
std::vector<int*>>::value));
EXPECT_TRUE((std::is_constructible<absl::Span<const int* const>,
std::vector<const int*>>::value));
EXPECT_FALSE((
std::is_constructible<absl::Span<const int*>, std::vector<int*>>::value));
EXPECT_FALSE((
std::is_constructible<absl::Span<int*>, std::vector<const int*>>::value));
}
struct TypeWithMisleadingData {
int& data() { return i; }
int size() { return 1; }
int i;
};
struct TypeWithMisleadingSize {
int* data() { return &i; }
const char* size() { return "1"; }
int i;
};
TEST(IntSpan, EvilTypes) {
EXPECT_FALSE(
(std::is_constructible<absl::Span<int>, TypeWithMisleadingData&>::value));
EXPECT_FALSE(
(std::is_constructible<absl::Span<int>, TypeWithMisleadingSize&>::value));
}
struct Base {
int* data() { return &i; }
int size() { return 1; }
int i;
};
struct Derived : Base {};
TEST(IntSpan, SpanOfDerived) {
EXPECT_TRUE((std::is_constructible<absl::Span<int>, Base&>::value));
EXPECT_TRUE((std::is_constructible<absl::Span<int>, Derived&>::value));
EXPECT_FALSE(
(std::is_constructible<absl::Span<Base>, std::vector<Derived>>::value));
}
void TestInitializerList(absl::Span<const int> s, const std::vector<int>& v) {
EXPECT_TRUE(std::equal(s.begin(), s.end(), v.begin(), v.end()));
}
TEST(ConstIntSpan, InitializerListConversion) {
TestInitializerList({}, {});
TestInitializerList({1}, {1});
TestInitializerList({1, 2, 3}, {1, 2, 3});
EXPECT_FALSE((std::is_constructible<absl::Span<int>,
std::initializer_list<int>>::value));
EXPECT_FALSE((
std::is_convertible<absl::Span<int>, std::initializer_list<int>>::value));
}
TEST(IntSpan, Data) {
int i;
absl::Span<int> s(&i, 1);
EXPECT_EQ(&i, s.data());
}
TEST(IntSpan, SizeLengthEmpty) {
absl::Span<int> empty;
EXPECT_EQ(empty.size(), 0);
EXPECT_TRUE(empty.empty());
EXPECT_EQ(empty.size(), empty.length());
auto v = MakeRamp(10);
absl::Span<int> s(v);
EXPECT_EQ(s.size(), 10);
EXPECT_FALSE(s.empty());
EXPECT_EQ(s.size(), s.length());
}
TEST(IntSpan, ElementAccess) {
auto v = MakeRamp(10);
absl::Span<int> s(v);
for (int i = 0; i < s.size(); ++i) {
EXPECT_EQ(s[i], s.at(i));
}
EXPECT_EQ(s.front(), s[0]);
EXPECT_EQ(s.back(), s[9]);
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
EXPECT_DEATH_IF_SUPPORTED(s[-1], "");
EXPECT_DEATH_IF_SUPPORTED(s[10], "");
#endif
}
TEST(IntSpan, AtThrows) {
auto v = MakeRamp(10);
absl::Span<int> s(v);
EXPECT_EQ(s.at(9), 9);
ABSL_BASE_INTERNAL_EXPECT_FAIL(s.at(10), std::out_of_range,
"failed bounds check");
}
TEST(IntSpan, RemovePrefixAndSuffix) {
auto v = MakeRamp(20, 1);
absl::Span<int> s(v);
EXPECT_EQ(s.size(), 20);
s.remove_suffix(0);
s.remove_prefix(0);
EXPECT_EQ(s.size(), 20);
s.remove_prefix(1);
EXPECT_EQ(s.size(), 19);
EXPECT_EQ(s[0], 2);
s.remove_suffix(1);
EXPECT_EQ(s.size(), 18);
EXPECT_EQ(s.back(), 19);
s.remove_prefix(7);
EXPECT_EQ(s.size(), 11);
EXPECT_EQ(s[0], 9);
s.remove_suffix(11);
EXPECT_EQ(s.size(), 0);
EXPECT_EQ(v, MakeRamp(20, 1));
#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
absl::Span<int> prefix_death(v);
EXPECT_DEATH_IF_SUPPORTED(prefix_death.remove_prefix(21), "");
absl::Span<int> suffix_death(v);
EXPECT_DEATH_IF_SUPPORTED(suffix_death.remove_suffix(21), "");
#endif
}
TEST(IntSpan, Subspan) {
std::vector<int> empty;
EXPECT_EQ(absl::MakeSpan(empty).subspan(), empty);
EXPECT_THAT(absl::MakeSpan(empty).subspan(0, 0), SpanIs(empty));
EXPECT_THAT(absl::MakeSpan(empty).subspan(0, absl::Span<const int>::npos),
SpanIs(empty));
auto ramp = MakeRamp(10);
EXPECT_THAT(absl::MakeSpan(ramp).subspan(), SpanIs(ramp));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(0, 10), SpanIs(ramp));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(0, absl::Span<const int>::npos),
SpanIs(ramp));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(0, 3), SpanIs(ramp.data(), 3));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(5, absl::Span<const int>::npos),
SpanIs(ramp.data() + 5, 5));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(3, 3), SpanIs(ramp.data() + 3, 3));
EXPECT_THAT(absl::MakeSpan(ramp).subspan(10, 5), SpanIs(ramp.data() + 10, 0));
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(absl::MakeSpan(ramp).subspan(11, 5), std::out_of_range);
#else
EXPECT_DEATH_IF_SUPPORTED(absl::MakeSpan(ramp).subspan(11, 5), "");
#endif
}
TEST(IntSpan, First) {
std::vector<int> empty;
EXPECT_THAT(absl::MakeSpan(empty).first(0), SpanIs(empty));
auto ramp = MakeRamp(10);
EXPECT_THAT(absl::MakeSpan(ramp).first(0), SpanIs(ramp.data(), 0));
EXPECT_THAT(absl::MakeSpan(ramp).first(10), SpanIs(ramp));
EXPECT_THAT(absl::MakeSpan(ramp).first(3), SpanIs(ramp.data(), 3));
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(absl::MakeSpan(ramp).first(11), std::out_of_range);
#else
EXPECT_DEATH_IF_SUPPORTED(absl::MakeSpan(ramp).first(11), "");
#endif
}
TEST(IntSpan, Last) {
std::vector<int> empty;
EXPECT_THAT(absl::MakeSpan(empty).last(0), SpanIs(empty));
auto ramp = MakeRamp(10);
EXPECT_THAT(absl::MakeSpan(ramp).last(0), SpanIs(ramp.data() + 10, 0));
EXPECT_THAT(absl::MakeSpan(ramp).last(10), SpanIs(ramp));
EXPECT_THAT(absl::MakeSpan(ramp).last(3), SpanIs(ramp.data() + 7, 3));
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(absl::MakeSpan(ramp).last(11), std::out_of_range);
#else
EXPECT_DEATH_IF_SUPPORTED(absl::MakeSpan(ramp).last(11), "");
#endif
}
TEST(IntSpan, MakeSpanPtrLength) {
std::vector<int> empty;
auto s_empty = absl::MakeSpan(empty.data(), empty.size());
EXPECT_THAT(s_empty, SpanIs(empty));
std::array<int, 3> a{{1, 2, 3}};
auto s = absl::MakeSpan(a.data(), a.size());
EXPECT_THAT(s, SpanIs(a));
EXPECT_THAT(absl::MakeConstSpan(empty.data(), empty.size()), SpanIs(s_empty));
EXPECT_THAT(absl::MakeConstSpan(a.data(), a.size()), SpanIs(s));
}
TEST(IntSpan, MakeSpanTwoPtrs) {
std::vector<int> empty;
auto s_empty = absl::MakeSpan(empty.data(), empty.data());
EXPECT_THAT(s_empty, SpanIs(empty));
std::vector<int> v{1, 2, 3};
auto s = absl::MakeSpan(v.data(), v.data() + 1);
EXPECT_THAT(s, SpanIs(v.data(), 1));
EXPECT_THAT(absl::MakeConstSpan(empty.data(), empty.data()), SpanIs(s_empty));
EXPECT_THAT(absl::MakeConstSpan(v.data(), v.data() + 1), SpanIs(s));
}
TEST(IntSpan, MakeSpanContainer) {
std::vector<int> empty;
auto s_empty = absl::MakeSpan(empty);
EXPECT_THAT(s_empty, SpanIs(empty));
std::vector<int> v{1, 2, 3};
auto s = absl::MakeSpan(v);
EXPECT_THAT(s, SpanIs(v));
EXPECT_THAT(absl::MakeConstSpan(empty), SpanIs(s_empty));
EXPECT_THAT(absl::MakeConstSpan(v), SpanIs(s));
EXPECT_THAT(absl::MakeSpan(s), SpanIs(s));
EXPECT_THAT(absl::MakeConstSpan(s), SpanIs(s));
}
TEST(CharSpan, MakeSpanString) {
std::string empty = "";
auto s_empty = absl::MakeSpan(empty);
EXPECT_THAT(s_empty, SpanIs(empty));
std::string str = "abc";
auto s_str = absl::MakeSpan(str);
EXPECT_THAT(s_str, SpanIs(str));
EXPECT_THAT(absl::MakeConstSpan(empty), SpanIs(s_empty));
EXPECT_THAT(absl::MakeConstSpan(str), SpanIs(s_str));
}
TEST(IntSpan, MakeSpanArray) {
int a[] = {1, 2, 3};
auto s = absl::MakeSpan(a);
EXPECT_THAT(s, SpanIs(a, 3));
const int ca[] = {1, 2, 3};
auto s_ca = absl::MakeSpan(ca);
EXPECT_THAT(s_ca, SpanIs(ca, 3));
EXPECT_THAT(absl::MakeConstSpan(a), SpanIs(s));
EXPECT_THAT(absl::MakeConstSpan(ca), SpanIs(s_ca));
}
template <typename Expected, typename T>
void CheckType(const T& /*unused*/) {
testing::StaticAssertTypeEq<Expected, T>();
}
TEST(IntSpan, MakeSpanTypes) {
std::vector<int> vec;
const std::vector<int> cvec;
int a[1];
const int ca[] = {1};
int* ip = a;
const int* cip = ca;
std::string s = "";
const std::string cs = "";
CheckType<absl::Span<int>>(absl::MakeSpan(vec));
CheckType<absl::Span<const int>>(absl::MakeSpan(cvec));
CheckType<absl::Span<int>>(absl::MakeSpan(ip, ip + 1));
CheckType<absl::Span<int>>(absl::MakeSpan(ip, 1));
CheckType<absl::Span<const int>>(absl::MakeSpan(cip, cip + 1));
CheckType<absl::Span<const int>>(absl::MakeSpan(cip, 1));
CheckType<absl::Span<int>>(absl::MakeSpan(a));
CheckType<absl::Span<int>>(absl::MakeSpan(a, a + 1));
CheckType<absl::Span<int>>(absl::MakeSpan(a, 1));
CheckType<absl::Span<const int>>(absl::MakeSpan(ca));
CheckType<absl::Span<const int>>(absl::MakeSpan(ca, ca + 1));
CheckType<absl::Span<const int>>(absl::MakeSpan(ca, 1));
CheckType<absl::Span<char>>(absl::MakeSpan(s));
CheckType<absl::Span<const char>>(absl::MakeSpan(cs));
}
TEST(ConstIntSpan, MakeConstSpanTypes) {
std::vector<int> vec;
const std::vector<int> cvec;
int array[1];
const int carray[] = {0};
int* ptr = array;
const int* cptr = carray;
std::string s = "";
std::string cs = "";
CheckType<absl::Span<const int>>(absl::MakeConstSpan(vec));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(cvec));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(ptr, ptr + 1));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(ptr, 1));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(cptr, cptr + 1));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(cptr, 1));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(array));
CheckType<absl::Span<const int>>(absl::MakeConstSpan(carray));
CheckType<absl::Span<const char>>(absl::MakeConstSpan(s));
CheckType<absl::Span<const char>>(absl::MakeConstSpan(cs));
}
TEST(IntSpan, Equality) {
const int arr1[] = {1, 2, 3, 4, 5};
int arr2[] = {1, 2, 3, 4, 5};
std::vector<int> vec1(std::begin(arr1), std::end(arr1));
std::vector<int> vec2 = vec1;
std::vector<int> other_vec = {2, 4, 6, 8, 10};
const absl::Span<const int> from1 = vec1;
const absl::Span<const int> from2 = vec2;
EXPECT_EQ(from1, from1);
EXPECT_FALSE(from1 != from1);
EXPECT_EQ(from1, from2);
EXPECT_FALSE(from1 != from2);
const absl::Span<const int> from_other = other_vec;
EXPECT_NE(from1, from_other);
EXPECT_FALSE(from1 == from_other);
EXPECT_EQ(vec1, from1);
EXPECT_FALSE(vec1 != from1);
EXPECT_EQ(from1, vec1);
EXPECT_FALSE(from1 != vec1);
const absl::Span<int> mutable_from1(vec1);
const absl::Span<int> mutable_from2(vec2);
EXPECT_EQ(from1, mutable_from1);
EXPECT_EQ(mutable_from1, from1);
EXPECT_EQ(mutable_from1, mutable_from2);
EXPECT_EQ(mutable_from2, mutable_from1);
EXPECT_EQ(vec1, mutable_from1);
EXPECT_FALSE(vec1 != mutable_from1);
EXPECT_EQ(mutable_from1, vec1);
EXPECT_FALSE(mutable_from1 != vec1);
EXPECT_TRUE(arr1 == mutable_from1);
EXPECT_FALSE(arr1 != mutable_from1);
EXPECT_TRUE(mutable_from1 == arr1);
EXPECT_FALSE(mutable_from1 != arr1);
EXPECT_TRUE(arr2 == from1);
EXPECT_FALSE(arr2 != from1);
EXPECT_TRUE(from1 == arr2);
EXPECT_FALSE(from1 != arr2);
EXPECT_NE(from1, absl::Span<const int>(from1).subspan(0, from1.size() - 1));
++vec2.back();
EXPECT_NE(from1, from2);
}
class IntSpanOrderComparisonTest : public testing::Test {
public:
IntSpanOrderComparisonTest()
: arr_before_{1, 2, 3},
arr_after_{1, 2, 4},
carr_after_{1, 2, 4},
vec_before_(std::begin(arr_before_), std::end(arr_before_)),
vec_after_(std::begin(arr_after_), std::end(arr_after_)),
before_(vec_before_),
after_(vec_after_),
cbefore_(vec_before_),
cafter_(vec_after_) {}
protected:
int arr_before_[3], arr_after_[3];
const int carr_after_[3];
std::vector<int> vec_before_, vec_after_;
absl::Span<int> before_, after_;
absl::Span<const int> cbefore_, cafter_;
};
TEST_F(IntSpanOrderComparisonTest, CompareSpans) {
EXPECT_TRUE(cbefore_ < cafter_);
EXPECT_TRUE(cbefore_ <= cafter_);
EXPECT_TRUE(cafter_ > cbefore_);
EXPECT_TRUE(cafter_ >= cbefore_);
EXPECT_FALSE(cbefore_ > cafter_);
EXPECT_FALSE(cafter_ < cbefore_);
EXPECT_TRUE(before_ < after_);
EXPECT_TRUE(before_ <= after_);
EXPECT_TRUE(after_ > before_);
EXPECT_TRUE(after_ >= before_);
EXPECT_FALSE(before_ > after_);
EXPECT_FALSE(after_ < before_);
EXPECT_TRUE(cbefore_ < after_);
EXPECT_TRUE(cbefore_ <= after_);
EXPECT_TRUE(after_ > cbefore_);
EXPECT_TRUE(after_ >= cbefore_);
EXPECT_FALSE(cbefore_ > after_);
EXPECT_FALSE(after_ < cbefore_);
}
TEST_F(IntSpanOrderComparisonTest, SpanOfConstAndContainer) {
EXPECT_TRUE(cbefore_ < vec_after_);
EXPECT_TRUE(cbefore_ <= vec_after_);
EXPECT_TRUE(vec_after_ > cbefore_);
EXPECT_TRUE(vec_after_ >= cbefore_);
EXPECT_FALSE(cbefore_ > vec_after_);
EXPECT_FALSE(vec_after_ < cbefore_);
EXPECT_TRUE(arr_before_ < cafter_);
EXPECT_TRUE(arr_before_ <= cafter_);
EXPECT_TRUE(cafter_ > arr_before_);
EXPECT_TRUE(cafter_ >= arr_before_);
EXPECT_FALSE(arr_before_ > cafter_);
EXPECT_FALSE(cafter_ < arr_before_);
}
TEST_F(IntSpanOrderComparisonTest, SpanOfMutableAndContainer) {
EXPECT_TRUE(vec_before_ < after_);
EXPECT_TRUE(vec_before_ <= after_);
EXPECT_TRUE(after_ > vec_before_);
EXPECT_TRUE(after_ >= vec_before_);
EXPECT_FALSE(vec_before_ > after_);
EXPECT_FALSE(after_ < vec_before_);
EXPECT_TRUE(before_ < carr_after_);
EXPECT_TRUE(before_ <= carr_after_);
EXPECT_TRUE(carr_after_ > before_);
EXPECT_TRUE(carr_after_ >= before_);
EXPECT_FALSE(before_ > carr_after_);
EXPECT_FALSE(carr_after_ < before_);
}
TEST_F(IntSpanOrderComparisonTest, EqualSpans) {
EXPECT_FALSE(before_ < before_);
EXPECT_TRUE(before_ <= before_);
EXPECT_FALSE(before_ > before_);
EXPECT_TRUE(before_ >= before_);
}
TEST_F(IntSpanOrderComparisonTest, Subspans) {
auto subspan = before_.subspan(0, 1);
EXPECT_TRUE(subspan < before_);
EXPECT_TRUE(subspan <= before_);
EXPECT_TRUE(before_ > subspan);
EXPECT_TRUE(before_ >= subspan);
EXPECT_FALSE(subspan > before_);
EXPECT_FALSE(before_ < subspan);
}
TEST_F(IntSpanOrderComparisonTest, EmptySpans) {
absl::Span<int> empty;
EXPECT_FALSE(empty < empty);
EXPECT_TRUE(empty <= empty);
EXPECT_FALSE(empty > empty);
EXPECT_TRUE(empty >= empty);
EXPECT_TRUE(empty < before_);
EXPECT_TRUE(empty <= before_);
EXPECT_TRUE(before_ > empty);
EXPECT_TRUE(before_ >= empty);
EXPECT_FALSE(empty > before_);
EXPECT_FALSE(before_ < empty);
}
TEST(IntSpan, ExposesContainerTypesAndConsts) {
absl::Span<int> slice;
CheckType<absl::Span<int>::iterator>(slice.begin());
EXPECT_TRUE((std::is_convertible<decltype(slice.begin()),
absl::Span<int>::const_iterator>::value));
CheckType<absl::Span<int>::const_iterator>(slice.cbegin());
EXPECT_TRUE((std::is_convertible<decltype(slice.end()),
absl::Span<int>::const_iterator>::value));
CheckType<absl::Span<int>::const_iterator>(slice.cend());
CheckType<absl::Span<int>::reverse_iterator>(slice.rend());
EXPECT_TRUE(
(std::is_convertible<decltype(slice.rend()),
absl::Span<int>::const_reverse_iterator>::value));
CheckType<absl::Span<int>::const_reverse_iterator>(slice.crend());
testing::StaticAssertTypeEq<int, absl::Span<int>::value_type>();
testing::StaticAssertTypeEq<int, absl::Span<const int>::value_type>();
testing::StaticAssertTypeEq<int, absl::Span<int>::element_type>();
testing::StaticAssertTypeEq<const int, absl::Span<const int>::element_type>();
testing::StaticAssertTypeEq<int*, absl::Span<int>::pointer>();
testing::StaticAssertTypeEq<const int*, absl::Span<const int>::pointer>();
testing::StaticAssertTypeEq<int&, absl::Span<int>::reference>();
testing::StaticAssertTypeEq<const int&, absl::Span<const int>::reference>();
testing::StaticAssertTypeEq<const int&, absl::Span<int>::const_reference>();
testing::StaticAssertTypeEq<const int&,
absl::Span<const int>::const_reference>();
EXPECT_EQ(static_cast<absl::Span<int>::size_type>(-1), absl::Span<int>::npos);
}
TEST(IntSpan, IteratorsAndReferences) {
auto accept_pointer = [](int*) {};
auto accept_reference = [](int&) {};
auto accept_iterator = [](absl::Span<int>::iterator) {};
auto accept_const_iterator = [](absl::Span<int>::const_iterator) {};
auto accept_reverse_iterator = [](absl::Span<int>::reverse_iterator) {};
auto accept_const_reverse_iterator =
[](absl::Span<int>::const_reverse_iterator) {};
int a[1];
absl::Span<int> s = a;
accept_pointer(s.data());
accept_iterator(s.begin());
accept_const_iterator(s.begin());
accept_const_iterator(s.cbegin());
accept_iterator(s.end());
accept_const_iterator(s.end());
accept_const_iterator(s.cend());
accept_reverse_iterator(s.rbegin());
accept_const_reverse_iterator(s.rbegin());
accept_const_reverse_iterator(s.crbegin());
accept_reverse_iterator(s.rend());
accept_const_reverse_iterator(s.rend());
accept_const_reverse_iterator(s.crend());
accept_reference(s[0]);
accept_reference(s.at(0));
accept_reference(s.front());
accept_reference(s.back());
}
TEST(IntSpan, IteratorsAndReferences_Const) {
auto accept_pointer = [](int*) {};
auto accept_reference = [](int&) {};
auto accept_iterator = [](absl::Span<int>::iterator) {};
auto accept_const_iterator = [](absl::Span<int>::const_iterator) {};
auto accept_reverse_iterator = [](absl::Span<int>::reverse_iterator) {};
auto accept_const_reverse_iterator =
[](absl::Span<int>::const_reverse_iterator) {};
int a[1];
const absl::Span<int> s = a;
accept_pointer(s.data());
accept_iterator(s.begin());
accept_const_iterator(s.begin());
accept_const_iterator(s.cbegin());
accept_iterator(s.end());
accept_const_iterator(s.end());
accept_const_iterator(s.cend());
accept_reverse_iterator(s.rbegin());
accept_const_reverse_iterator(s.rbegin());
accept_const_reverse_iterator(s.crbegin());
accept_reverse_iterator(s.rend());
accept_const_reverse_iterator(s.rend());
accept_const_reverse_iterator(s.crend());
accept_reference(s[0]);
accept_reference(s.at(0));
accept_reference(s.front());
accept_reference(s.back());
}
TEST(IntSpan, NoexceptTest) {
int a[] = {1, 2, 3};
std::vector<int> v;
EXPECT_TRUE(noexcept(absl::Span<const int>()));
EXPECT_TRUE(noexcept(absl::Span<const int>(a, 2)));
EXPECT_TRUE(noexcept(absl::Span<const int>(a)));
EXPECT_TRUE(noexcept(absl::Span<const int>(v)));
EXPECT_TRUE(noexcept(absl::Span<int>(v)));
EXPECT_TRUE(noexcept(absl::Span<const int>({1, 2, 3})));
EXPECT_TRUE(noexcept(absl::MakeSpan(v)));
EXPECT_TRUE(noexcept(absl::MakeSpan(a)));
EXPECT_TRUE(noexcept(absl::MakeSpan(a, 2)));
EXPECT_TRUE(noexcept(absl::MakeSpan(a, a + 1)));
EXPECT_TRUE(noexcept(absl::MakeConstSpan(v)));
EXPECT_TRUE(noexcept(absl::MakeConstSpan(a)));
EXPECT_TRUE(noexcept(absl::MakeConstSpan(a, 2)));
EXPECT_TRUE(noexcept(absl::MakeConstSpan(a, a + 1)));
absl::Span<int> s(v);
EXPECT_TRUE(noexcept(s.data()));
EXPECT_TRUE(noexcept(s.size()));
EXPECT_TRUE(noexcept(s.length()));
EXPECT_TRUE(noexcept(s.empty()));
EXPECT_TRUE(noexcept(s[0]));
EXPECT_TRUE(noexcept(s.front()));
EXPECT_TRUE(noexcept(s.back()));
EXPECT_TRUE(noexcept(s.begin()));
EXPECT_TRUE(noexcept(s.cbegin()));
EXPECT_TRUE(noexcept(s.end()));
EXPECT_TRUE(noexcept(s.cend()));
EXPECT_TRUE(noexcept(s.rbegin()));
EXPECT_TRUE(noexcept(s.crbegin()));
EXPECT_TRUE(noexcept(s.rend()));
EXPECT_TRUE(noexcept(s.crend()));
EXPECT_TRUE(noexcept(s.remove_prefix(0)));
EXPECT_TRUE(noexcept(s.remove_suffix(0)));
}
template <int i>
struct ConstexprTester {};
#define ABSL_TEST_CONSTEXPR(expr) \
do { \
ABSL_ATTRIBUTE_UNUSED ConstexprTester<(expr, 1)> t; \
} while (0)
struct ContainerWithConstexprMethods {
constexpr int size() const { return 1; }
constexpr const int* data() const { return &i; }
const int i;
};
TEST(ConstIntSpan, ConstexprTest) {
static constexpr int a[] = {1, 2, 3};
static constexpr int sized_arr[2] = {1, 2};
static constexpr ContainerWithConstexprMethods c{1};
ABSL_TEST_CONSTEXPR(absl::Span<const int>());
ABSL_TEST_CONSTEXPR(absl::Span<const int>(a, 2));
ABSL_TEST_CONSTEXPR(absl::Span<const int>(sized_arr));
ABSL_TEST_CONSTEXPR(absl::Span<const int>(c));
ABSL_TEST_CONSTEXPR(absl::MakeSpan(&a[0], 1));
ABSL_TEST_CONSTEXPR(absl::MakeSpan(c));
ABSL_TEST_CONSTEXPR(absl::MakeSpan(a));
ABSL_TEST_CONSTEXPR(absl::MakeConstSpan(&a[0], 1));
ABSL_TEST_CONSTEXPR(absl::MakeConstSpan(c));
ABSL_TEST_CONSTEXPR(absl::MakeConstSpan(a));
constexpr absl::Span<const int> span = c;
ABSL_TEST_CONSTEXPR(span.data());
ABSL_TEST_CONSTEXPR(span.size());
ABSL_TEST_CONSTEXPR(span.length());
ABSL_TEST_CONSTEXPR(span.empty());
ABSL_TEST_CONSTEXPR(span.begin());
ABSL_TEST_CONSTEXPR(span.cbegin());
ABSL_TEST_CONSTEXPR(span.subspan(0, 0));
ABSL_TEST_CONSTEXPR(span.first(1));
ABSL_TEST_CONSTEXPR(span.last(1));
ABSL_TEST_CONSTEXPR(span[0]);
}
struct BigStruct {
char bytes[10000];
};
TEST(Span, SpanSize) {
EXPECT_LE(sizeof(absl::Span<int>), 2 * sizeof(void*));
EXPECT_LE(sizeof(absl::Span<BigStruct>), 2 * sizeof(void*));
}
TEST(Span, Hash) {
int array[] = {1, 2, 3, 4};
int array2[] = {1, 2, 3};
using T = absl::Span<const int>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(
{
T(), T(nullptr, 0), T(array, 0), T(array2, 0),
T(array, 3), T(array2), T({1, 2, 3}),
T(array, 1), T(array, 2),
T(array + 1, 2), T(array + 2, 2)}));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/internal/span.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/span_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
158119dc-7aff-4d3c-b7bc-7f5f67c44426 | cpp | google/arolla | init_arolla | arolla/util/init_arolla.cc | arolla/util/init_arolla_test.cc | #include "arolla/util/init_arolla.h"
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "arolla/util/init_arolla_internal.h"
namespace arolla::init_arolla_internal {
namespace {
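// Initializers register themselves into an intrusive singly-linked list
// (registry_head) during static initialization; InitArolla() later drains the
// list and hands the collected initializers to the Coordinator, which runs
// them respecting their declared dependencies.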
bool init_arolla_called = false;
const Registration* registry_head = nullptr;
void RunRegisteredInitializers() {
static absl::NoDestructor<Coordinator> coordinator;
auto* head = std::exchange(registry_head, nullptr);
std::vector<const Initializer*> initializers;
for (auto it = head; it != nullptr; it = it->next) {
initializers.push_back(&it->initializer);
}
auto status = coordinator->Run(initializers);
if (!status.ok()) {
LOG(FATAL) << "Arolla initialization failed: " << status;
}
}
}
Registration::Registration(const Initializer& initializer)
: initializer(initializer), next(registry_head) {
registry_head = this;
}
void InitArollaSecondary() {
if (init_arolla_called) {
RunRegisteredInitializers();
}
}
}
namespace arolla {
void InitArolla() {
[[maybe_unused]] static const bool done = [] {
arolla::init_arolla_internal::init_arolla_called = true;
arolla::init_arolla_internal::RunRegisteredInitializers();
return true;
}();
}
void CheckInitArolla() {
  constexpr absl::string_view message =
      "The Arolla library is not initialized yet. Please ensure that "
      "arolla::InitArolla() was called before using any other Arolla "
      "functions.";
if (!arolla::init_arolla_internal::init_arolla_called) {
LOG(FATAL) << message;
}
}
} | #include "arolla/util/init_arolla.h"
#include <string>
#include <tuple>
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
namespace arolla {
namespace {
struct Buffer {
std::string result;
};
Buffer& buffer() {
static absl::NoDestructor<Buffer> result;
return *result;
}
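// Three initializers with the dependency chain Foo -> Bar -> (anonymous), so
// after InitArolla() the buffer must read "HelloWorld!" in exactly that order.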
AROLLA_INITIALIZER(.name = "Foo", .init_fn = [] { buffer().result += "Hello"; })
AROLLA_INITIALIZER(
.name = "Bar", .deps = {"Foo"}, .init_fn = [] {
buffer().result += "World";
return absl::OkStatus();
})
AROLLA_INITIALIZER(.deps = {"Bar"}, .init_fn = [] { buffer().result += "!"; })
TEST(InitArollaTest, Complex) {
{
EXPECT_EQ(buffer().result, "");
}
{
InitArolla();
EXPECT_EQ(buffer().result, "HelloWorld!");
CheckInitArolla();
}
{
InitArolla();
EXPECT_EQ(buffer().result, "HelloWorld!");
CheckInitArolla();
}
{
static constexpr arolla::init_arolla_internal::Initializer
secondary_initializer = {.init_fn = [] { buffer().result += "!!"; }};
[[maybe_unused]] static const arolla::init_arolla_internal::Registration
registration(secondary_initializer);
arolla::init_arolla_internal::InitArollaSecondary();
EXPECT_EQ(buffer().result, "HelloWorld!!!");
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/init_arolla.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/init_arolla_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
598c730c-79a4-40bf-874a-24f60c51c13a | cpp | tensorflow/tensorflow | mlir_hlo_to_hlo | third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.cc | third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo_test.cc | #include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/UseDefLists.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/RegionUtils.h"
#include "stablehlo/dialect/Base.h"
#include "xla/array.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/builder/lib/approx_topk.h"
#include "xla/hlo/builder/lib/approx_topk_shape.h"
#include "xla/hlo/builder/lib/matrix.h"
#include "xla/hlo/builder/lib/slicing.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/translate/mhlo_to_hlo/attribute_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/layout_util.h"
#include "xla/hlo/translate/mhlo_to_hlo/location_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/module_attributes_exporter.h"
#include "xla/hlo/translate/mhlo_to_hlo/stack_frame_index_builder.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/types.h"
using ::int64_t;
using ::tsl::int16;
using ::tsl::int32;
using ::tsl::int8;
using ::tsl::StatusOr;
using ::tsl::uint16;
using ::tsl::uint32;
using ::tsl::uint64;
using ::tsl::uint8;
constexpr char kJaxBufferDonor[] = "jax.buffer_donor";
constexpr char kResultLayout[] = "result_layout";
constexpr char kSourceLayout[] = "source_layout";
constexpr char kAggregateToTopk[] = "aggregate_to_topk";
constexpr char kApiVersion[] = "api_version";
constexpr char kApproxTopK[] = "ApproxTopK";
constexpr char kBackendConfig[] = "backend_config";
constexpr char kCallTargetName[] = "call_target_name";
constexpr char kCalledComputations[] = "called_computations";
constexpr char kHasSideEffect[] = "has_side_effect";
constexpr char kIsFallback[] = "is_fallback";
constexpr char kRecallTarget[] = "recall_target";
constexpr char kReductionDim[] = "reduction_dim";
constexpr char kReductionInputSizeOverride[] = "reduction_input_size_override";
constexpr char kTopK[] = "top_k";
constexpr char kMhloCrossProgramPrefetches[] = "mhlo.cross_program_prefetches";
constexpr char kMhloFrontendAttributes[] = "mhlo.frontend_attributes";
constexpr char kMhloInputOutputAlias[] = "mhlo.input_output_alias";
constexpr char kMhloIsDynamic[] = "mhlo.is_dynamic";
constexpr char kMhloLiteral[] = "mhlo.literal";
constexpr char kMhloParameterReplication[] = "mhlo.parameter_replication";
constexpr char kMhloReplication[] = "mhlo.is_same_data_across_replicas";
constexpr char kMhloSharding[] = "mhlo.sharding";
constexpr char kMhloSpmdOutputSharding[] = "mhlo.spmd_output_sharding";
constexpr char kMhloSpmdParametersShardings[] =
"mhlo.spmd_parameters_shardings";
constexpr char kMhloUseAutoSpmdPartitioning[] =
"mhlo.use_auto_spmd_partitioning";
constexpr char kMhloXlaEntryComputationParameterLayouts[] =
"mhlo.xla_entry_computation_parameter_layouts";
constexpr char kMhloXlaEntryComputationParameterTiles[] =
"mhlo.xla_entry_computation_parameter_tiles";
constexpr char kMhloXlaEntryComputationResultLayout[] =
"mhlo.xla_entry_computation_result_layout";
constexpr char kMhloXlaEntryComputationResultTiles[] =
"mhlo.xla_entry_computation_result_tiles";
constexpr char kArgEmptyTuple[] = "arg_empty_tuple";
constexpr char kArgPrefix[] = "Arg_";
constexpr char kArgTuple[] = "arg_tuple";
constexpr char kDefaultLayoutAttrName[] = "xla_shape";
constexpr char kExecutionThread[] = "execution_thread";
constexpr char kLayout[] = "layout";
constexpr char kMain[] = "main";
constexpr char kRegionPrefix[] = "region_";
constexpr char kTfAliasingOutput[] = "tf.aliasing_output";
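// Passes through plain values and unwraps std::unique_ptr arguments so that
// generated exporter code can forward either form to the XLA builder.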
template <typename T>
T Unwrap(T t) {
return t;
}
template <typename T>
T* Unwrap(const std::unique_ptr<T>& t) {
return t.get();
}
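// Looks up the xla::XlaOp previously emitted for an MLIR value. Fails if the
// value was defined outside the region currently being exported.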
static mlir::LogicalResult GetXlaOp(
mlir::Value val, const llvm::DenseMap<mlir::Value, xla::XlaOp>& val_map,
xla::XlaOp* result, mlir::Operation* op) {
auto iter = val_map.find(val);
if (iter == val_map.end()) {
return op->emitOpError(
"requires all operands to be defined in the parent region for export");
}
*result = iter->second;
return mlir::success();
}
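// Returns true if the type is a ranked tensor whose dimensions are all either
// static or dynamic with an explicit bound in the mhlo TypeExtensions
// encoding.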
bool IsBoundedOrStatic(mlir::Type ty) {
auto ranked_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
if (!ranked_ty) return false;
if (ranked_ty.hasStaticShape()) return true;
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
ranked_ty.getEncoding());
if (!encoding || encoding.getBounds().empty()) return false;
int64_t rank = ranked_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (ranked_ty.isDynamicDim(dim) &&
encoding.getBounds()[dim] == mlir::ShapedType::kDynamic)
return false;
}
return true;
}
template <typename T>
xla::Array<T> ArrayFromDenseElementsAttr(mlir::DenseElementsAttr dense_attr) {
constexpr xla::PrimitiveType type =
xla::primitive_util::NativeToPrimitiveType<T>();
xla::Shape shape = xla::TypeToShape(dense_attr.getType());
xla::Array<T> array(shape.dimensions());
if constexpr (!xla::primitive_util::IsSubByteNonPredType(type)) {
array.SetValues(dense_attr.getValues<T>());
} else {
auto values = dense_attr.getValues<llvm::APInt>();
for (int i = 0; i < values.size(); i++) {
if constexpr (xla::primitive_util::IsUnsignedIntegralType(type)) {
array.data()[i] = T{values[i].getZExtValue()};
} else {
static_assert(xla::primitive_util::IsSignedIntegralType(type));
array.data()[i] = T{values[i].getSExtValue()};
}
}
}
return array;
}
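// Converts a dense elements attribute into an xla::Literal with the requested
// layout, dispatching on the XLA primitive type of the attribute's shape.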
absl::StatusOr<xla::Literal> CreateArrayLiteralFromAttr(mlir::ElementsAttr attr,
xla::Layout layout) {
auto dense_attr = mlir::dyn_cast<mlir::DenseElementsAttr>(attr);
if (!dense_attr)
return tsl::errors::Unimplemented("Only dense elements attr are supported");
xla::Shape shape = xla::TypeToShape(dense_attr.getType());
return xla::primitive_util::PrimitiveTypeSwitch<absl::StatusOr<xla::Literal>>(
[&](auto primitive_type_constant) -> absl::StatusOr<xla::Literal> {
if constexpr (xla::primitive_util::IsArrayType(
primitive_type_constant)) {
using cpp_type =
xla::primitive_util::NativeTypeOf<primitive_type_constant>;
xla::Array<cpp_type> source_data =
ArrayFromDenseElementsAttr<cpp_type>(dense_attr);
return xla::LiteralUtil::CreateFromArrayWithLayout(source_data,
layout);
}
return tsl::errors::Internal(absl::StrCat(
"Unsupported type: ",
xla::PrimitiveType_Name(shape.element_type())));
},
shape.element_type());
}
static int ConvertAPInt(llvm::APInt i) { return i.getSExtValue(); }
static uint32_t Convertuint32_t(uint32_t i) { return i; }
static uint64_t Convertuint64_t(uint64_t i) { return i; }
static double ConvertAPFloat(llvm::APFloat value) {
const auto& semantics = value.getSemantics();
bool losesInfo = false;
if (&semantics != &llvm::APFloat::IEEEdouble())
value.convert(llvm::APFloat::IEEEdouble(),
llvm::APFloat::rmNearestTiesToEven, &losesInfo);
return value.convertToDouble();
}
static inline bool Convertbool(bool value) { return value; }
static absl::string_view ConvertStringRef(mlir::StringRef value) {
return {value.data(), value.size()};
}
static std::vector<int64_t> ConvertDenseIntAttr(
mlir::DenseIntElementsAttr attr) {
auto values = attr.getValues<int64_t>();
return {values.begin(), values.end()};
}
static std::vector<int64_t> ConvertDenseIntAttr(
std::optional<mlir::DenseIntElementsAttr> attr) {
if (!attr) return {};
return ConvertDenseIntAttr(*attr);
}
static std::vector<int64_t> Convert_broadcast_dimensions(
std::optional<mlir::DenseIntElementsAttr> broadcast_dimensions) {
if (!broadcast_dimensions.has_value()) return {};
return ConvertDenseIntAttr(*broadcast_dimensions);
}
static std::vector<xla::CrossProgramPrefetch> Convert_cross_program_prefetches(
mlir::ArrayAttr prefetches) {
std::vector<xla::CrossProgramPrefetch> cross_program_prefetches;
for (auto prefetch : prefetches) {
auto cpp = mlir::cast<mlir::mhlo::CrossProgramPrefetchAttr>(prefetch);
xla::CrossProgramPrefetch xla_cpp;
xla_cpp.set_parameter(cpp.getParameter());
for (auto index : cpp.getIndices()) xla_cpp.add_index(index);
cross_program_prefetches.push_back(xla_cpp);
}
return cross_program_prefetches;
}
static xla::FftType Convert_fft_type(mlir::mhlo::FftType fft_type) {
xla::FftType fft_type_enum;
if (!FftType_Parse(std::string(mlir::mhlo::stringifyFftType(fft_type)),
&fft_type_enum))
return xla::FftType::FFT;
return fft_type_enum;
}
static std::vector<std::pair<int64_t, int64_t>> Convert_padding(
std::optional<mlir::DenseIntElementsAttr> padding) {
return xla::ConvertNx2Attribute(padding).value();
}
static std::optional<bool> Convert_use_global_device_ids(
std::optional<bool> use_global_device_ids) {
if (!use_global_device_ids) return {};
return *use_global_device_ids;
}
static std::vector<std::pair<int64_t, int64_t>> Convert_source_target_pairs(
std::optional<mlir::DenseIntElementsAttr> source_target_pairs) {
return xla::ConvertNx2Attribute(source_target_pairs).value();
}
static std::vector<xla::ReplicaGroup> Convert_replica_groups(
mlir::DenseIntElementsAttr groups) {
return xla::ConvertReplicaGroups(groups).value();
}
static void SetLayout(xla::Shape& shape, mlir::DenseIntElementsAttr layout) {
if (shape.IsArray()) {
shape.mutable_layout()->clear_minor_to_major();
for (auto l : layout) {
shape.mutable_layout()->mutable_minor_to_major()->push_back(
l.getSExtValue());
}
} else if (shape.IsToken()) {
assert(layout.empty() && "Invalid layout for token type");
} else {
assert(!shape.IsTuple() &&
"Exporting layout for tuples is not implemented yet");
assert(false && "Exporting unknown type with layout");
}
}
static void SetLayout(xla::Shape& shape, mlir::ArrayAttr layouts) {
if (shape.IsTuple()) {
for (int i = 0; i < shape.tuple_shapes_size(); ++i) {
SetLayout(*shape.mutable_tuple_shapes(i),
mlir::cast<mlir::DenseIntElementsAttr>(layouts[i]));
}
} else {
assert(layouts.size() == 1);
SetLayout(shape, mlir::cast<mlir::DenseIntElementsAttr>(layouts[0]));
}
}
static std::vector<xla::Shape> ConvertTypesToShapesWithLayout(
mlir::TypeRange value_types, mlir::ArrayAttr layouts) {
std::vector<xla::Shape> shapes_with_layout;
for (auto [type, layout] : llvm::zip(value_types, layouts)) {
xla::Shape shape = xla::TypeToShape(type);
SetLayout(shape, mlir::cast<mlir::DenseIntElementsAttr>(layout));
shapes_with_layout.push_back(std::move(shape));
}
return shapes_with_layout;
}
static xla::TriangularSolveOptions::Transpose Convert_transpose_a(
mlir::mhlo::Transpose transpose) {
return xla::ConvertTranspose(mlir::mhlo::stringifyTranspose(transpose))
.value();
}
static xla::Layout ExtractLayout(
mlir::Operation* op, int rank,
llvm::StringRef attr_name = kDefaultLayoutAttrName) {
if (auto attr = op->getAttrOfType<mlir::DenseIntElementsAttr>(attr_name)) {
llvm::SmallVector<int64_t, 4> minor_to_major;
DCHECK_EQ(rank, attr.size());
minor_to_major.reserve(attr.size());
for (const llvm::APInt& i : attr) {
minor_to_major.push_back(i.getZExtValue());
}
return xla::LayoutUtil::MakeLayout(minor_to_major);
}
return xla::LayoutUtil::MakeDescendingLayout(rank);
}
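// Prefers an explicit "xla_shape" string attribute when present; otherwise
// derives the shape from the op's MLIR result types, wrapping multiple
// results in a tuple shape.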
static mlir::FailureOr<xla::Shape> ExtractXlaShape(mlir::Operation* op) {
if (auto attr = op->getAttrOfType<mlir::StringAttr>(kDefaultLayoutAttrName)) {
return *xla::ParseShape(
absl::string_view(attr.getValue().data(), attr.getValue().size()));
} else {
std::vector<xla::Shape> subshapes;
for (auto [index, result] : llvm::enumerate(op->getResults())) {
subshapes.push_back(xla::TypeToShape(result.getType()));
if (subshapes.back().element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return op->emitError()
<< "result #" << index << " type is not supported";
}
}
if (subshapes.size() > 1) {
return xla::ShapeUtil::MakeTupleShape(subshapes);
}
return subshapes[0];
}
}
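// Generates Convert_<attr> helpers that turn optional DenseIntElementsAttr
// attributes into std::vector<int64_t>.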
#define I64_ELEMENTS_ATTR_TO_VECTOR(attribute) \
static std::vector<int64_t> Convert_##attribute( \
std::optional<mlir::DenseIntElementsAttr> attribute) { \
return ConvertDenseIntAttr(attribute); \
}
I64_ELEMENTS_ATTR_TO_VECTOR(broadcast_sizes);
I64_ELEMENTS_ATTR_TO_VECTOR(permutation);
I64_ELEMENTS_ATTR_TO_VECTOR(start_indices);
I64_ELEMENTS_ATTR_TO_VECTOR(limit_indices);
I64_ELEMENTS_ATTR_TO_VECTOR(strides);
I64_ELEMENTS_ATTR_TO_VECTOR(slice_sizes);
I64_ELEMENTS_ATTR_TO_VECTOR(fft_length);
I64_ELEMENTS_ATTR_TO_VECTOR(dimensions);
I64_ELEMENTS_ATTR_TO_VECTOR(window_strides);
I64_ELEMENTS_ATTR_TO_VECTOR(lhs_dilation);
I64_ELEMENTS_ATTR_TO_VECTOR(rhs_dilation);
#undef I64_ELEMENTS_ATTR_TO_VECTOR
#define BOOL_ELEMENTS_ATTR_TO_VECTOR(attribute) \
static std::vector<bool> Convert_##attribute( \
std::optional<mlir::DenseElementsAttr> attribute) { \
if (!attribute) return {}; \
auto values = attribute->getValues<bool>(); \
return {values.begin(), values.end()}; \
}
BOOL_ELEMENTS_ATTR_TO_VECTOR(window_reversal);
#undef BOOL_ELEMENTS_ATTR_TO_VECTOR
static std::vector<int64_t> Convert_ArrayRef(llvm::ArrayRef<int64_t> values) {
return {values.begin(), values.end()};
}
static std::unique_ptr<xla::PrecisionConfig> Convert_precision_config(
std::optional<mlir::ArrayAttr> optional_precision_config_attr) {
if (!optional_precision_config_attr.has_value()) return nullptr;
auto precision_config = std::make_unique<xla::PrecisionConfig>();
for (auto attr : optional_precision_config_attr.value()) {
xla::PrecisionConfig::Precision p;
auto operand_precision =
mlir::mhlo::stringifyPrecision(
mlir::cast<mlir::mhlo::PrecisionAttr>(attr).getValue())
.str();
if (xla::PrecisionConfig::Precision_Parse(operand_precision, &p)) {
precision_config->add_operand_precision(p);
} else {
auto* context = attr.getContext();
mlir::emitError(mlir::UnknownLoc::get(context))
<< "unexpected operand precision " << operand_precision;
return nullptr;
}
}
return precision_config;
}
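// Converts the MHLO dot dimension numbers attribute into the XLA
// DotDimensionNumbers proto.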
static xla::DotDimensionNumbers Convert_dot_dimension_numbers(
mlir::mhlo::DotDimensionNumbersAttr dot_dimension_numbers_attr) {
xla::DotDimensionNumbers dot_dimension_numbers;
auto rhs_contracting_dimensions =
dot_dimension_numbers_attr.getRhsContractingDimensions();
auto lhs_contracting_dimensions =
dot_dimension_numbers_attr.getLhsContractingDimensions();
auto rhs_batch_dimensions =
dot_dimension_numbers_attr.getRhsBatchingDimensions();
auto lhs_batch_dimensions =
dot_dimension_numbers_attr.getLhsBatchingDimensions();
for (const auto& val : rhs_contracting_dimensions) {
dot_dimension_numbers.add_rhs_contracting_dimensions(val);
}
for (const auto& val : lhs_contracting_dimensions) {
dot_dimension_numbers.add_lhs_contracting_dimensions(val);
}
for (const auto& val : rhs_batch_dimensions) {
dot_dimension_numbers.add_rhs_batch_dimensions(val);
}
for (const auto& val : lhs_batch_dimensions) {
dot_dimension_numbers.add_lhs_batch_dimensions(val);
}
return dot_dimension_numbers;
}
static xla::SparsityDescriptor Convert_sparsity_descriptor(
mlir::mhlo::SparsityDescriptorAttr sparsity_attr, bool is_lhs) {
xla::SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(xla::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_index(is_lhs ? 0 : 1);
sparsity_descriptor.set_dimension(sparsity_attr.getDimension());
sparsity_descriptor.set_n(sparsity_attr.getN());
sparsity_descriptor.set_m(sparsity_attr.getM());
return sparsity_descriptor;
}
xla::ChannelHandle Convert_channel_handle(mlir::mhlo::ChannelHandleAttr attr) {
xla::ChannelHandle channel_handle;
channel_handle.set_handle(attr.getHandle());
channel_handle.set_type(
static_cast<xla::ChannelHandle::ChannelType>(attr.getType()));
return channel_handle;
}
std::optional<xla::ChannelHandle> Convert_channel_handle(
std::optional<mlir::mhlo::ChannelHandleAttr> attr) {
if (!attr.has_value()) return std::nullopt;
return Convert_channel_handle(attr.value());
}
static xla::ComparisonDirection Convert_comparison_direction(
llvm::StringRef comparison_direction_string) {
return xla::StringToComparisonDirection(comparison_direction_string.str())
.value();
}
static xla::GatherDimensionNumbers Convert_dimension_numbers(
mlir::mhlo::GatherDimensionNumbersAttr input) {
xla::GatherDimensionNumbers output;
auto offset_dims = input.getOffsetDims();
std::copy(
offset_dims.begin(), offset_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(output.mutable_offset_dims()));
auto collapsed_slice_dims = input.getCollapsedSliceDims();
std::copy(collapsed_slice_dims.begin(), collapsed_slice_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_collapsed_slice_dims()));
auto operand_batching_dims = input.getOperandBatchingDims();
std::copy(operand_batching_dims.begin(), operand_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_operand_batching_dims()));
auto start_indices_batching_dims = input.getStartIndicesBatchingDims();
std::copy(start_indices_batching_dims.begin(),
start_indices_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_start_indices_batching_dims()));
auto start_index_map = input.getStartIndexMap();
std::copy(start_index_map.begin(), start_index_map.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_start_index_map()));
output.set_index_vector_dim(input.getIndexVectorDim());
return output;
}
static xla::ScatterDimensionNumbers Convert_scatter_dimension_numbers(
mlir::mhlo::ScatterDimensionNumbersAttr input) {
xla::ScatterDimensionNumbers output;
auto update_window_dims = input.getUpdateWindowDims();
std::copy(update_window_dims.begin(), update_window_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_update_window_dims()));
auto inserted_window_dims = input.getInsertedWindowDims();
std::copy(inserted_window_dims.begin(), inserted_window_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_inserted_window_dims()));
auto input_batching_dims = input.getInputBatchingDims();
std::copy(input_batching_dims.begin(), input_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_input_batching_dims()));
auto scatter_indices_batching_dims = input.getScatterIndicesBatchingDims();
std::copy(scatter_indices_batching_dims.begin(),
scatter_indices_batching_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_scatter_indices_batching_dims()));
auto scatter_dims_to_operand_dims = input.getScatterDimsToOperandDims();
std::copy(scatter_dims_to_operand_dims.begin(),
scatter_dims_to_operand_dims.end(),
tsl::protobuf::RepeatedFieldBackInserter(
output.mutable_scatter_dims_to_operand_dims()));
output.set_index_vector_dim(input.getIndexVectorDim());
return output;
}
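// Returns the op's sharding parsed from its mhlo.sharding string attribute,
// or std::nullopt when no such attribute exists.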
static std::optional<xla::OpSharding> CreateOpShardingFromAttribute(
mlir::Operation* op) {
auto shardingAttr = op->getAttrOfType<mlir::StringAttr>(kMhloSharding);
if (!shardingAttr) return std::nullopt;
return xla::ConvertSharding(shardingAttr.getValue());
}
void ConstructFrontendAttributesFromAttribute(
const mlir::DictionaryAttr& frontend_attributes_dict,
xla::FrontendAttributes& frontend_attributes) {
for (const auto& attr : frontend_attributes_dict)
if (auto value_str_attr = mlir::dyn_cast<mlir::StringAttr>(attr.getValue()))
frontend_attributes.mutable_map()->insert(
{attr.getName().str(), value_str_attr.getValue().str()});
}
static xla::FrontendAttributes CreateXlaFrontendAttributesFromOp(
mlir::Operation* op) {
xla::FrontendAttributes frontend_attributes;
auto frontend_attributes_dict =
op->getAttrOfType<mlir::DictionaryAttr>(kMhloFrontendAttributes);
if (!frontend_attributes_dict) return frontend_attributes;
ConstructFrontendAttributesFromAttribute(frontend_attributes_dict,
frontend_attributes);
return frontend_attributes;
}
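// Collects per-argument frontend attributes (mhlo.frontend_attributes
// dictionaries) from the function's argument attributes.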
static void ExtractFrontendAttributesFromFunction(
mlir::func::FuncOp function,
llvm::SmallVectorImpl<std::optional<xla::FrontendAttributes>>* fe_attrs) {
fe_attrs->resize(function.getNumArguments(), std::nullopt);
for (int i = 0, end = function.getNumArguments(); i < end; ++i)
if (auto fe_attr = function.getArgAttrOfType<mlir::DictionaryAttr>(
i, kMhloFrontendAttributes)) {
xla::FrontendAttributes frontend_attributes;
ConstructFrontendAttributesFromAttribute(fe_attr, frontend_attributes);
(*fe_attrs)[i] = frontend_attributes;
}
}
static bool SomeOptionalShardingsAreSet(
llvm::ArrayRef<std::optional<xla::OpSharding>> shardings) {
return llvm::any_of(shardings,
[](const std::optional<xla::OpSharding>& sharding) {
return sharding.has_value();
});
}
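// Reads per-argument and per-result mhlo.sharding attributes from the
// function into the given vectors.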
static void ExtractShardingsFromFunction(
mlir::func::FuncOp function,
llvm::SmallVectorImpl<std::optional<xla::OpSharding>>* arg_shardings,
llvm::SmallVectorImpl<std::optional<xla::OpSharding>>* ret_shardings) {
arg_shardings->resize(function.getNumArguments(),
std::optional<xla::OpSharding>());
for (int i = 0, end = function.getNumArguments(); i < end; ++i)
if (auto sharding =
function.getArgAttrOfType<mlir::StringAttr>(i, kMhloSharding))
(*arg_shardings)[i] = xla::ConvertSharding(sharding.getValue());
ret_shardings->resize(function.getNumResults(),
std::optional<xla::OpSharding>());
for (int i = 0, end = function.getNumResults(); i < end; ++i)
if (auto sharding =
function.getResultAttrOfType<mlir::StringAttr>(i, kMhloSharding))
(*ret_shardings)[i] = xla::ConvertSharding(sharding.getValue());
}
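// Builds a tuple sharding from the per-element shardings, substituting a
// replicated sharding for elements that have none. Returns std::nullopt if
// no element has a sharding set.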
std::optional<xla::OpSharding> CreateTupleSharding(
llvm::ArrayRef<std::optional<xla::OpSharding>> tuple_shardings) {
if (tuple_shardings.empty() ||
!SomeOptionalShardingsAreSet(tuple_shardings)) {
return std::nullopt;
}
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
for (const std::optional<xla::OpSharding>& tuple_sharding : tuple_shardings) {
if (tuple_sharding) {
*sharding.add_tuple_shardings() = *tuple_sharding;
} else {
xla::OpSharding fallback_sharding;
fallback_sharding.set_type(xla::OpSharding::REPLICATED);
*sharding.add_tuple_shardings() = fallback_sharding;
}
}
return sharding;
}
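// Returns the single op unchanged, or wraps multiple ops in a tuple whose
// sharding is assembled from the per-op shardings.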
xla::XlaOp CreateTupleIfMultipleOps(
xla::XlaBuilder* builder, llvm::ArrayRef<xla::XlaOp> ops,
llvm::ArrayRef<std::optional<xla::OpSharding>> shardings) {
if (ops.size() == 1) {
return ops[0];
}
xla::XlaScopedShardingAssignment scoped_sharding(
builder, CreateTupleSharding(shardings));
return Tuple(builder, ops);
}
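// Expands an op sharding into per-result shardings: tuple shardings are
// split element-wise, any other sharding is repeated for every result.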
llvm::SmallVector<std::optional<xla::OpSharding>> GetResultShardings(
std::optional<xla::OpSharding> op_sharding, int64_t num_results) {
if (!op_sharding) {
return {};
}
llvm::SmallVector<std::optional<xla::OpSharding>> res_shardings;
res_shardings.reserve(num_results);
if (op_sharding->type() == xla::OpSharding::TUPLE) {
assert(op_sharding->tuple_shardings_size() == num_results);
res_shardings.assign(op_sharding->tuple_shardings().begin(),
op_sharding->tuple_shardings().end());
} else {
res_shardings.append(num_results, op_sharding);
}
return res_shardings;
}
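// Collects the sharding currently attached to each of the given XlaOps.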
llvm::SmallVector<std::optional<xla::OpSharding>> GetXlaOpShardings(
llvm::ArrayRef<xla::XlaOp> xla_ops) {
llvm::SmallVector<std::optional<xla::OpSharding>> shardings;
shardings.reserve(xla_ops.size());
for (const xla::XlaOp& xla_op : xla_ops) {
auto sharding = xla_op.builder()->GetOpSharding(xla_op);
assert(sharding.ok() && "can't find XlaOp for argument");
shardings.push_back(*sharding);
}
return shardings;
}
namespace mlir {
namespace {
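// Lowers an MLIR module (which must contain a `main` function) into an XLA
// HLO module, caching the XlaComputation produced for each lowered function.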
class ConvertToHloModule {
public:
using ValueLoweringMap = llvm::DenseMap<Value, xla::XlaOp>;
using FunctionLoweringMap =
llvm::DenseMap<mlir::func::FuncOp, xla::XlaComputation>;
explicit ConvertToHloModule(mlir::ModuleOp module,
xla::XlaBuilder& module_builder,
MlirToHloConversionOptions options)
: module_(module), module_builder_(module_builder), options_(options) {}
LogicalResult Run() {
auto main = module_.lookupSymbol<mlir::func::FuncOp>(kMain);
if (!main)
return module_.emitError(
"conversion requires module with `main` function");
for (auto func : module_.getOps<func::FuncOp>()) {
if (func.empty()) continue;
if (failed(RunOnFunction(func))) return failure();
}
return success();
}
LogicalResult RunOnFunction(mlir::func::FuncOp f);
::xla::HloModuleProto ConsumeMainProto() {
auto main = module_.lookupSymbol<mlir::func::FuncOp>(kMain);
CHECK(main) << "requires module to have main function";
return lowered_computation_[main].proto();
}
LogicalResult LowerRegionAsComputation(
mlir::Region* region, xla::XlaComputation* func,
llvm::ArrayRef<mlir::Value> implicit_operands = {},
llvm::ArrayRef<mlir::Value> implicit_results = {},
bool ensure_single_arg = false,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings = {},
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings = {});
LogicalResult LowerBasicBlockAsFunction(
Block* block, xla::XlaBuilder* builder, bool is_entry_function,
bool ensure_single_arg,
const std::vector<bool>& entry_args_same_across_replicas,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<std::optional<xla::FrontendAttributes>> fe_attrs,
xla::XlaComputation* result,
llvm::ArrayRef<mlir::Value> implicit_operands = {},
llvm::ArrayRef<mlir::Value> implicit_results = {});
LogicalResult LowerCast(mlir::Operation* inst,
const MlirToHloConversionOptions& options,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerCompositeCall(
mlir::Operation* inst, xla::XlaBuilder* module_builder,
xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value);
LogicalResult LowerConstant(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
ElementsAttr const_attr);
LogicalResult LowerFunctionCall(
mlir::func::CallOp call_op, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerInfeed(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering);
LogicalResult LowerReturn(
Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value, const MlirToHloConversionOptions& options);
LogicalResult PropagateLayouts(const MlirToHloConversionOptions& options,
mlir::Operation* inst, xla::XlaOp xla_op);
func::FuncOp LookUpSymbol(FlatSymbolRefAttr symbol) {
return module_.lookupSymbol<mlir::func::FuncOp>(symbol);
}
xla::XlaComputation& GetLoweredComputation(func::FuncOp func) {
return lowered_computation_[func];
}
LogicalResult Lower(
mlir::Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value);
const MlirToHloConversionOptions& GetOptions() const { return options_; }
xla::StackFrameIndexProto BuildStackFramesIndexProto() {
return stack_frame_indexes_builder_.Build();
}
private:
LogicalResult SetEntryTupleShapesAndLeafReplication(
Block* block, const std::vector<bool>& entry_args_same_across_replicas,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes,
std::vector<bool>* leaf_replication);
LogicalResult SetEntryTupleShardings(
Block* block, xla::XlaBuilder* builder,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes);
mlir::ModuleOp module_;
xla::XlaBuilder& module_builder_;
mlir::StackFrameIndexBuilder stack_frame_indexes_builder_;
FunctionLoweringMap lowered_computation_;
size_t region_id_ = 0;
MlirToHloConversionOptions options_;
};
}  // namespace
}  // namespace mlir
namespace {
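// Bundles the state needed while lowering a single op: the value-to-XlaOp
// map, the module converter, the current builder, and the stack frame index
// builder.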
struct OpLoweringContext {
llvm::DenseMap<mlir::Value, xla::XlaOp>* values;
mlir::ConvertToHloModule* converter;
xla::XlaBuilder* builder;
mlir::StackFrameIndexBuilder* frame_index_builder;
};
mlir::LogicalResult GetTuple(mlir::Operation* op,
mlir::Operation::operand_range values,
OpLoweringContext ctx,
llvm::SmallVectorImpl<xla::XlaOp>& results) {
results.reserve(values.size());
for (mlir::Value value : values) {
if (failed(GetXlaOp(value, *ctx.values, &results.emplace_back(), op)))
return mlir::failure();
}
return mlir::success();
}
mlir::LogicalResult GetXlaOps(mlir::Operation* op,
llvm::ArrayRef<mlir::Value> values,
OpLoweringContext ctx,
llvm::SmallVectorImpl<xla::XlaOp>& results) {
results.reserve(values.size());
for (mlir::Value value : values) {
if (failed(GetXlaOp(value, *ctx.values, &results.emplace_back(), op)))
return mlir::failure();
}
return mlir::success();
}
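// Returns true if all of the op's operands are block arguments and its only
// user is the enclosing function's return op.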
bool SimplyReturnedOp(mlir::Operation* op) {
for (auto operand : op->getOperands()) {
if (!llvm::isa<mlir::BlockArgument>(operand)) return false;
}
auto users = op->getUsers();
if (users.empty()) return false;
auto first_user = *users.begin();
for (auto user : users) {
if (first_user != user) return false;
}
if (llvm::isa<mlir::func::ReturnOp>(first_user)) return true;
return false;
}
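// Emits a get-tuple-element for each result of `op` from `tuple`,
// propagating per-element shardings when the builder holds a tuple sharding.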
void BuildGetTupleElementsForTupleResults(mlir::Operation* op, xla::XlaOp tuple,
OpLoweringContext ctx,
unsigned num_implicit_results = 0) {
const std::optional<xla::OpSharding>& sharding = ctx.builder->sharding();
if (sharding.has_value()) {
bool is_tuple_sharding = sharding->type() == xla::OpSharding::TUPLE;
assert(!is_tuple_sharding || (op->getNumResults() + num_implicit_results ==
sharding->tuple_shardings_size()));
for (auto [index, result] : llvm::enumerate(op->getResults())) {
xla::XlaScopedShardingAssignment scoped_sharding(
ctx.builder,
is_tuple_sharding ? sharding->tuple_shardings(index) : sharding);
(*ctx.values)[result] = xla::GetTupleElement(tuple, index);
}
} else {
xla::XlaScopedShardingAssignment scoped_sharding(ctx.builder, std::nullopt);
for (auto [index, result] : llvm::enumerate(op->getResults())) {
(*ctx.values)[result] = xla::GetTupleElement(tuple, index);
}
}
}
}  // namespace
namespace mlir {
namespace mhlo {
namespace {
LogicalResult ExportXlaOp(CollectiveBroadcastOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op->getResult(0)] = xla::CollectiveBroadcast(
operand, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()));
return success();
}
LogicalResult ExportXlaOp(CompositeOp, OpLoweringContext) {
return failure();
}
LogicalResult ExportXlaOp(DynamicBroadcastInDimOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicConvOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicGatherOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicIotaOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicPadOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(DynamicReshapeOp op, OpLoweringContext ctx) {
auto resultType = mlir::dyn_cast<RankedTensorType>(op.getResult().getType());
if (!resultType) return op->emitOpError() << "expected ranked result";
auto resultBounds = hlo::encodingToBounds(resultType.getEncoding());
if (resultBounds.empty())
return op->emitOpError() << "expected bounded result";
auto shapeType =
mlir::dyn_cast<RankedTensorType>(op.getOutputShape().getType());
if (!shapeType || !shapeType.getElementType().isInteger(32))
return op->emitOpError() << "expected output shape to be tensor<Nxi32>";
auto& value_map = *ctx.values;
xla::XlaOp operand;
xla::XlaOp outputShape;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getOutputShape(), value_map, &outputShape, op)))
return failure();
SmallVector<xla::XlaOp> dimSizes;
SmallVector<int64_t> newSizeBounds;
std::vector<bool> dimsAreDynamic;
for (auto i = 0; i < resultType.getRank(); ++i) {
auto runtimeSizeX1 = xla::Slice(outputShape, {i}, {i + 1}, {1});
dimSizes.push_back(xla::Reshape(runtimeSizeX1, {}));
auto dimSize = resultType.getDimSize(i);
auto dimBound = resultBounds[i];
if (!hlo::isStaticDimSize(dimSize) && !hlo::isStaticDimSize(dimBound))
return op->emitOpError() << "unbounded dynamism is not supported";
newSizeBounds.push_back(hlo::isStaticDimSize(dimSize) ? dimSize : dimBound);
dimsAreDynamic.push_back(!hlo::isStaticDimSize(dimSize));
}
value_map[op] =
xla::DynamicReshape(operand, dimSizes, newSizeBounds, dimsAreDynamic);
return success();
}
LogicalResult ExportXlaOp(RealDynamicSliceOp op, OpLoweringContext ctx) {
return failure();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::CopyOp op, OpLoweringContext ctx) {
if (op.getCrossProgramPrefetchIndex() && !SimplyReturnedOp(op))
return op->emitOpError() << "synchronous CopyOp should not include "
"cross_program_prefetch_index attribute.";
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp xla_arg_0;
if (failed(
GetXlaOp(*op.getODSOperands(0).begin(), value_map, &xla_arg_0, op)))
return mlir::failure();
auto xla_result = xla::Copy(Unwrap(xla_arg_0));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(AddDependencyOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
auto operand_shape = ctx.builder->GetShape(operand).value();
value_map[op] = xla::internal::XlaBuilderFriend::BuildAddDependency(
ctx.builder, operand, token, operand_shape);
return success();
}
LogicalResult ExportXlaOp(AllGatherOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands))) {
return failure();
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
auto all_gather_dim = op.getAllGatherDim();
int64_t shard_count = 0;
for (size_t i = 0; i < operands.size(); ++i) {
TensorType operand_type =
mlir::cast<TensorType>(op.getOperand(i).getType());
TensorType result_type = mlir::cast<TensorType>(op.getType(i));
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
if (i == 0) {
shard_count = result_type.getDimSize(all_gather_dim) /
operand_type.getDimSize(all_gather_dim);
}
}
if (shape_or->IsTuple()) {
std::optional<xla::Layout> layout = std::nullopt;
if (shape_or->has_layout()) {
layout = shape_or->layout();
}
auto tuple = xla::AllGatherTuple(
operands, all_gather_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), layout,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
value_map[op->getResults()[0]] = xla::AllGather(
operands[0], all_gather_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
}
return success();
}
LogicalResult ExportXlaOp(AllReduceOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands)))
return failure();
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
if (shape_or->IsTuple()) {
std::optional<xla::Shape> shape_with_layout = std::nullopt;
if (shape_or->has_layout()) shape_with_layout = shape_or.value();
auto tuple = xla::AllReduceTuple(
operands, computation, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), shape_with_layout,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
value_map[op->getResults()[0]] = xla::AllReduce(
operands[0], computation, Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
}
return success();
}
LogicalResult ExportXlaOp(AllToAllOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op.getOperation(), op.getOperands(), ctx, operands))) {
return failure();
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(op.getOperation());
if (failed(shape_or)) return failure();
if (shape_or->IsTuple()) {
std::optional<xla::Layout> layout = std::nullopt;
if (shape_or->has_layout()) {
layout = shape_or->layout();
}
auto tuple = xla::AllToAllTuple(
operands, Convert_replica_groups(op.getReplicaGroups()), layout,
Convert_channel_handle(op.getChannelHandle()));
BuildGetTupleElementsForTupleResults(op, tuple, ctx);
} else {
std::optional<uint64_t> splitDimension = op.getSplitDimension();
std::optional<uint64_t> concatDimension = op.getConcatDimension();
std::optional<uint64_t> splitCount = op.getSplitCount();
value_map[op->getResults()[0]] = xla::AllToAll(
operands[0], *splitDimension, *concatDimension, *splitCount,
Convert_replica_groups(op.getReplicaGroups()),
std::nullopt, Convert_channel_handle(op.getChannelHandle()));
}
return success();
}
LogicalResult ExportXlaOp(ReduceScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
TensorType operand_type = mlir::cast<TensorType>(op.getOperand().getType());
TensorType result_type = op.getType();
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
auto scatter_dim = op.getScatterDimension();
int64_t shard_count = operand_type.getDimSize(scatter_dim) /
result_type.getDimSize(scatter_dim);
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
value_map[op] = xla::ReduceScatter(
operand, computation, scatter_dim, shard_count,
Convert_replica_groups(op.getReplicaGroups()),
Convert_channel_handle(op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(op.getUseGlobalDeviceIds()));
return success();
}
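// Lowers mhlo.async_start. When the called computation consists of a single
// simply-returned all_gather / all_reduce / collective_permute / copy /
// send / recv, the matching XLA *-start instruction is emitted directly;
// otherwise the callee is lowered and a generic async-start is built.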
LogicalResult ExportXlaOp(AsyncStartOp op, OpLoweringContext ctx) {
for (auto* user : op.getResult().getUsers()) {
if (!isa<AsyncUpdateOp, AsyncDoneOp>(user)) {
return op.emitOpError() << "Users of AsyncStart's return value must be "
<< "async_update or async_done";
}
}
auto& value_map = *ctx.values;
Value result = op.getResult();
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
FlatSymbolRefAttr::get(op->getContext(), op.getCalledComputation()));
auto all_gather_op =
dyn_cast_or_null<AllGatherOp>(callee.getBody().front().front());
if (all_gather_op && SimplyReturnedOp(all_gather_op)) {
TensorType operand_type =
mlir::cast<TensorType>(all_gather_op.getOperand(0).getType());
TensorType result_type = mlir::cast<TensorType>(all_gather_op.getType(0));
if (!operand_type.hasStaticShape() || !result_type.hasStaticShape())
return failure();
if (operands.size() != 1) return failure();
auto all_gather_dim = all_gather_op.getAllGatherDim();
int64_t shard_count = result_type.getDimSize(all_gather_dim) /
operand_type.getDimSize(all_gather_dim);
value_map[result] = xla::internal::XlaBuilderFriend::BuildAllGatherStart(
ctx.builder, operands[0], all_gather_dim, shard_count,
Convert_replica_groups(all_gather_op.getReplicaGroups()),
Convert_channel_handle(all_gather_op.getChannelHandle()),
ExtractLayout(all_gather_op,
mlir::cast<RankedTensorType>(result_type).getRank()),
Convert_use_global_device_ids(all_gather_op.getUseGlobalDeviceIds()));
return success();
}
auto all_reduce_op =
dyn_cast_or_null<AllReduceOp>(callee.getBody().front().front());
if (all_reduce_op && SimplyReturnedOp(all_reduce_op)) {
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(
&all_reduce_op.getComputation(), &computation))) {
return failure();
}
if (operands.size() != 1) return failure();
value_map[result] = xla::internal::XlaBuilderFriend::BuildAllReduceStart(
ctx.builder, operands[0], computation,
Convert_replica_groups(all_reduce_op.getReplicaGroups()),
Convert_channel_handle(all_reduce_op.getChannelHandle()), std::nullopt,
Convert_use_global_device_ids(all_reduce_op.getUseGlobalDeviceIds()));
return success();
}
auto collective_permute_op =
dyn_cast_or_null<CollectivePermuteOp>(callee.getBody().front().front());
if (collective_permute_op && SimplyReturnedOp(collective_permute_op)) {
value_map[result] =
xla::internal::XlaBuilderFriend::BuildCollectivePermuteStart(
ctx.builder, operands[0],
Convert_source_target_pairs(
collective_permute_op.getSourceTargetPairs()),
Convert_channel_handle(collective_permute_op.getChannelHandle()));
return mlir::success();
}
auto copy_op = dyn_cast_or_null<CopyOp>(callee.getBody().front().front());
if (copy_op && SimplyReturnedOp(copy_op)) {
std::optional<int> cross_program_prefetch_index =
copy_op.getCrossProgramPrefetchIndex()
? std::make_optional(*copy_op.getCrossProgramPrefetchIndex())
: std::nullopt;
value_map[result] = xla::internal::XlaBuilderFriend::BuildCopyStart(
ctx.builder, operands[0], cross_program_prefetch_index);
return mlir::success();
}
auto send_op = dyn_cast_or_null<SendOp>(callee.getBody().front().front());
if (send_op && SimplyReturnedOp(send_op)) {
xla::XlaOp operand;
if (operands.size() == 2)
operand = operands[0];
else
operand =
Tuple(ctx.builder, absl::Span<const xla::XlaOp>(operands).subspan(
0, operands.size() - 1));
xla::XlaOp token = operands[operands.size() - 1];
value_map[result] = xla::internal::XlaBuilderFriend::BuildSend(
ctx.builder, operand, token,
Convert_channel_handle(send_op.getChannelHandle()),
send_op.getIsHostTransfer());
return mlir::success();
}
auto recv_op = dyn_cast_or_null<RecvOp>(callee.getBody().front().front());
if (recv_op && SimplyReturnedOp(recv_op)) {
auto result_types =
mlir::cast<AsyncBundleType>(result.getType()).getTypes()[1];
mlir::Type received_type = mlir::TupleType::get(op->getContext(), {});
if (isa<TupleType>(result_types)) {
received_type = mlir::cast<TupleType>(result_types).getType(0);
}
value_map[result] = xla::internal::XlaBuilderFriend::BuildRecv(
ctx.builder, operands[0], xla::TypeToShape(received_type),
Convert_channel_handle(recv_op.getChannelHandle()),
recv_op.getIsHostTransfer());
return mlir::success();
}
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& computation =
ctx.converter->GetLoweredComputation(callee);
computation.mutable_proto()->mutable_computations(0)->set_execution_thread(
op.getExecutionThread().str());
auto [xla_op, computation_id] =
xla::internal::XlaBuilderFriend::BuildAsyncStart(
ctx.builder, operands, op.getExecutionThread().str(), computation,
xla::TypeToShape(result.getType()));
value_map[result] = xla_op;
computation.mutable_proto()->mutable_computations(0)->set_id(computation_id);
return success();
}
LogicalResult ExportXlaOp(AsyncUpdateOp op, OpLoweringContext ctx) {
if (!isa<AsyncStartOp, AsyncUpdateOp>(op.getBundle().getDefiningOp())) {
auto theerror = op.emitError()
<< "Defining op of AsyncUpdate's operand must be "
<< "async_start or async_update";
if (op.getBundle().getDefiningOp()) {
return theerror << ", but got "
<< op.getBundle().getDefiningOp()->getName();
} else {
return theerror << ".";
}
}
for (auto* user : op.getResult().getUsers()) {
if (!isa<AsyncUpdateOp, AsyncDoneOp>(user)) {
return op.emitOpError() << "Users of AsyncUpdate's return value must be "
<< "async_update or async_done";
}
}
auto& value_map = *ctx.values;
Value result = op.getResult();
xla::XlaOp operand;
if (failed(GetXlaOp(op.getBundle(), value_map, &operand, op)))
return failure();
value_map[result] = xla::internal::XlaBuilderFriend::BuildAsyncUpdate(
ctx.builder, operand, xla::TypeToShape(result.getType()));
return success();
}
LogicalResult ExportXlaOp(AsyncDoneOp op, OpLoweringContext ctx) {
if (!isa<AsyncStartOp, AsyncUpdateOp>(op.getBundle().getDefiningOp())) {
auto theerror = op.emitError()
<< "Defining op of AsyncDone's operand must be "
<< "async_start or async_update";
if (op.getBundle().getDefiningOp())
return theerror << ", but got "
<< op.getBundle().getDefiningOp()->getName();
return theerror << ".";
}
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getBundle(), value_map, &operand, op)))
return failure();
Operation* start = op;
while (start != nullptr && !isa<AsyncStartOp>(start)) {
start = start->getOperand(0).getDefiningOp();
if (start == nullptr || !isa<AsyncStartOp, AsyncUpdateOp>(start)) {
return op.emitError() << "Defining op of AsyncDone's operand must be "
<< "async_start or async_update";
}
}
if (!isa<AsyncStartOp>(start)) {
return op.emitError() << "Could not find async chain start";
}
mlir::func::FuncOp callee =
ctx.converter->LookUpSymbol(FlatSymbolRefAttr::get(
op->getContext(), cast<AsyncStartOp>(start).getCalledComputation()));
auto all_gather_op =
dyn_cast_or_null<AllGatherOp>(callee.getBody().front().front());
if (all_gather_op && SimplyReturnedOp(all_gather_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildAllGatherDone(
ctx.builder, operand, xla::TypeToShape(all_gather_op.getType(0)));
return success();
}
auto all_reduce_op =
dyn_cast_or_null<AllReduceOp>(callee.getBody().front().front());
if (all_reduce_op && SimplyReturnedOp(all_reduce_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildAllReduceDone(
ctx.builder, operand, xla::TypeToShape(all_reduce_op.getType(0)));
return success();
}
auto collective_permute_op =
dyn_cast_or_null<CollectivePermuteOp>(callee.getBody().front().front());
if (collective_permute_op && SimplyReturnedOp(collective_permute_op)) {
value_map[op.getResult(0)] =
xla::internal::XlaBuilderFriend::BuildCollectivePermuteDone(
ctx.builder, operand,
xla::TypeToShape(collective_permute_op.getType()));
return success();
}
auto copy_op = dyn_cast_or_null<CopyOp>(callee.getBody().front().front());
if (copy_op && SimplyReturnedOp(copy_op)) {
value_map[op.getResult(0)] = xla::internal::XlaBuilderFriend::BuildCopyDone(
ctx.builder, operand, xla::TypeToShape(copy_op.getType()));
return success();
}
auto send_op = dyn_cast_or_null<SendOp>(callee.getBody().front().front());
if (send_op && SimplyReturnedOp(send_op)) {
value_map[op.getResult(0)] = xla::internal::XlaBuilderFriend::BuildSendDone(
ctx.builder, operand,
Convert_channel_handle(send_op.getChannelHandle()),
send_op.getIsHostTransfer());
return success();
}
auto recv_op = dyn_cast_or_null<RecvOp>(callee.getBody().front().front());
if (recv_op && SimplyReturnedOp(recv_op)) {
auto result_types =
mlir::cast<AsyncBundleType>(op.getBundle().getType()).getTypes()[1];
mlir::Type received_type = mlir::TupleType::get(op->getContext(), {});
if (isa<TupleType>(result_types)) {
received_type = mlir::cast<TupleType>(result_types).getType(0);
}
xla::XlaOp xla_recv = xla::internal::XlaBuilderFriend::BuildRecvDone(
ctx.builder, operand, xla::TypeToShape(received_type),
Convert_channel_handle(recv_op.getChannelHandle()),
recv_op.getIsHostTransfer());
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = xla_recv;
} else {
BuildGetTupleElementsForTupleResults(op, xla_recv, ctx);
}
return success();
}
std::vector<xla::Shape> subshapes;
for (const auto& item : op.getResults().getType()) {
subshapes.push_back(xla::TypeToShape(item));
}
xla::Shape data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
xla::XlaOp exportedOp = xla::internal::XlaBuilderFriend::BuildAsyncDone(
ctx.builder, operand, data_shape);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = exportedOp;
} else {
BuildGetTupleElementsForTupleResults(op, exportedOp, ctx);
}
return success();
}
LogicalResult ExportXlaOp(BitcastConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] = xla::BitcastConvertType(
operand,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
LogicalResult ExportXlaOp(BroadcastInDimOp op, OpLoweringContext ctx) {
auto type = mlir::dyn_cast<RankedTensorType>(op.getType());
if (!type) return failure();
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] =
BroadcastInDim(operand, Convert_ArrayRef(type.getShape()),
Convert_broadcast_dimensions(op.getBroadcastDimensions()));
return success();
}
LogicalResult ExportXlaOp(StochasticConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, random;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getRandom(), value_map, &random, op)))
return failure();
value_map[op] = xla::StochasticConvertType(
operand, random,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
LogicalResult ExportXlaOp(CosineOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Cos(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(TanOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Tan(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(DotOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
value_map[op] = xla::Dot(
lhs, rhs, Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type);
return mlir::success();
}
LogicalResult ExportXlaOp(DotGeneralOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
  auto precision_config = Convert_precision_config(op.getPrecisionConfig());
  if (op.getAlgorithmAttr()) {
    absl::StatusOr<xla::PrecisionConfig::Algorithm> algorithm =
        xla::ConvertDotAlgorithm(op.getAlgorithmAttr());
    if (!algorithm.ok()) {
      return op.emitError(algorithm.status().ToString());
    }
    // Convert_precision_config returns nullptr when the precision_config
    // attribute is absent; allocate one so the algorithm can still be set.
    if (!precision_config) {
      precision_config = std::make_unique<xla::PrecisionConfig>();
    }
    precision_config->set_algorithm(algorithm.value());
  }
auto xlaOp = xla::DotGeneral(
lhs, rhs, Convert_dot_dimension_numbers(op.getDotDimensionNumbers()),
Unwrap(precision_config), preferred_element_type);
value_map[op] = xlaOp;
return mlir::success();
}
LogicalResult ExportXlaOp(SparseDotOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
llvm::SmallVector<xla::XlaOp> sparse_meta;
if (failed(GetTuple(op, op.getMeta(), ctx, sparse_meta))) return failure();
std::vector<xla::SparsityDescriptor> sparsity;
if (op.getLhsSparsity().has_value()) {
sparsity.push_back(
Convert_sparsity_descriptor(*op.getLhsSparsity(), true));
}
if (op.getRhsSparsity().has_value()) {
sparsity.push_back(
Convert_sparsity_descriptor(*op.getRhsSparsity(), false));
}
value_map[op] =
xla::SparseDot(lhs, rhs, absl::MakeSpan(sparse_meta), sparsity,
Convert_dot_dimension_numbers(op.getDotDimensionNumbers()),
Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type);
return mlir::success();
}
LogicalResult ExportXlaOp(DomainOp op, OpLoweringContext ctx) {
auto& valueMap = *ctx.values;
xla::Shape shape = xla::TypeToShape(op.getResult().getType());
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), valueMap, &operand, op)))
return failure();
auto entry = xla::ConvertSharding(op.getEntryMetadata());
if (!entry) return failure();
auto exit = xla::ConvertSharding(op.getExitMetadata());
if (!exit) return failure();
valueMap[op] = xla::internal::XlaBuilderFriend::BuildDomain(
ctx.builder, operand, *exit, *entry, shape);
return success();
}
LogicalResult ExportXlaOp(IfOp op, OpLoweringContext ctx) {
xla::XlaComputation true_branch;
xla::XlaComputation false_branch;
auto& value_map = *ctx.values;
llvm::SetVector<mlir::Value> implicit_true_operand_set,
implicit_false_operand_set;
getUsedValuesDefinedAbove(op.getTrueBranch(), op.getTrueBranch(),
implicit_true_operand_set);
getUsedValuesDefinedAbove(op.getFalseBranch(), op.getFalseBranch(),
implicit_false_operand_set);
llvm::SmallVector<mlir::Value> implicit_true_operands =
implicit_true_operand_set.takeVector();
llvm::SmallVector<mlir::Value> implicit_false_operands =
implicit_false_operand_set.takeVector();
llvm::SmallVector<std::optional<xla::OpSharding>> ret_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SmallVector<xla::XlaOp> true_args;
if (failed(GetXlaOps(op, implicit_true_operands, ctx, true_args)))
return failure();
llvm::SmallVector<xla::XlaOp> false_args;
if (failed(GetXlaOps(op, implicit_false_operands, ctx, false_args)))
return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> true_arg_shardings,
false_arg_shardings;
if (!ret_shardings.empty()) {
true_arg_shardings = GetXlaOpShardings(true_args);
false_arg_shardings = GetXlaOpShardings(false_args);
}
if (failed(ctx.converter->LowerRegionAsComputation(
&op.getTrueBranch(), &true_branch, implicit_true_operands,
{}, true,
true_arg_shardings, ret_shardings)) ||
failed(ctx.converter->LowerRegionAsComputation(
&op.getFalseBranch(), &false_branch, implicit_false_operands,
{}, true,
false_arg_shardings, ret_shardings))) {
return failure();
}
xla::XlaOp pred;
if (failed(GetXlaOp(op.getPred(), value_map, &pred, op))) return failure();
xla::XlaOp true_arg =
CreateTupleIfMultipleOps(ctx.builder, true_args, true_arg_shardings);
xla::XlaOp false_arg =
CreateTupleIfMultipleOps(ctx.builder, false_args, false_arg_shardings);
auto ifop =
xla::Conditional(pred, true_arg, true_branch, false_arg, false_branch);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = ifop;
} else {
BuildGetTupleElementsForTupleResults(op, ifop, ctx);
}
return success();
}
LogicalResult ExportXlaOp(CaseOp op, OpLoweringContext ctx) {
llvm::DenseMap<mlir::Value, xla::XlaOp>& value_map = *ctx.values;
MutableArrayRef<Region> branches = op.getBranches();
llvm::SmallVector<xla::XlaOp, 4> branch_operands(branches.size());
std::vector<xla::XlaComputation> computations(branches.size());
std::vector<xla::XlaComputation*> computations_p(branches.size());
for (unsigned i = 0; i < branches.size(); ++i) {
llvm::SetVector<mlir::Value> implicit_operand_set;
getUsedValuesDefinedAbove(branches[i], branches[i], implicit_operand_set);
llvm::SmallVector<mlir::Value> implicit_operands =
implicit_operand_set.takeVector();
llvm::SmallVector<std::optional<xla::OpSharding>> ret_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SmallVector<xla::XlaOp> args;
if (failed(GetXlaOps(op, implicit_operands, ctx, args))) return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> arg_shardings;
if (!ret_shardings.empty()) {
arg_shardings = GetXlaOpShardings(args);
}
branch_operands[i] =
CreateTupleIfMultipleOps(ctx.builder, args, arg_shardings);
computations_p[i] = &computations[i];
if (failed(ctx.converter->LowerRegionAsComputation(
&branches[i], computations_p[i], implicit_operands,
{}, true, arg_shardings,
ret_shardings)))
return failure();
}
xla::XlaOp index;
if (failed(GetXlaOp(op.getIndex(), value_map, &index, op))) return failure();
xla::XlaOp caseop = xla::Conditional(index, computations_p, branch_operands);
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = caseop;
} else {
BuildGetTupleElementsForTupleResults(op, caseop, ctx);
}
return success();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::CompareOp op,
OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
auto dir = Convert_comparison_direction(
mlir::mhlo::stringifyComparisonDirection(op.getComparisonDirection()));
auto type_attr = op.getCompareTypeAttr();
xla::XlaOp xla_result;
if (type_attr && type_attr.getValue() != mlir::mhlo::ComparisonType::NOTYPE) {
auto type = xla::StringToComparisonType(
stringifyComparisonType(type_attr.getValue()).str())
.value();
xla_result = xla::Compare(lhs, rhs, {}, dir, type);
} else {
xla_result = xla::Compare(lhs, rhs, dir);
}
value_map[op] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(ConstantOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(mlir::mhlo::ConvolutionOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp lhs, rhs;
if (failed(GetXlaOp(op.getLhs(), value_map, &lhs, op)))
return mlir::failure();
if (failed(GetXlaOp(op.getRhs(), value_map, &rhs, op)))
return mlir::failure();
xla::PrimitiveType preferred_element_type =
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType()));
xla::XlaOp xla_result = xla::ConvGeneralDilated(
lhs, rhs, Convert_window_strides(op.getWindowStrides()),
Convert_padding(op.getPadding()),
Convert_lhs_dilation(op.getLhsDilation()),
Convert_rhs_dilation(op.getRhsDilation()),
xla::ConvertConvDimensionNumbers(op.getDimensionNumbers()),
Convertuint64_t(op.getFeatureGroupCount()),
Convertuint64_t(op.getBatchGroupCount()),
Unwrap(Convert_precision_config(op.getPrecisionConfig())),
preferred_element_type, Convert_window_reversal(op.getWindowReversal()));
value_map[op] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(ConvertOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] = xla::ConvertElementType(
operand,
xla::ConvertMlirTypeToPrimitiveType(getElementTypeOrSelf(op.getType())));
return success();
}
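// Lowers mhlo.custom_call. Calls targeting ApproxTopK are validated and
// emitted through the ApproxTopK builders; all other targets become XLA
// CustomCall instructions, optionally with a lowered called computation or
// explicit operand/result layouts.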
LogicalResult ExportXlaOp(CustomCallOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> args;
if (failed(GetTuple(op, op.getInputs(), ctx, args))) return failure();
if (op.getCallTargetName() == kApproxTopK) {
auto isSupportedAttrName = [](NamedAttribute attr) {
auto name = attr.getName();
return name == kCallTargetName || name == kBackendConfig ||
name == kApiVersion || name == kCalledComputations ||
name == kHasSideEffect;
};
for (const auto& attr : op->getAttrs()) {
if (!isSupportedAttrName(attr))
return op.emitOpError()
<< attr.getName().getValue()
<< " is not a supported attribute for ApproxTopK";
}
auto backend_config =
mlir::dyn_cast_or_null<mlir::DictionaryAttr>(op.getBackendConfigAttr());
if (!backend_config)
return op.emitOpError() << "Missing backend_config attribute";
for (auto attr : backend_config) {
auto name = attr.getName();
if (!(name == kTopK || name == kReductionDim || name == kRecallTarget ||
name == kAggregateToTopk || name == kReductionInputSizeOverride ||
name == kIsFallback))
return op.emitOpError()
<< name.getValue() << " is not a supported backend_config"
<< " attribute for ApproxTopK";
}
auto checkI64Attr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<IntegerAttr>(attr_name);
if (!attr || !attr.getType().isInteger(64))
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of i64 type";
return success();
};
auto checkF32Attr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<FloatAttr>(attr_name);
if (!attr || !attr.getType().isF32())
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of f32 type";
return success();
};
auto checkBoolAttr =
[&](const std::string& attr_name) -> mlir::LogicalResult {
if (!backend_config.contains(attr_name))
return op.emitOpError()
<< "Missing " << attr_name << " attribute in backend_config";
auto attr = backend_config.getAs<BoolAttr>(attr_name);
if (!attr)
return op.emitOpError()
<< attr_name
<< " attribute in backend_config must be of bool type";
return success();
};
if (failed(checkI64Attr(kTopK))) return failure();
if (failed(checkI64Attr(kReductionDim))) return failure();
if (failed(checkF32Attr(kRecallTarget))) return failure();
if (failed(checkBoolAttr(kAggregateToTopk))) return failure();
if (failed(checkI64Attr(kReductionInputSizeOverride))) return failure();
bool has_is_fallback = backend_config.contains(kIsFallback);
if (has_is_fallback && !backend_config.getAs<BoolAttr>(kIsFallback))
return op.emitOpError()
<< "is_fallback attribute in backend_config must be of bool type";
int64_t top_k = backend_config.getAs<IntegerAttr>(kTopK).getInt();
int64_t reduction_dim =
backend_config.getAs<IntegerAttr>(kReductionDim).getInt();
float recall_target = backend_config.getAs<FloatAttr>(kRecallTarget)
.getValue()
.convertToFloat();
bool aggregate_to_topk =
backend_config.getAs<BoolAttr>(kAggregateToTopk).getValue();
int64_t reduction_input_size_override =
backend_config.getAs<IntegerAttr>(kReductionInputSizeOverride).getInt();
bool is_fallback = has_is_fallback &&
backend_config.getAs<BoolAttr>(kIsFallback).getValue();
if (args.size() % 2 != 0) {
return op.emitOpError() << "ApproxTopK takes an even number of operands.";
}
auto num_inputs = args.size() / 2;
absl::Span<const xla::XlaOp> inputs(args.begin(), num_inputs);
absl::Span<const xla::XlaOp> init_values(args.begin() + num_inputs,
num_inputs);
if (num_inputs != op.getNumResults()) {
return op.emitOpError() << "num_results does not match num_inputs";
}
SmallVector<RankedTensorType> input_types, init_value_types, result_types;
for (size_t i = 0; i < num_inputs; ++i) {
auto input_type =
mlir::dyn_cast<RankedTensorType>(op.getOperand(i).getType());
if (!input_type) return failure();
input_types.push_back(input_type);
auto init_value_type = mlir::dyn_cast<RankedTensorType>(
op.getOperand(num_inputs + i).getType());
if (!init_value_type) return failure();
init_value_types.push_back(init_value_type);
auto result_type =
mlir::dyn_cast<RankedTensorType>(op.getResult(i).getType());
if (!result_type) return failure();
result_types.push_back(result_type);
}
for (size_t i = 0; i < inputs.size(); ++i) {
if (input_types[0].getShape() != input_types[i].getShape()) {
return op.emitOpError() << "input shape mismatch at position " << i;
}
if (init_value_types[i].getElementType() !=
input_types[i].getElementType()) {
return op.emitOpError()
<< "input and init_value element type mismatch at position "
<< i;
}
if (input_types[i].getElementType() != result_types[i].getElementType()) {
return op.emitOpError()
<< "result element type mismatch at position " << i;
}
for (size_t j = 0; j < input_types[i].getRank(); ++j) {
if (j == reduction_dim) {
auto reduction_output_size = xla::ApproxTopKReductionOutputSize(
input_types[i].getShape()[j], input_types[i].getRank(), top_k,
recall_target, aggregate_to_topk, reduction_input_size_override);
if (!reduction_output_size.ok()) return failure();
if (result_types[i].getShape()[j] != reduction_output_size->first)
return op.emitOpError()
<< "ApproxTopK aggregates to k="
<< reduction_output_size->first << ", but got "
<< result_types[i].getShape()[j];
continue;
}
if (input_types[i].getShape()[j] != result_types[i].getShape()[j]) {
return op.emitOpError() << "result shape mismatch at position " << i
<< ", index " << j;
}
}
}
auto called_computations = op.getCalledComputations();
if (called_computations.size() != 1) {
return op.emitOpError()
<< "ApproxTopK takes exactly 1 called_computation.";
}
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
mlir::cast<FlatSymbolRefAttr>(op.getCalledComputations()[0]));
mlir::FunctionType callee_type = callee.getFunctionType();
SmallVector<Type, 4> expected_callee_input_types;
for (unsigned i = 0; i < num_inputs; ++i) {
auto scalar = RankedTensorType::get({}, input_types[i].getElementType());
expected_callee_input_types.push_back(scalar);
expected_callee_input_types.push_back(scalar);
}
FunctionType expected_callee_type = mlir::FunctionType::get(
op->getContext(), expected_callee_input_types,
RankedTensorType::get({}, IntegerType::get(op->getContext(), 1)));
if (callee_type != expected_callee_type) {
return op.emitOpError()
<< "called_computation type does not match the expected type. Got "
<< callee_type << " expected " << expected_callee_type;
}
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& comparator =
ctx.converter->GetLoweredComputation(callee);
if (reduction_dim < 0 || reduction_dim > input_types[0].getRank())
return op.emitOpError() << "reduction_dim out of range";
if (recall_target <= 0 || recall_target > 1.0)
return op.emitOpError() << "recall_target out of range";
if (reduction_input_size_override >= 0 &&
reduction_input_size_override <
input_types[0].getShape()[reduction_dim])
return op.emitOpError() << "reduction_input_size_override out of range";
xla::XlaOp cc_op;
if (is_fallback) {
cc_op = xla::ApproxTopKFallback(
ctx.builder, inputs, init_values, top_k, reduction_dim, comparator,
recall_target, aggregate_to_topk, reduction_input_size_override);
} else {
cc_op = xla::ApproxTopK(ctx.builder, inputs, init_values, top_k,
reduction_dim, comparator, recall_target,
aggregate_to_topk, reduction_input_size_override);
}
BuildGetTupleElementsForTupleResults(op, cc_op, ctx);
return success();
}
  if (op.getCalledComputations().size() > 1)
    return op.emitOpError()
           << "cannot export with more than one called computation";
if (!op.getCalledComputations().empty() && op.getOperandLayouts() &&
op.getResultLayouts()) {
return op.emitOpError() << "cannot export if both called computation and "
"layouts are specified";
}
auto xla_api_version = xla::ConvertCustomCallApiVersion(op.getApiVersion());
if (!xla_api_version.ok()) return failure();
std::string backend_config;
if (*xla_api_version == xla::CustomCallApiVersion::API_VERSION_TYPED_FFI) {
if (auto dict = mlir::dyn_cast_or_null<mlir::DictionaryAttr>(
op.getBackendConfig().value_or(mlir::Attribute()))) {
llvm::raw_string_ostream(backend_config) << dict;
}
} else {
if (auto str = mlir::dyn_cast_or_null<mlir::StringAttr>(
op.getBackendConfig().value_or(mlir::Attribute()))) {
llvm::raw_string_ostream(backend_config) << str.strref();
}
}
absl::StatusOr<xla::Literal> literal;
const xla::Literal* literal_ptr = nullptr;
auto literal_attr = op->getAttrOfType<DenseElementsAttr>(kMhloLiteral);
if (literal_attr) {
literal = CreateArrayLiteralFromAttr(literal_attr, {});
if (!literal.ok()) return failure();
literal_ptr = &*literal;
}
auto aliasInfo =
xla::ConvertOutputOperandAliasing(op.getOutputOperandAliases());
auto output_operand_aliasing = absl::MakeSpan(*aliasInfo);
auto custom_call_schedule =
xla::ConvertCustomCallSchedule(op.getCustomCallSchedule());
if (!custom_call_schedule.ok()) return failure();
std::string call_target_name(op.getCallTargetName());
xla::Shape result_shape;
if (op->getNumResults() == 1) {
result_shape = xla::TypeToShape(op.getResult(0).getType());
} else {
std::vector<xla::Shape> subshapes;
for (const auto& item : op.getResults().getType()) {
subshapes.push_back(xla::TypeToShape(item));
}
result_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
}
xla::XlaOp custom_call;
if (op.getCalledComputations().size() == 1) {
mlir::func::FuncOp callee = ctx.converter->LookUpSymbol(
mlir::cast<FlatSymbolRefAttr>(op.getCalledComputations()[0]));
if (failed(ctx.converter->RunOnFunction(callee))) return failure();
xla::XlaComputation& computation =
ctx.converter->GetLoweredComputation(callee);
custom_call = xla::CustomCallWithComputation(
ctx.builder, call_target_name, args, computation, result_shape,
backend_config, op.getHasSideEffect(), output_operand_aliasing,
literal_ptr, *custom_call_schedule, *xla_api_version);
} else if (op.getOperandLayouts() && op.getResultLayouts()) {
auto operand_shapes_with_layout = ConvertTypesToShapesWithLayout(
op.getOperandTypes(), op.getOperandLayouts().value());
SetLayout(result_shape, op.getResultLayouts().value());
custom_call = xla::CustomCallWithLayout(
ctx.builder, call_target_name, args, result_shape,
operand_shapes_with_layout, backend_config, op.getHasSideEffect(),
output_operand_aliasing, literal_ptr, *custom_call_schedule,
*xla_api_version);
} else {
custom_call = xla::CustomCall(
ctx.builder, call_target_name, args, result_shape, backend_config,
op.getHasSideEffect(), output_operand_aliasing, literal_ptr,
*custom_call_schedule, *xla_api_version);
}
if (op->getNumResults() == 1) {
value_map[op.getResult(0)] = custom_call;
} else {
BuildGetTupleElementsForTupleResults(op, custom_call, ctx);
}
return success();
}
LogicalResult ExportXlaOp(InfeedOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
auto result_types = op.getResultTypes();
auto num_results = op.getNumResults();
xla::Shape token_shape = xla::TypeToShape(result_types[num_results - 1]);
std::vector<xla::Shape> subshapes;
for (const auto& item : llvm::enumerate(result_types)) {
if (item.index() == num_results - 1) break;
subshapes.push_back(xla::TypeToShape(item.value()));
}
xla::Shape data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
auto xla_result = xla::InfeedWithToken(token, data_shape,
std::string(op.getInfeedConfig()));
ctx.builder->ClearSharding();
if (!subshapes.empty()) {
auto data_tuple_element = xla::GetTupleElement(xla_result, 0);
for (const auto& item : llvm::enumerate(op.getResults())) {
if (item.index() == num_results - 1) break;
value_map[item.value()] =
xla::GetTupleElement(data_tuple_element, item.index());
}
}
value_map[op.getResult(num_results - 1)] =
xla::GetTupleElement(xla_result, 1);
return success();
}
LogicalResult ExportXlaOp(IotaOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
value_map[op] = xla::Iota(ctx.builder, xla::TypeToShape(op.getType()),
op.getIotaDimension());
return success();
}
LogicalResult ExportXlaOp(MapOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComputation(),
&computation))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
value_map[op] = xla::Map(ctx.builder, operands, computation,
Convert_dimensions(op.getDimensions()));
return success();
}
LogicalResult ExportXlaOp(OutfeedOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
const auto sharding = ctx.builder->sharding();
xla::XlaOp operand;
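  // If the builder's tuple sharding does not provide one entry per operand,
  // build the operand tuple without any sharding attached.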
if (sharding.has_value() &&
sharding->tuple_shardings_size() != operands.size()) {
xla::XlaScopedShardingAssignment scoped_sharding(ctx.builder, std::nullopt);
operand = Tuple(ctx.builder, operands);
} else {
operand = Tuple(ctx.builder, operands);
}
std::vector<xla::Shape> subshapes;
for (auto operand : op.getInputs())
subshapes.push_back(xla::TypeToShape(operand.getType()));
xla::Shape shape_with_layout = xla::ShapeUtil::MakeTupleShape(subshapes);
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
value_map[op] = xla::OutfeedWithToken(operand, token, shape_with_layout,
std::string(op.getOutfeedConfig()));
return success();
}
LogicalResult ExportXlaOp(PartitionIdOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::Shape shape = xla::TypeToShape(op.getResult().getType());
value_map[op] =
xla::internal::XlaBuilderFriend::BuildPartitionId(ctx.builder, shape);
return success();
}
LogicalResult ExportXlaOp(PadOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::PaddingConfig padding_config;
auto edge_padding_low = ConvertDenseIntAttr(op.getEdgePaddingLow());
auto edge_padding_high = ConvertDenseIntAttr(op.getEdgePaddingHigh());
auto interior_padding = ConvertDenseIntAttr(op.getInteriorPadding());
for (int64_t i = 0, end = edge_padding_low.size(); i < end; ++i) {
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(edge_padding_low[i]);
dims->set_edge_padding_high(edge_padding_high[i]);
dims->set_interior_padding(interior_padding[i]);
}
xla::XlaOp operand, padding_value;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getPaddingValue(), value_map, &padding_value, op)))
return failure();
value_map[op] = xla::Pad(operand, padding_value, padding_config);
return success();
}
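// Lowers mhlo.recv as a Recv/RecvDone pair; element 0 of the RecvDone result
// carries the received data and element 1 the token.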
LogicalResult ExportXlaOp(RecvOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
auto result_types = op.getResultTypes();
auto num_results = op.getNumResults();
xla::Shape token_shape = xla::TypeToShape(result_types[num_results - 1]);
std::vector<xla::Shape> subshapes;
for (const auto& item : llvm::enumerate(result_types)) {
if (item.index() == num_results - 1) break;
subshapes.push_back(xla::TypeToShape(item.value()));
}
xla::Shape data_shape;
if (subshapes.size() == 1)
data_shape = subshapes[0];
else
data_shape = xla::ShapeUtil::MakeTupleShape(subshapes);
token = xla::internal::XlaBuilderFriend::BuildRecv(
ctx.builder, token, data_shape,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
xla::XlaOp xla_result = xla::internal::XlaBuilderFriend::BuildRecvDone(
ctx.builder, token, data_shape,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
auto data_tuple_element = xla::GetTupleElement(xla_result, 0);
if (subshapes.size() == 1) {
value_map[op.getResult(0)] = data_tuple_element;
} else {
for (const auto& item : llvm::enumerate(op.getResults())) {
if (item.index() == num_results - 1) break;
value_map[item.value()] =
xla::GetTupleElement(data_tuple_element, item.index());
}
}
value_map[op.getResult(num_results - 1)] =
xla::GetTupleElement(xla_result, 1);
return success();
}
LogicalResult ExportXlaOp(ReduceOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation body;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getBody(), &body))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands, init_values;
if (failed(GetTuple(op, op.getInputs(), ctx, operands)) ||
failed(GetTuple(op, op.getInitValues(), ctx, init_values))) {
return failure();
}
xla::XlaOp result =
xla::Reduce(ctx.builder, operands, init_values, body,
Convert_broadcast_dimensions(op.getDimensions()));
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = result;
} else {
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(ReduceWindowOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation body;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getBody(), &body))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands, init_values;
if (failed(GetTuple(op, op.getInputs(), ctx, operands)) ||
failed(GetTuple(op, op.getInitValues(), ctx, init_values))) {
return failure();
}
xla::XlaOp result = xla::ReduceWindowWithGeneralPadding(
operands, init_values, body,
ConvertDenseIntAttr(op.getWindowDimensions()),
ConvertDenseIntAttr(op.getWindowStrides()),
ConvertDenseIntAttr(op.getBaseDilations()),
ConvertDenseIntAttr(op.getWindowDilations()),
Convert_padding(op.getPadding()));
if (op.getNumResults() == 1) {
value_map[op.getResult(0)] = result;
} else {
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(ReshapeOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
value_map[op] =
xla::Reshape(operand, xla::TypeToShape(op.getType()).dimensions());
return success();
}
LogicalResult ExportXlaOp(ReturnOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(RngBitGeneratorOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto results = op.getResults();
auto xla_arg_1 = value_map[*op.getODSOperands(0).begin()];
auto xla_result = xla::RngBitGenerator(
static_cast<xla::RandomAlgorithm>(op.getRngAlgorithm()),
Unwrap(xla_arg_1), xla::TypeToShape(results[1].getType()));
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(XlaRngGetAndUpdateStateOp op, OpLoweringContext ctx) {
(*ctx.values)[op.getResult()] =
xla::internal::XlaBuilderFriend::BuildRngGetAndUpdateState(
ctx.builder, static_cast<int64_t>(op.getDelta()),
xla::TypeToShape(op.getType()));
return mlir::success();
}
LogicalResult ExportXlaOp(BatchNormGradOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, scale, mean, variance, grad_output;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getScale(), value_map, &scale, op))) return failure();
if (failed(GetXlaOp(op.getMean(), value_map, &mean, op))) return failure();
if (failed(GetXlaOp(op.getVariance(), value_map, &variance, op)))
return failure();
if (failed(GetXlaOp(op.getGradOutput(), value_map, &grad_output, op)))
return failure();
auto xla_result =
xla::BatchNormGrad(operand, scale, mean, variance, grad_output,
ConvertAPFloat(op.getEpsilon()), op.getFeatureIndex());
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(BatchNormTrainingOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand, scale, offset;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getScale(), value_map, &scale, op))) return failure();
if (failed(GetXlaOp(op.getOffset(), value_map, &offset, op)))
return failure();
auto xla_result = xla::BatchNormTraining(operand, scale, offset,
ConvertAPFloat(op.getEpsilon()),
op.getFeatureIndex());
BuildGetTupleElementsForTupleResults(op, xla_result, ctx);
return mlir::success();
}
LogicalResult ExportXlaOp(RngOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp a, b;
if (failed(GetXlaOp(op.getA(), value_map, &a, op))) return failure();
if (failed(GetXlaOp(op.getB(), value_map, &b, op))) return failure();
if (op.getRngDistribution() == RngDistribution::UNIFORM) {
value_map[op] = xla::RngUniform(a, b, xla::TypeToShape(op.getType()));
return success();
} else if (op.getRngDistribution() == RngDistribution::NORMAL) {
value_map[op] = xla::RngNormal(a, b, xla::TypeToShape(op.getType()));
return success();
}
return failure();
}
LogicalResult ExportXlaOp(ScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation update_computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getUpdateComputation(),
&update_computation))) {
return failure();
}
xla::ScatterDimensionNumbers dimension_numbers =
Convert_scatter_dimension_numbers(op.getScatterDimensionNumbers());
llvm::SmallVector<xla::XlaOp> operands;
llvm::SmallVector<xla::XlaOp> updates;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
if (failed(GetTuple(op, op.getUpdates(), ctx, updates))) return failure();
xla::XlaOp scatter_indices;
if (failed(GetXlaOp(op.getScatterIndices(), value_map, &scatter_indices, op)))
return failure();
auto scatter_op = xla::Scatter(
operands, scatter_indices, updates, update_computation, dimension_numbers,
op.getIndicesAreSorted(), op.getUniqueIndices());
if (op->getNumResults() == 1) {
value_map[op.getResult(0)] = scatter_op;
return success();
}
BuildGetTupleElementsForTupleResults(op, scatter_op, ctx);
return success();
}
LogicalResult ExportXlaOp(SelectAndScatterOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaComputation select;
xla::XlaComputation scatter;
if (failed(
ctx.converter->LowerRegionAsComputation(&op.getSelect(), &select)) ||
failed(ctx.converter->LowerRegionAsComputation(&op.getScatter(),
&scatter))) {
return failure();
}
xla::XlaOp operand, source, init_value;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
if (failed(GetXlaOp(op.getSource(), value_map, &source, op)))
return failure();
if (failed(GetXlaOp(op.getInitValue(), value_map, &init_value, op)))
return failure();
value_map[op] = xla::SelectAndScatterWithGeneralPadding(
operand, select, ConvertDenseIntAttr(op.getWindowDimensions()),
ConvertDenseIntAttr(op.getWindowStrides()),
Convert_padding(op.getPadding()), source, init_value, scatter);
return success();
}
LogicalResult ExportXlaOp(SendOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
xla::XlaOp operand;
if (operands.size() == 1)
operand = operands[0];
else
operand = Tuple(ctx.builder, operands);
xla::XlaOp token;
if (failed(GetXlaOp(op.getToken(), value_map, &token, op))) return failure();
token = xla::internal::XlaBuilderFriend::BuildSend(
ctx.builder, operand, token,
Convert_channel_handle(op.getChannelHandle()), op.getIsHostTransfer());
value_map[op] = xla::internal::XlaBuilderFriend::BuildSendDone(
ctx.builder, token, Convert_channel_handle(op.getChannelHandle()),
op.getIsHostTransfer());
return success();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::SetDimensionSizeOp op,
OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp array;
if (failed(GetXlaOp(op.getOperand(), value_map, &array, op)))
return mlir::failure();
auto dimension = Convertuint64_t(op.getDimension());
auto shape_or = ctx.builder->GetShapePtr(array);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::XlaOp xla_result;
if (auto constant = llvm::dyn_cast_or_null<mlir::mhlo::ConstantOp>(
op.getSize().getDefiningOp());
constant != nullptr) {
auto value = constant.getValue();
auto values = value.getValues<mlir::IntegerAttr>();
if ((*values.begin()).getValue().getSExtValue() ==
shape_or.value()->dimensions(dimension)) {
xla_result = xla::RemoveDynamicDimension(array, dimension);
}
}
if (!xla_result.valid()) {
xla::XlaOp dynamic_size;
if (failed(GetXlaOp(op.getSize(), value_map, &dynamic_size, op)))
return mlir::failure();
xla_result = xla::SetDimensionSize(array, dynamic_size, dimension);
}
value_map[result] = xla_result;
return mlir::success();
}
mlir::LogicalResult ExportXlaOp(mlir::mhlo::SineOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp arg;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &arg, op)))
return mlir::failure();
auto xla_result = xla::Sin(Unwrap(arg));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(SortOp op, OpLoweringContext ctx) {
xla::XlaComputation comparator;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getComparator(),
&comparator)))
return failure();
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getInputs(), ctx, operands))) return failure();
auto sorted =
xla::Sort(operands, comparator, op.getDimension(), op.getIsStable());
auto& value_map = *ctx.values;
auto shape_or = sorted.builder()->GetShape(sorted);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::Shape& shape = shape_or.value();
if (!shape.IsTuple()) {
value_map[op.getResult(0)] = sorted;
return success();
}
BuildGetTupleElementsForTupleResults(op, sorted, ctx);
return success();
}
LogicalResult ExportXlaOp(SubtractOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp lhs;
if (failed(GetXlaOp(*op.getODSOperands(0).begin(), value_map, &lhs, op)))
return mlir::failure();
xla::XlaOp rhs;
if (failed(GetXlaOp(*op.getODSOperands(1).begin(), value_map, &rhs, op)))
return mlir::failure();
auto xla_result = xla::Sub(Unwrap(lhs), Unwrap(rhs));
value_map[result] = xla_result;
return mlir::success();
}
LogicalResult ExportXlaOp(TraceOp op, OpLoweringContext ctx) {
return success();
}
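// Lowers mhlo.while. Values used inside the regions but defined above them are
// captured and passed as extra loop-carried operands so the lowered cond/body
// computations are self-contained.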
LogicalResult ExportXlaOp(WhileOp op, OpLoweringContext ctx) {
xla::XlaComputation condition;
xla::XlaComputation body;
llvm::SmallVector<std::optional<xla::OpSharding>> res_shardings =
GetResultShardings(ctx.builder->sharding(), op->getNumResults());
llvm::SetVector<mlir::Value> implicit_operand_set;
getUsedValuesDefinedAbove(op->getRegions(), implicit_operand_set);
llvm::SmallVector<mlir::Value> implicit_operands =
implicit_operand_set.takeVector();
llvm::SmallVector<xla::XlaOp> implicit_args;
if (failed(GetXlaOps(op, implicit_operands, ctx, implicit_args)))
return failure();
llvm::SmallVector<std::optional<xla::OpSharding>> implicit_shardings;
if (!implicit_args.empty() && !res_shardings.empty()) {
implicit_shardings = GetXlaOpShardings(implicit_args);
res_shardings.append(implicit_shardings.begin(), implicit_shardings.end());
if (std::optional<xla::OpSharding> new_sharding =
CreateTupleSharding(res_shardings)) {
ctx.builder->SetSharding(*new_sharding);
}
}
if (failed(ctx.converter->LowerRegionAsComputation(
          &op.getBody(), &body, implicit_operands,
          /*implicit_results=*/implicit_operands,
          /*ensure_single_arg=*/true, /*arg_shardings=*/res_shardings,
          /*ret_shardings=*/res_shardings)) ||
      failed(ctx.converter->LowerRegionAsComputation(
          &op.getCond(), &condition, implicit_operands,
          /*implicit_results=*/{},
          /*ensure_single_arg=*/true, /*arg_shardings=*/res_shardings))) {
return failure();
}
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getOperands(), ctx, operands))) return failure();
operands.append(implicit_args.begin(), implicit_args.end());
xla::XlaOp operand = operands[0];
if (operands.size() > 1) operand = Tuple(ctx.builder, operands);
xla::XlaOp whileop = xla::While(condition, body, operand);
auto& value_map = *ctx.values;
auto shape_or = whileop.builder()->GetShape(whileop);
if (!shape_or.ok()) {
return op.emitError(shape_or.status().ToString());
}
xla::Shape& shape = shape_or.value();
if (!shape.IsTuple()) {
value_map[op.getResult(0)] = whileop;
return success();
}
BuildGetTupleElementsForTupleResults(
op, whileop, ctx, implicit_args.size());
return success();
}
LogicalResult ExportXlaOp(OptimizationBarrierOp op, OpLoweringContext ctx) {
llvm::SmallVector<xla::XlaOp> operands;
if (failed(GetTuple(op, op.getOperands(), ctx, operands))) return failure();
if (operands.empty()) return success();
auto& value_map = *ctx.values;
if (operands.size() == 1) {
value_map[op.getOperation()->getResult(0)] =
xla::OptimizationBarrier(operands[0]);
} else {
auto result = xla::OptimizationBarrier(Tuple(ctx.builder, operands));
BuildGetTupleElementsForTupleResults(op, result, ctx);
}
return success();
}
LogicalResult ExportXlaOp(FusionOp op, OpLoweringContext ctx) {
if (!op.getFusionKind()) {
op.emitOpError() << "requires fusion kind for HLO translation";
return failure();
}
xla::XlaComputation fused_computation;
if (failed(ctx.converter->LowerRegionAsComputation(&op.getFusedComputation(),
&fused_computation)))
return failure();
auto& values = *ctx.values;
auto aliasInfo =
xla::ConvertOutputOperandAliasing(op.getOutputOperandAliases());
auto output_operand_aliasing = absl::MakeSpan(*aliasInfo);
llvm::SmallVector<xla::XlaOp, 4> operands;
for (auto operand : op.getInputs()) operands.push_back(values[operand]);
auto fusion_kind_string =
mlir::mhlo::stringifyFusionKind(op.getFusionKind().value());
xla::XlaOp fusion = xla::internal::XlaBuilderFriend::BuildFusion(
ctx.builder, operands,
absl::string_view(fusion_kind_string.data(), fusion_kind_string.size()),
fused_computation, output_operand_aliasing);
if (op.getNumResults() == 1) {
values[op.getResult(0)] = fusion;
} else {
BuildGetTupleElementsForTupleResults(op, fusion, ctx);
}
return success();
}
LogicalResult ExportXlaOp(BitcastOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
xla::XlaOp bitcast = xla::internal::XlaBuilderFriend::BuildBitcast(
ctx.builder, operand, xla::TypeToShape(op.getType()));
value_map[op] = bitcast;
if (ctx.converter->GetOptions().propagate_bitcast_layouts_to_backend_config) {
xla::HloInstructionProto* bitcast_proto =
xla::internal::XlaBuilderFriend::GetInstruction(bitcast);
xla::HloInstructionProto* operand_proto =
xla::internal::XlaBuilderFriend::GetInstruction(operand);
xla::LayoutProto result_layout =
ExtractLayout(op, bitcast_proto->shape().dimensions_size(),
kResultLayout)
.ToProto();
xla::LayoutProto source_layout =
ExtractLayout(op, operand_proto->shape().dimensions_size(),
kSourceLayout)
.ToProto();
xla::gpu::BitcastBackendConfig bitcast_config;
*bitcast_config.mutable_source_layout() = source_layout;
*bitcast_config.mutable_result_layout() = result_layout;
*bitcast_proto->mutable_backend_config() =
bitcast_config.SerializeAsString();
}
return success();
}
LogicalResult ExportXlaOp(UniformQuantizeOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(UniformDequantizeOp op, OpLoweringContext ctx) {
return failure();
}
LogicalResult ExportXlaOp(TopKOp op, OpLoweringContext ctx) {
auto& value_map = *ctx.values;
xla::XlaOp operand;
if (failed(GetXlaOp(op.getOperand(), value_map, &operand, op)))
return failure();
auto topk = xla::TopK(operand, op.getK(), op.getLargest());
BuildGetTupleElementsForTupleResults(op, topk, ctx);
return success();
}
LogicalResult ExportXlaOp(MinimumBroadcastShapesOp op, OpLoweringContext ctx) {
return failure();
}
}
}
}
#include "xla/hlo/translate/mhlo_to_hlo/operator_writers.inc"
namespace mlir {
namespace {
LogicalResult ConvertLayout(mlir::Operation* op, const mlir::ArrayAttr& layout,
xla::ShapeProto* shape) {
if (shape->element_type() == xla::TUPLE) {
auto subshapes = shape->mutable_tuple_shapes();
size_t subshapes_data_size = subshapes->size();
if (!subshapes->empty() &&
subshapes->Mutable(subshapes->size() - 1)->element_type() == xla::TOKEN)
subshapes_data_size = subshapes->size() - 1;
if (layout.size() != subshapes_data_size) {
op->emitOpError() << "Expected layout of size " << layout.size()
<< ", but found " << subshapes->size();
return failure();
}
for (int i = 0; i < subshapes_data_size; i++) {
mlir::Attribute child = layout[i];
if (mlir::isa<mlir::UnitAttr>(child)) {
continue;
}
mlir::ArrayAttr c = mlir::dyn_cast<mlir::ArrayAttr>(child);
if (!c) {
op->emitOpError() << "Type Error: Expected layout array attribute";
return failure();
}
if (failed(ConvertLayout(op, c, subshapes->Mutable(i)))) {
return failure();
}
}
} else {
int rank = shape->dimensions().size();
if (rank) {
if (layout.size() != rank) {
return failure();
}
std::vector<int64_t> array(rank);
for (int i = 0; i < rank; i++) {
mlir::IntegerAttr attr = mlir::dyn_cast<mlir::IntegerAttr>(layout[i]);
if (!attr) {
op->emitOpError() << "Type Error: Expected layout integer attribute";
return failure();
}
array[i] = attr.getInt();
}
*shape->mutable_layout() = xla::LayoutUtil::MakeLayout(array).ToProto();
}
}
return success();
}
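// Recursively applies the infeed layout attribute to a shape proto; for leaf
// (non-tuple) shapes, layout_index selects which entry of the attribute to
// use.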
LogicalResult ConvertInfeedtLayout(mlir::Operation* op,
const mlir::ArrayAttr& layout,
xla::ShapeProto* shape,
int64_t layout_index = 0) {
if (shape->element_type() != xla::TUPLE) {
mlir::ArrayAttr child_layout =
mlir::dyn_cast<mlir::ArrayAttr>(layout[layout_index]);
if (!child_layout) {
op->emitOpError() << "Type Error: Expected layout array attribute";
return failure();
}
int rank = shape->dimensions().size();
if (rank) {
if (child_layout.size() != rank) {
return failure();
}
std::vector<int64_t> array(rank);
for (int i = 0; i < rank; i++) {
mlir::IntegerAttr attr =
mlir::dyn_cast<mlir::IntegerAttr>(child_layout[i]);
if (!attr) {
op->emitOpError() << "Type Error: Expected layout integer attribute";
return failure();
}
array[i] = attr.getInt();
}
*shape->mutable_layout() = xla::LayoutUtil::MakeLayout(array).ToProto();
}
return success();
}
auto subshapes = shape->mutable_tuple_shapes();
auto datashape = subshapes->Mutable(0);
if (datashape->element_type() == xla::TUPLE) {
auto data_subshapes = datashape->mutable_tuple_shapes();
if (layout.size() != data_subshapes->size()) {
op->emitOpError() << "Expected " << data_subshapes->size()
<< " layout attribute(s) for infeed data, but found "
<< layout.size();
return failure();
}
for (int i = 0; i < data_subshapes->size(); i++) {
if (failed(
ConvertInfeedtLayout(op, layout, data_subshapes->Mutable(i), i)))
return failure();
}
} else {
if (layout.size() != subshapes->size()) {
op->emitOpError() << "Expected " << subshapes->size()
<< " layout attribute(s) for infeed data, but found "
<< layout.size();
return failure();
}
for (int i = 0; i < subshapes->size(); i++) {
if (failed(ConvertInfeedtLayout(op, layout, subshapes->Mutable(i), i)))
return failure();
}
}
return success();
}
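// Special case: mhlo.add on i1 operands is exported as xla::Xor; every other
// op dispatches to the generated ExportXlaOperator.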
LogicalResult ExportXlaOperatorWrapped(mlir::Operation* inst,
OpLoweringContext ctx) {
auto op = dyn_cast<mlir::mhlo::AddOp>(inst);
if (op && mlir::cast<mlir::TensorType>(op.getResult().getType())
.getElementType()
.isSignlessInteger(1)) {
auto& value_map = *ctx.values;
auto result = op.getResult();
xla::XlaOp xla_arg_0;
if (failed(GetXlaOp(op.getLhs(), value_map, &xla_arg_0, op)))
return mlir::failure();
xla::XlaOp xla_arg_1;
if (failed(GetXlaOp(op.getRhs(), value_map, &xla_arg_1, op)))
return mlir::failure();
auto xla_result = xla::Xor(Unwrap(xla_arg_0), Unwrap(xla_arg_1));
value_map[result] = xla_result;
return mlir::success();
}
return ExportXlaOperator(inst, ctx);
}
LogicalResult ConvertToHloModule::PropagateLayouts(
const MlirToHloConversionOptions& options, mlir::Operation* inst,
xla::XlaOp xla_op) {
if (options.propagate_layouts) {
auto* shape = xla::internal::XlaBuilderFriend::GetInstruction(xla_op)
->mutable_shape();
mlir::FailureOr<xla::Shape> mlir_shape_or = ExtractXlaShape(inst);
if (failed(mlir_shape_or)) return failure();
*shape = mlir_shape_or->ToProto();
}
return success();
}
LogicalResult ConvertToHloModule::LowerCast(
mlir::Operation* inst, const MlirToHloConversionOptions& options,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
auto cast_op = cast<mlir::tensor::CastOp>(inst);
Value operand = cast_op.getOperand();
auto ty = mlir::dyn_cast<ShapedType>(operand.getType());
if (!ty || !IsBoundedOrStatic(ty)) {
inst->emitOpError()
<< "requires static or bounded operand for HLO translation";
return failure();
}
xla::XlaOp xla_operand;
auto& value_map = *value_lowering;
if (failed(GetXlaOp(operand, value_map, &xla_operand, cast_op)))
return failure();
value_map[cast_op.getResult()] = xla_operand;
if (failed(PropagateLayouts(options, inst, xla_operand))) {
return failure();
}
return success();
}
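// Lowers mhlo.composite: the decomposition function is lowered as a separate
// computation and invoked through xla::CompositeCall with the composite's
// name, attributes, and version.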
LogicalResult ConvertToHloModule::LowerCompositeCall(
mlir::Operation* inst, xla::XlaBuilder* module_builder,
xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value) {
auto& value_map = *value_lowering;
SmallVector<xla::XlaOp, 1> operands;
for (const Value& val : inst->getOperands()) {
xla::XlaOp operand;
if (failed(GetXlaOp(val, value_map, &operand, inst))) {
return failure();
}
operands.push_back(operand);
}
auto composite_op = cast<mhlo::CompositeOp>(inst);
xla::XlaComputation computation;
if (failed(LowerBasicBlockAsFunction(
&module_
.lookupSymbol<mlir::func::FuncOp>(composite_op.getDecomposition())
.getBody()
.front(),
module_builder_
.CreateSubBuilder(composite_op.getDecomposition().str())
.get(),
          /*is_entry_function=*/false,
          /*ensure_single_arg=*/false,
          /*entry_args_same_across_replicas=*/{},
          /*arg_shardings=*/{}, /*ret_shardings=*/{},
          /*fe_attrs=*/{}, &computation,
          /*implicit_operands=*/{}))) {
return failure();
}
std::string composite_attributes;
llvm::raw_string_ostream(composite_attributes)
<< composite_op.getCompositeAttributes();
xla::XlaOp composite_call = xla::CompositeCall(
builder, computation, operands, composite_op.getName().str(),
composite_attributes, composite_op.getVersion());
unsigned num_results = composite_op.getNumResults();
if (num_results > 1) {
for (unsigned i = 0; i != num_results; ++i) {
value_map[composite_op.getResult(i)] =
xla::GetTupleElement(composite_call, i);
}
} else if (num_results == 1) {
value_map[composite_op.getResult(0)] = composite_call;
}
*return_value = composite_call;
return success();
}
LogicalResult ConvertToHloModule::LowerConstant(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
ElementsAttr const_attr) {
if (!mlir::isa<ShapedType>(inst->getResult(0).getType())) {
return inst->emitError(
"expected shaped type during constant mhlo -> hlo translation");
}
mlir::FailureOr<xla::Shape> shape_or = ExtractXlaShape(inst);
if (failed(shape_or)) return failure();
auto literal_or = CreateArrayLiteralFromAttr(const_attr, shape_or->layout());
if (!literal_or.ok()) return inst->emitError(literal_or.status().ToString());
xla::XlaScopedShardingAssignment scoped_sharding(
builder, CreateOpShardingFromAttribute(inst));
auto constant = xla::ConstantLiteral(builder, literal_or.value());
auto& value_map = *value_lowering;
value_map[inst->getResult(0)] = constant;
return success();
}
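// Propagates an optional layout attribute on mhlo.infeed onto the shapes of
// the generated HLO infeed and its get-tuple-element users.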
LogicalResult ConvertToHloModule::LowerInfeed(
mlir::Operation* inst, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
mlir::ArrayAttr layout = inst->getAttrOfType<mlir::ArrayAttr>(kLayout);
if (!layout) return success();
auto num_results = inst->getNumResults();
bool propagate_layout_to_data_tuple = true;
for (unsigned i = 0; i < num_results; i++) {
auto iter = value_lowering->find(inst->getResult(i));
if (iter == value_lowering->end()) {
inst->emitOpError() << "inst's result value at index " << i
<< " has no match in value_lowering";
return failure();
}
auto xla_gte_op = iter->second;
xla::HloInstructionProto* get_tuple_element_proto =
xla::internal::XlaBuilderFriend::GetInstruction(xla_gte_op);
assert(xla::StringToHloOpcode(get_tuple_element_proto->opcode()).value() ==
xla::HloOpcode::kGetTupleElement &&
"The token-result of mhlo.InfeedOp should be mapped to a "
"xla::HloOpcode::kGetTupleElement");
if (i == num_results - 1) {
xla::HloInstructionProto* xla_infeed_op_proto =
xla::internal::XlaBuilderFriend::GetInstructionByHandle(
xla_gte_op.builder(), get_tuple_element_proto->operand_ids(0));
assert(xla::StringToHloOpcode(xla_infeed_op_proto->opcode()).value() ==
xla::HloOpcode::kInfeed &&
"Expected xla::HloOpcode::kInfeed op");
auto* shape = xla_infeed_op_proto->mutable_shape();
if (failed(ConvertInfeedtLayout(inst, layout, shape))) return failure();
continue;
}
auto* shape = get_tuple_element_proto->mutable_shape();
if (failed(ConvertInfeedtLayout(inst, layout, shape, i))) return failure();
if (propagate_layout_to_data_tuple) {
xla::HloInstructionProto* data_tuple_proto =
xla::internal::XlaBuilderFriend::GetInstructionByHandle(
xla_gte_op.builder(), get_tuple_element_proto->operand_ids(0));
auto* data_tuple_shape = data_tuple_proto->mutable_shape();
      assert(xla::StringToHloOpcode(data_tuple_proto->opcode()).value() ==
                 xla::HloOpcode::kGetTupleElement &&
             "Expected a kGetTupleElement producing the data tuple for all "
             "the data results.");
if (failed(ConvertInfeedtLayout(inst, layout, data_tuple_shape)))
return failure();
}
propagate_layout_to_data_tuple = false;
}
return success();
}
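// Lowers return terminators. Entry functions built with return_tuple, and any
// function returning more than one value, pack the results into a tuple; entry
// functions with result shardings also reshape results to the requested
// representation.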
LogicalResult ConvertToHloModule::LowerReturn(
Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value, const MlirToHloConversionOptions& options) {
unsigned num_return_values = inst->getNumOperands() + implicit_results.size();
std::optional<xla::OpSharding> ret_tuple_sharding =
CreateTupleSharding(ret_shardings);
auto& value_map = *value_lowering;
if ((options_.return_tuple && is_entry_function) || num_return_values != 1) {
std::vector<xla::XlaOp> returns;
returns.reserve(num_return_values);
for (Value ret : inst->getOperands()) {
xla::XlaOp& operand = returns.emplace_back();
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
}
for (Value ret : implicit_results) {
xla::XlaOp& operand = returns.emplace_back();
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
}
if (is_entry_function && ret_tuple_sharding) {
assert(implicit_results.empty() &&
"entry functions shouldn't have implicit results");
for (OpOperand& ret : inst->getOpOperands()) {
unsigned index = ret.getOperandNumber();
xla::Shape return_shape = xla::TypeToShape(ret.get().getType());
absl::StatusOr<xla::XlaOp> reshape =
ReshapeWithCorrectRepresentationAndSharding(
builder, returns[index], return_shape,
options_.layout_preference_fn, options_.shape_representation_fn,
ret_shardings[index],
false);
if (!reshape.ok())
return inst->emitError() << reshape.status().message();
returns[index] = reshape.value();
}
}
xla::XlaScopedShardingAssignment scoped_sharding(builder,
ret_tuple_sharding);
*return_value = xla::Tuple(builder, returns);
return success();
}
if (num_return_values == 1) {
Value ret = implicit_results.empty() ? inst->getOperand(0)
: implicit_results.front();
xla::XlaOp operand;
if (failed(GetXlaOp(ret, value_map, &operand, inst))) return failure();
if (ret_tuple_sharding) {
auto tuple = Tuple(builder, {operand});
builder->SetSharding(*ret_shardings[0]);
*return_value = GetTupleElement(tuple, 0);
builder->ClearSharding();
} else {
*return_value = operand;
}
}
return success();
}
LogicalResult ConvertToHloModule::Lower(
mlir::Operation* inst, bool is_entry_function,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<mlir::Value> implicit_results, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering,
xla::XlaOp* return_value) {
if (inst->getDialect() !=
inst->getContext()->getLoadedDialect<mlir::mhlo::MhloDialect>() &&
!mlir::isa<mlir::func::ConstantOp, mlir::arith::ConstantOp,
mlir::func::CallOp, mlir::tensor::CastOp,
mlir::func::ReturnOp>(inst)) {
inst->emitOpError("unsupported op for export to XLA");
return failure();
}
*return_value = xla::XlaOp();
if (succeeded(ExportXlaOperatorWrapped(
inst,
{value_lowering, this, builder, &stack_frame_indexes_builder_}))) {
if (inst->getNumResults() == 1) {
auto iter = value_lowering->find(inst->getResult(0));
if (iter == value_lowering->end()) {
inst->emitOpError(
"inst has a result, but it's not found in value_lowering");
return failure();
}
if (failed(PropagateLayouts(options_, inst, iter->second))) {
return failure();
}
}
if (isa<mhlo::InfeedOp>(inst)) {
return LowerInfeed(inst, builder, value_lowering);
}
return success();
}
if (auto call_op = dyn_cast<mlir::func::CallOp>(inst)) {
return LowerFunctionCall(call_op, builder, value_lowering);
}
if (isa<mlir::tensor::CastOp>(inst)) {
return LowerCast(inst, options_, value_lowering);
}
if (auto composite_op = dyn_cast<mhlo::CompositeOp>(inst)) {
return LowerCompositeCall(inst, &module_builder_, builder, value_lowering,
return_value);
}
ElementsAttr const_attr;
if (matchPattern(inst, m_Constant(&const_attr))) {
return LowerConstant(inst, builder, value_lowering, const_attr);
}
if (isa<mhlo::ReturnOp, mlir::func::ReturnOp>(inst)) {
return LowerReturn(inst, is_entry_function, ret_shardings, implicit_results,
builder, value_lowering, return_value, options_);
}
inst->emitOpError() << "can't be translated to XLA HLO";
return failure();
}
LogicalResult ConvertToHloModule::LowerFunctionCall(
mlir::func::CallOp call_op, xla::XlaBuilder* builder,
ConvertToHloModule::ValueLoweringMap* value_lowering) {
auto& value_map = *value_lowering;
mlir::func::FuncOp callee =
module_.lookupSymbol<mlir::func::FuncOp>(call_op.getCallee());
if (failed(RunOnFunction(callee))) return failure();
std::vector<xla::XlaOp> operands;
for (auto operand : call_op.getOperands()) {
xla::XlaOp xla_operand;
if (failed(GetXlaOp(operand, value_map, &xla_operand, call_op)))
return failure();
operands.push_back(xla_operand);
}
xla::FrontendAttributes fe_attrs = CreateXlaFrontendAttributesFromOp(call_op);
xla::XlaScopedFrontendAttributesAssignment assignment(builder, fe_attrs);
xla::XlaOp call_result =
xla::Call(builder, lowered_computation_[callee], operands);
unsigned num_results = call_op.getNumResults();
if (num_results > 1) {
for (unsigned i = 0; i != num_results; ++i) {
value_map[call_op.getResult(i)] = xla::GetTupleElement(call_result, i);
}
} else if (num_results == 1) {
value_map[call_op.getResult(0)] = call_result;
}
return success();
}
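// Lowers a single-block func.func into an XlaComputation and caches it. For
// the entry function this also wires up argument replication, buffer donors,
// and input/output aliasing from argument attributes.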
LogicalResult ConvertToHloModule::RunOnFunction(mlir::func::FuncOp f) {
if (lowered_computation_.count(f)) return success();
if (!llvm::hasSingleElement(f)) {
return f.emitError("only single block Function supported");
}
std::unique_ptr<xla::XlaBuilder> builder_up;
bool entry_function = f.getName() == kMain;
if (!entry_function)
builder_up = module_builder_.CreateSubBuilder(f.getName().str());
auto& builder = entry_function ? module_builder_ : *builder_up;
xla::XlaComputation computation;
std::vector<bool> entry_args_same_across_replicas;
llvm::SmallVector<std::optional<xla::OpSharding>, 4> arg_shardings;
llvm::SmallVector<std::optional<xla::OpSharding>, 4> ret_shardings;
llvm::SmallVector<std::optional<xla::FrontendAttributes>, 4> arg_fe_attrs;
if (entry_function) {
bool any_arg_replicated = false;
entry_args_same_across_replicas.reserve(f.getNumArguments());
for (int64_t i = 0; i < f.getNumArguments(); ++i) {
auto attr = f.getArgAttrOfType<mlir::BoolAttr>(i, kMhloReplication);
entry_args_same_across_replicas.push_back(attr != nullptr &&
attr.getValue());
any_arg_replicated |= entry_args_same_across_replicas.back();
auto buffer_donor =
f.getArgAttrOfType<mlir::BoolAttr>(i, kJaxBufferDonor);
if (buffer_donor) {
if (options_.use_tuple_args) {
builder.AddBufferDonor(0, {i});
} else {
builder.AddBufferDonor(i, {});
}
}
auto aliasing_output =
f.getArgAttrOfType<mlir::IntegerAttr>(i, kTfAliasingOutput);
if (!aliasing_output) continue;
xla::ShapeIndex output_index;
if ((options_.return_tuple && entry_function) || f.getNumResults() != 1) {
output_index = {aliasing_output.getInt()};
} else {
if (aliasing_output.getInt() != 0) {
return f.emitError(
"Aliasing output must be 0 if only one output exists");
}
output_index = {};
}
if (options_.use_tuple_args) {
builder.SetUpAlias(output_index, 0,
{i});
} else {
builder.SetUpAlias(output_index, i,
{});
}
}
if (!any_arg_replicated) entry_args_same_across_replicas.clear();
ExtractShardingsFromFunction(f, &arg_shardings, &ret_shardings);
ExtractFrontendAttributesFromFunction(f, &arg_fe_attrs);
}
if (failed(LowerBasicBlockAsFunction(&f.front(), &builder, entry_function,
                                       /*ensure_single_arg=*/false,
                                       entry_args_same_across_replicas,
arg_shardings, ret_shardings,
arg_fe_attrs, &computation))) {
return failure();
}
if (auto execution_thread =
f->getAttrOfType<mlir::StringAttr>(kExecutionThread)) {
computation.mutable_proto()->mutable_computations(0)->set_execution_thread(
execution_thread.str());
}
for (int i = 0; i < f.getNumArguments(); ++i) {
if (auto pr =
f.getArgAttrOfType<mlir::ArrayAttr>(i, kMhloParameterReplication)) {
for (auto b : pr.getValue())
for (auto& instr : *computation.mutable_proto()
->mutable_computations(0)
->mutable_instructions())
if (instr.parameter_number() == i)
instr.mutable_parameter_replication()
->add_replicated_at_leaf_buffers(
mlir::cast<mlir::BoolAttr>(b).getValue());
}
}
lowered_computation_[f] = std::move(computation);
return success();
}
LogicalResult ConvertToHloModule::SetEntryTupleShapesAndLeafReplication(
Block* block, const std::vector<bool>& entry_args_same_across_replicas,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes,
std::vector<bool>* leaf_replication) {
arg_shapes->reserve(block->getNumArguments());
leaf_replication->reserve(block->getNumArguments());
for (BlockArgument& arg : block->getArguments()) {
arg_shapes->push_back(xla::TypeToShape(arg.getType()));
xla::Shape& arg_shape = arg_shapes->back();
auto layout_preference_status =
options_.layout_preference_fn ? options_.layout_preference_fn(arg_shape)
: XlaLayoutPreference::kNoPreference;
if (!layout_preference_status.ok())
return block->getParentOp()->emitError()
<< layout_preference_status.status().message();
auto arg_shape_status = options_.shape_representation_fn
? options_.shape_representation_fn(
arg_shape, false,
layout_preference_status.value())
: arg_shape;
if (!arg_shape_status.ok())
return block->getParentOp()->emitError()
<< arg_shape_status.status().message();
arg_shape = std::move(arg_shape_status.value());
if (entry_args_same_across_replicas.empty()) continue;
for (int i = 0, e = xla::ShapeUtil::GetLeafCount(arg_shape); i < e; ++i)
leaf_replication->push_back(
entry_args_same_across_replicas[arg.getArgNumber()]);
}
return success();
}
LogicalResult ConvertToHloModule::SetEntryTupleShardings(
Block* block, xla::XlaBuilder* builder,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::SmallVectorImpl<xla::Shape>* arg_shapes) {
if (!arg_shardings.empty() && SomeOptionalShardingsAreSet(arg_shardings)) {
xla::OpSharding sharding;
sharding.set_type(xla::OpSharding::TUPLE);
for (const auto& arg_sharding : llvm::enumerate(arg_shardings)) {
if (arg_sharding.value().has_value()) {
auto hlo_sharding = xla::HloSharding::FromProto(*arg_sharding.value());
if (!hlo_sharding.ok())
return block->getParentOp()->emitError()
<< hlo_sharding.status().message();
auto status = RewriteLayoutWithShardedShape(
hlo_sharding.value(), false,
options_.layout_preference_fn, options_.shape_representation_fn,
&(*arg_shapes)[arg_sharding.index()]);
if (!status.ok())
return block->getParentOp()->emitError() << status.message();
*sharding.add_tuple_shardings() = *arg_sharding.value();
} else {
xla::OpSharding fallback_sharding;
fallback_sharding.set_type(xla::OpSharding::REPLICATED);
*sharding.add_tuple_shardings() = fallback_sharding;
}
}
builder->SetSharding(sharding);
}
return success();
}
namespace {
xla::OpMetadata GetOpNameMetadataFromLocation(Value value) {
xla::OpMetadata m;
m.set_op_name(mhlo::GetDebugNameFromLocation(value.getLoc()));
return m;
}
}
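// Lowers one MLIR block into an XlaComputation. With use_tuple_args the entry
// block takes a single tuple parameter; with ensure_single_arg all block and
// implicit arguments are packed into one (possibly tuple) parameter; otherwise
// each block argument becomes its own xla::Parameter.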
LogicalResult ConvertToHloModule::LowerBasicBlockAsFunction(
Block* block, xla::XlaBuilder* builder, bool is_entry_function,
bool ensure_single_arg,
const std::vector<bool>& entry_args_same_across_replicas,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings,
llvm::ArrayRef<std::optional<xla::FrontendAttributes>> fe_attrs,
xla::XlaComputation* result, llvm::ArrayRef<mlir::Value> implicit_operands,
llvm::ArrayRef<mlir::Value> implicit_results) {
ValueLoweringMap lowering;
if (is_entry_function && options_.use_tuple_args) {
llvm::SmallVector<xla::Shape, 4> arg_shapes;
std::vector<bool> leaf_replication;
if (failed(SetEntryTupleShapesAndLeafReplication(
block, entry_args_same_across_replicas, &arg_shapes,
&leaf_replication)))
return failure();
if (failed(
SetEntryTupleShardings(block, builder, arg_shardings, &arg_shapes)))
return failure();
xla::Shape input_shape = xla::ShapeUtil::MakeTupleShape(arg_shapes);
auto tuple =
xla::Parameter(builder, 0, input_shape, kArgTuple, leaf_replication);
builder->ClearSharding();
for (BlockArgument& arg : block->getArguments()) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty() ? std::nullopt
: arg_shardings[arg.getArgNumber()]);
lowering[arg] = xla::GetTupleElement(tuple, arg.getArgNumber());
}
} else {
if (ensure_single_arg) {
llvm::SmallVector<xla::Shape, 4> arg_shapes;
auto args_size = block->getNumArguments() + implicit_operands.size();
arg_shapes.reserve(args_size);
for (BlockArgument& arg : block->getArguments())
arg_shapes.push_back(xla::TypeToShape(arg.getType()));
for (Value implicit_operand : implicit_operands)
arg_shapes.push_back(xla::TypeToShape(implicit_operand.getType()));
if (args_size > 1) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty()
? std::nullopt
: CreateTupleSharding(arg_shardings));
auto tuple = xla::Parameter(
builder, 0, xla::ShapeUtil::MakeTupleShape(arg_shapes), kArgTuple);
for (BlockArgument& arg : block->getArguments()) {
auto num = arg.getArgNumber();
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings[num]);
lowering[arg] = xla::GetTupleElement(tuple, num);
}
for (auto [implicit_index, implicit_operand] :
llvm::enumerate(implicit_operands)) {
int64_t arg_index = block->getNumArguments() + implicit_index;
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings[arg_index]);
lowering[implicit_operand] = xla::GetTupleElement(tuple, arg_index);
}
} else if (args_size == 1) {
xla::XlaScopedShardingAssignment scoped_sharding(
builder,
arg_shardings.empty() ? std::nullopt : arg_shardings.front());
mlir::Value arg = implicit_operands.empty() ? block->getArgument(0)
: implicit_operands.front();
xla::XlaScopedOpMetadataAssignment op_metadata(
builder, GetOpNameMetadataFromLocation(arg));
lowering[arg] = xla::Parameter(builder, 0, arg_shapes[0], kArgPrefix);
} else {
xla::Parameter(builder, 0, xla::ShapeUtil::MakeTupleShape(arg_shapes),
kArgEmptyTuple);
}
} else {
for (BlockArgument& arg : block->getArguments()) {
auto num = arg.getArgNumber();
xla::Shape shape = xla::TypeToShape(arg.getType());
xla::XlaScopedShardingAssignment scoped_sharding(
builder, arg_shardings.empty() ? std::nullopt : arg_shardings[num]);
if (!fe_attrs.empty() && fe_attrs[num]) {
builder->SetFrontendAttributes(*fe_attrs[num]);
}
xla::XlaScopedOpMetadataAssignment op_metadata(
builder, GetOpNameMetadataFromLocation(arg));
if (entry_args_same_across_replicas.empty()) {
lowering[arg] = xla::Parameter(builder, num, shape,
absl::StrCat(kArgPrefix, num));
} else {
lowering[arg] = xla::Parameter(
builder, num, shape, absl::StrCat(kArgPrefix, num),
            std::vector<bool>(xla::ShapeUtil::GetLeafCount(shape),
                              entry_args_same_across_replicas[num]));
}
builder->ClearFrontendAttributes();
}
}
}
xla::XlaOp return_value;
for (auto& inst : *block)
if (failed(Lower(&inst, is_entry_function, ret_shardings, implicit_results,
builder, &lowering, &return_value)))
return failure();
auto computation_or =
return_value.valid() ? builder->Build(return_value) : builder->Build();
if (!computation_or.ok()) {
block->back().emitError() << computation_or.status().message();
return failure();
}
*result = std::move(computation_or.value());
return success();
}
LogicalResult ConvertToHloModule::LowerRegionAsComputation(
mlir::Region* region, xla::XlaComputation* func,
llvm::ArrayRef<mlir::Value> implicit_operands,
llvm::ArrayRef<mlir::Value> implicit_results, bool ensure_single_arg,
llvm::ArrayRef<std::optional<xla::OpSharding>> arg_shardings,
llvm::ArrayRef<std::optional<xla::OpSharding>> ret_shardings) {
std::unique_ptr<xla::XlaBuilder> builder = module_builder_.CreateSubBuilder(
absl::StrCat(kRegionPrefix, region_id_++));
return LowerBasicBlockAsFunction(
®ion->front(), builder.get(),
      /*is_entry_function=*/false,
      ensure_single_arg,
      /*entry_args_same_across_replicas=*/{}, arg_shardings, ret_shardings,
      /*fe_attrs=*/{}, func, implicit_operands, implicit_results);
}
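// Runs the MHLO prepare-for-export pass; if the module uses the shape dialect,
// symbolic shape optimization and shape-to-HLO legalization run as well.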
absl::Status PrepareForExport(mlir::ModuleOp module) {
bool hasShapeOps = false;
module.walk([&](Operation* op) {
hasShapeOps |= isa<shape::ShapeDialect>(op->getDialect());
return hasShapeOps ? WalkResult::interrupt() : WalkResult::advance();
});
mlir::PassManager pm(module.getContext());
pm.addNestedPass<mlir::func::FuncOp>(mhlo::createPrepareForExportPass());
if (hasShapeOps) {
pm.addNestedPass<mlir::func::FuncOp>(
mhlo::createSymbolicShapeOptimizationPass());
pm.addNestedPass<mlir::func::FuncOp>(mhlo::createShapeLegalizeToHloPass());
}
mlir::BaseScopedDiagnosticHandler handler(module.getContext());
(void)pm.run(module);
absl::Status s = handler.ConsumeStatus();
if (!s.ok()) {
s = absl::Status(
s.code(),
absl::StrCat("Unable to prepare for XLA export: ", s.message()));
}
return s;
}
}
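// Converts the (Stable)HLO module to an xla::HloProto: legalizes StableHLO to
// MHLO, lowers every function, then copies module-level attributes (shardings,
// frontend attributes, entry-computation layouts/tiles, cross-program
// prefetches) onto the HloModuleProto.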
absl::Status ConvertMlirHloToHlo(mlir::ModuleOp module,
xla::HloProto* hlo_proto,
MlirToHloConversionOptions options) {
mlir::PassManager pm(module->getContext());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
if (failed(pm.run(module))) {
return tsl::errors::Internal("Unable to convert StableHLO to MHLO");
}
TF_RETURN_IF_ERROR(PrepareForExport(module));
mlir::BaseScopedDiagnosticHandler diag_handler(module.getContext());
xla::XlaBuilder module_builder(kMain);
ConvertToHloModule converter(module, module_builder, options);
if (failed(converter.Run())) return diag_handler.ConsumeStatus();
xla::HloModuleProto hlo_module = converter.ConsumeMainProto();
StringRef module_name = module.getName() ? *module.getName() : kMain;
hlo_module.set_name(module_name.str());
if (auto cross_program_prefetches =
module->getAttrOfType<mlir::ArrayAttr>(kMhloCrossProgramPrefetches)) {
for (const auto& prefetch :
Convert_cross_program_prefetches(cross_program_prefetches)) {
*hlo_module.add_cross_program_prefetches() = std::move(prefetch);
}
}
if (auto is_dynamic = module->getAttrOfType<mlir::BoolAttr>(kMhloIsDynamic)) {
hlo_module.set_is_dynamic(is_dynamic.getValue());
}
if (auto frontend_attributes =
module->getAttrOfType<DictionaryAttr>(kMhloFrontendAttributes)) {
ConstructFrontendAttributesFromAttribute(
frontend_attributes, *hlo_module.mutable_frontend_attributes());
}
if (auto use_auto_spmd_partitioning =
module->getAttrOfType<mlir::BoolAttr>(kMhloUseAutoSpmdPartitioning)) {
hlo_module.set_use_auto_spmd_partitioning(
use_auto_spmd_partitioning.getValue());
}
if (auto spmd_output_sharding =
module->getAttrOfType<mlir::StringAttr>(kMhloSpmdOutputSharding)) {
*hlo_module.mutable_spmd_output_sharding() =
*xla::ConvertSharding(spmd_output_sharding.getValue());
}
if (auto input_output_alias =
module->getAttrOfType<mlir::ArrayAttr>(kMhloInputOutputAlias)) {
if (std::optional<xla::HloInputOutputAliasProto> input_output_alias_proto =
xla::ConvertInputOutputAlias(input_output_alias.getValue())) {
*hlo_module.mutable_input_output_alias() = *input_output_alias_proto;
}
}
if (auto spmd_parameters_sharding = module->getAttrOfType<mlir::ArrayAttr>(
kMhloSpmdParametersShardings)) {
for (const auto& sharding : spmd_parameters_sharding.getValue()) {
*hlo_module.add_spmd_parameters_shardings() = *xla::ConvertSharding(
mlir::cast<mlir::StringAttr>(sharding).getValue());
}
}
if (auto xla_entry_computation_parameter_layout =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationParameterLayouts)) {
auto status = mhlo::ExportModuleEntryComputationParameterLayouts(
xla_entry_computation_parameter_layout, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_parameter_tiles =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationParameterTiles)) {
auto status = mhlo::ExportModuleEntryComputationParameterTiles(
xla_entry_computation_parameter_tiles, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_result_layout =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationResultLayout)) {
auto status = mhlo::ExportModuleEntryComputationResultLayout(
xla_entry_computation_result_layout, hlo_module);
if (!status.ok()) return status;
}
if (auto xla_entry_computation_result_tiles =
module->getAttrOfType<mlir::ArrayAttr>(
kMhloXlaEntryComputationResultTiles)) {
auto status = mhlo::ExportModuleEntryComputationResultTiles(
xla_entry_computation_result_tiles, hlo_module);
if (!status.ok()) return status;
}
xla::StackFrameIndexProto stack_frame_index =
converter.BuildStackFramesIndexProto();
hlo_module.mutable_stack_frame_index()->Swap(&stack_frame_index);
hlo_proto->mutable_hlo_module()->Swap(&hlo_module);
return absl::OkStatus();
}
absl::StatusOr<std::unique_ptr<xla::HloModule>> ConvertMlirHloToHloModule(
mlir::ModuleOp module, MlirToHloConversionOptions options) {
xla::HloProto hlo_proto;
TF_RETURN_IF_ERROR(ConvertMlirHloToHlo(module, &hlo_proto, options));
const xla::HloModuleProto& module_proto = hlo_proto.hlo_module();
TF_ASSIGN_OR_RETURN(xla::HloModuleConfig config,
xla::HloModule::CreateModuleConfigFromProto(
module_proto, xla::GetDebugOptionsFromFlags()));
mhlo::ExportHloModuleConfig(config, module);
return xla::HloModule::CreateFromProto(module_proto, config);
}
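// Lowers the ops of a block directly into an existing XlaBuilder: block
// arguments map to the given xla_params and the operands of the terminator are
// collected into returns instead of building a full computation.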
absl::Status BuildHloFromMlirHlo(mlir::Block& block, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
MlirToHloConversionOptions options) {
auto module = block.getParentOp()->getParentOfType<mlir::ModuleOp>();
TF_RETURN_IF_ERROR(PrepareForExport(module));
options.return_tuple = false;
options.use_tuple_args = false;
ConvertToHloModule converter(module, builder, options);
ConvertToHloModule::ValueLoweringMap lowering;
if (xla_params.size() != block.getArguments().size())
return tsl::errors::Internal("xla_params size (", xla_params.size(),
") != block arguments size (",
block.getArguments().size(), ")");
for (BlockArgument& arg : block.getArguments()) {
auto num = arg.getArgNumber();
lowering[arg] = xla_params[num];
}
mlir::BaseScopedDiagnosticHandler diag_handler(module.getContext());
for (auto& inst : block) {
if (isa<mhlo::ReturnOp, mlir::func::ReturnOp>(inst)) {
returns.resize(inst.getNumOperands());
for (OpOperand& ret : inst.getOpOperands()) {
unsigned index = ret.getOperandNumber();
xla::XlaOp operand;
if (failed(GetXlaOp(ret.get(), lowering, &operand, &inst)))
return diag_handler.ConsumeStatus();
returns[index] = operand;
}
} else {
xla::XlaOp return_value;
      if (failed(converter.Lower(&inst, /*is_entry_function=*/true,
                                 /*ret_shardings=*/{},
                                 /*implicit_results=*/{}, &builder, &lowering,
&return_value)))
return diag_handler.ConsumeStatus();
}
}
return absl::OkStatus();
}
absl::Status ConvertMlirHloToHlo(mlir::ModuleOp module,
::xla::HloProto* hlo_proto,
bool use_tuple_args, bool return_tuple,
MlirToHloConversionOptions options) {
options.use_tuple_args = use_tuple_args;
options.return_tuple = return_tuple;
return ConvertMlirHloToHlo(module, hlo_proto, options);
}
} | #include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "stablehlo/dialect/Register.h"
#include "xla/mlir/utils/error_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace mlir {
namespace {
using testing::_;
using testing::AllOf;
using testing::HasSubstr;
using tsl::testing::StatusIs;
TEST(ConvertMlirHloToHloModuleTest, PropagatesDiagnostics) {
const std::string mlir_source = R"mlir(
func.func @main(%arg0: tensor<?xf32>, %arg1: tensor<1xindex>, %arg2: tensor<1xindex>, %arg3: tensor<1xindex>) -> tensor<?xf32> {
%0 = shape.const_shape [14, 1] : tensor<2xindex>
%1 = "stablehlo.real_dynamic_slice"(%arg0, %arg1, %arg2, %arg3) : (tensor<?xf32>, tensor<1xindex>, tensor<1xindex>, tensor<1xindex>) -> tensor<?xf32>
func.return %1 : tensor<?xf32>
}
)mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect, mlir::shape::ShapeDialect>();
mlir::stablehlo::registerAllDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> module;
{
mlir::BaseScopedDiagnosticHandler handler(&context);
module = mlir::parseSourceString<mlir::ModuleOp>(mlir_source, &context);
TF_ASSERT_OK(handler.ConsumeStatus());
}
ASSERT_THAT(ConvertMlirHloToHloModule(*module),
StatusIs(_, AllOf(HasSubstr("Unable to prepare for XLA export"),
HasSubstr("real_dynamic_slice"))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad4e9a72-a8db-425a-9ae6-e50292248763 | cpp | abseil/abseil-cpp | log_uniform_int_distribution | absl/random/log_uniform_int_distribution.h | absl/random/log_uniform_int_distribution_test.cc | #ifndef ABSL_RANDOM_LOG_UNIFORM_INT_DISTRIBUTION_H_
#define ABSL_RANDOM_LOG_UNIFORM_INT_DISTRIBUTION_H_
#include <algorithm>
#include <cassert>
#include <cmath>
#include <istream>
#include <limits>
#include <ostream>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/random/internal/fastmath.h"
#include "absl/random/internal/generate_real.h"
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/traits.h"
#include "absl/random/uniform_int_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
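// log_uniform_int_distribution<IntType>: produces values in [min, max] where
// the bucket of powers of base is chosen uniformly and the value is then drawn
// uniformly within that bucket.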
template <typename IntType = int>
class log_uniform_int_distribution {
private:
using unsigned_type =
typename random_internal::make_unsigned_bits<IntType>::type;
public:
using result_type = IntType;
class param_type {
public:
using distribution_type = log_uniform_int_distribution;
explicit param_type(
result_type min = 0,
result_type max = (std::numeric_limits<result_type>::max)(),
result_type base = 2)
: min_(min),
max_(max),
base_(base),
range_(static_cast<unsigned_type>(max_) -
static_cast<unsigned_type>(min_)),
log_range_(0) {
assert(max_ >= min_);
assert(base_ > 1);
if (base_ == 2) {
log_range_ = (std::min)(random_internal::BitWidth(range()),
std::numeric_limits<unsigned_type>::digits);
} else {
const double inv_log_base = 1.0 / std::log(static_cast<double>(base_));
const double log_range = std::log(static_cast<double>(range()) + 0.5);
log_range_ = static_cast<int>(std::ceil(inv_log_base * log_range));
}
}
result_type(min)() const { return min_; }
result_type(max)() const { return max_; }
result_type base() const { return base_; }
friend bool operator==(const param_type& a, const param_type& b) {
return a.min_ == b.min_ && a.max_ == b.max_ && a.base_ == b.base_;
}
friend bool operator!=(const param_type& a, const param_type& b) {
return !(a == b);
}
private:
friend class log_uniform_int_distribution;
int log_range() const { return log_range_; }
unsigned_type range() const { return range_; }
result_type min_;
result_type max_;
result_type base_;
unsigned_type range_;
int log_range_;
static_assert(random_internal::IsIntegral<IntType>::value,
"Class-template absl::log_uniform_int_distribution<> must be "
"parameterized using an integral type.");
};
log_uniform_int_distribution() : log_uniform_int_distribution(0) {}
explicit log_uniform_int_distribution(
result_type min,
result_type max = (std::numeric_limits<result_type>::max)(),
result_type base = 2)
: param_(min, max, base) {}
explicit log_uniform_int_distribution(const param_type& p) : param_(p) {}
void reset() {}
template <typename URBG>
result_type operator()(URBG& g) {
return (*this)(g, param_);
}
template <typename URBG>
result_type operator()(URBG& g,
const param_type& p) {
return static_cast<result_type>((p.min)() + Generate(g, p));
}
result_type(min)() const { return (param_.min)(); }
result_type(max)() const { return (param_.max)(); }
result_type base() const { return param_.base(); }
param_type param() const { return param_; }
void param(const param_type& p) { param_ = p; }
friend bool operator==(const log_uniform_int_distribution& a,
const log_uniform_int_distribution& b) {
return a.param_ == b.param_;
}
friend bool operator!=(const log_uniform_int_distribution& a,
const log_uniform_int_distribution& b) {
return a.param_ != b.param_;
}
private:
template <typename URNG>
unsigned_type Generate(URNG& g,
const param_type& p);
param_type param_;
};
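// Generate() draws an exponent e uniformly from [0, log_range]. e == 0 maps
// to 0; otherwise the result is drawn uniformly from [base^(e-1), base^e - 1]
// with both bounds clamped to the distribution's range.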
template <typename IntType>
template <typename URBG>
typename log_uniform_int_distribution<IntType>::unsigned_type
log_uniform_int_distribution<IntType>::Generate(
URBG& g,
const param_type& p) {
const int e = absl::uniform_int_distribution<int>(0, p.log_range())(g);
if (e == 0) {
return 0;
}
const int d = e - 1;
unsigned_type base_e, top_e;
if (p.base() == 2) {
base_e = static_cast<unsigned_type>(1) << d;
top_e = (e >= std::numeric_limits<unsigned_type>::digits)
? (std::numeric_limits<unsigned_type>::max)()
: (static_cast<unsigned_type>(1) << e) - 1;
} else {
const double r = std::pow(static_cast<double>(p.base()), d);
const double s = (r * static_cast<double>(p.base())) - 1.0;
base_e =
(r > static_cast<double>((std::numeric_limits<unsigned_type>::max)()))
? (std::numeric_limits<unsigned_type>::max)()
: static_cast<unsigned_type>(r);
top_e =
(s > static_cast<double>((std::numeric_limits<unsigned_type>::max)()))
? (std::numeric_limits<unsigned_type>::max)()
: static_cast<unsigned_type>(s);
}
const unsigned_type lo = (base_e >= p.range()) ? p.range() : base_e;
const unsigned_type hi = (top_e >= p.range()) ? p.range() : top_e;
return absl::uniform_int_distribution<result_type>(
static_cast<result_type>(lo), static_cast<result_type>(hi))(g);
}
template <typename CharT, typename Traits, typename IntType>
std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os,
const log_uniform_int_distribution<IntType>& x) {
using stream_type =
typename random_internal::stream_format_type<IntType>::type;
auto saver = random_internal::make_ostream_state_saver(os);
os << static_cast<stream_type>((x.min)()) << os.fill()
<< static_cast<stream_type>((x.max)()) << os.fill()
<< static_cast<stream_type>(x.base());
return os;
}
template <typename CharT, typename Traits, typename IntType>
std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is,
log_uniform_int_distribution<IntType>& x) {
using param_type = typename log_uniform_int_distribution<IntType>::param_type;
using result_type =
typename log_uniform_int_distribution<IntType>::result_type;
using stream_type =
typename random_internal::stream_format_type<IntType>::type;
stream_type min;
stream_type max;
stream_type base;
auto saver = random_internal::make_istream_state_saver(is);
is >> min >> max >> base;
if (!is.fail()) {
x.param(param_type(static_cast<result_type>(min),
static_cast<result_type>(max),
static_cast<result_type>(base)));
}
return is;
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/log_uniform_int_distribution.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <random>
#include <sstream>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/strip.h"
namespace {
template <typename IntType>
class LogUniformIntDistributionTypeTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t,
uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_SUITE(LogUniformIntDistributionTypeTest, IntTypes);
TYPED_TEST(LogUniformIntDistributionTypeTest, SerializeTest) {
using param_type =
typename absl::log_uniform_int_distribution<TypeParam>::param_type;
using Limits = std::numeric_limits<TypeParam>;
constexpr int kCount = 1000;
absl::InsecureBitGen gen;
for (const auto& param : {
param_type(0, 1),
param_type(0, 2),
param_type(0, 2, 10),
param_type(9, 32, 4),
param_type(1, 101, 10),
param_type(1, Limits::max() / 2),
param_type(0, Limits::max() - 1),
param_type(0, Limits::max(), 2),
param_type(0, Limits::max(), 10),
param_type(Limits::min(), 0),
param_type(Limits::lowest(), Limits::max()),
param_type(Limits::min(), Limits::max()),
}) {
const auto min = param.min();
const auto max = param.max();
const auto base = param.base();
absl::log_uniform_int_distribution<TypeParam> before(min, max, base);
EXPECT_EQ(before.min(), param.min());
EXPECT_EQ(before.max(), param.max());
EXPECT_EQ(before.base(), param.base());
{
absl::log_uniform_int_distribution<TypeParam> via_param(param);
EXPECT_EQ(via_param, before);
}
std::stringstream ss;
ss << before;
absl::log_uniform_int_distribution<TypeParam> after(3, 6, 17);
EXPECT_NE(before.max(), after.max());
EXPECT_NE(before.base(), after.base());
EXPECT_NE(before.param(), after.param());
EXPECT_NE(before, after);
ss >> after;
EXPECT_EQ(before.min(), after.min());
EXPECT_EQ(before.max(), after.max());
EXPECT_EQ(before.base(), after.base());
EXPECT_EQ(before.param(), after.param());
EXPECT_EQ(before, after);
auto sample_min = after.max();
auto sample_max = after.min();
for (int i = 0; i < kCount; i++) {
auto sample = after(gen);
EXPECT_GE(sample, after.min());
EXPECT_LE(sample, after.max());
if (sample > sample_max) sample_max = sample;
if (sample < sample_min) sample_min = sample;
}
LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
}
}
using log_uniform_i32 = absl::log_uniform_int_distribution<int32_t>;
class LogUniformIntChiSquaredTest
: public testing::TestWithParam<log_uniform_i32::param_type> {
public:
double ChiSquaredTestImpl();
absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6};
};
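// Buckets each sample by magnitude (bucket 0 holds zero; a value y > 0 falls
// into bucket 1 + floor(log_base(y))) and runs a chi-squared goodness-of-fit
// test against a uniform expected count per bucket, returning the p-value.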
double LogUniformIntChiSquaredTest::ChiSquaredTestImpl() {
using absl::random_internal::kChiSquared;
const auto& param = GetParam();
const bool is_2 = (param.base() == 2);
const double base_log = 1.0 / std::log(param.base());
const auto bucket_index = [base_log, is_2, &param](int32_t x) {
uint64_t y = static_cast<uint64_t>(x) - param.min();
return (y == 0) ? 0
: is_2 ? static_cast<int>(1 + std::log2(y))
: static_cast<int>(1 + std::log(y) * base_log);
};
const int max_bucket = bucket_index(param.max());
const size_t trials = 15 + (max_bucket + 1) * 10;
log_uniform_i32 dist(param);
std::vector<int64_t> buckets(max_bucket + 1);
for (size_t i = 0; i < trials; ++i) {
const auto sample = dist(rng_);
ABSL_ASSERT(sample <= dist.max());
ABSL_ASSERT(sample >= dist.min());
int bucket = bucket_index(sample);
ABSL_ASSERT(bucket <= max_bucket);
++buckets[bucket];
}
const int dof = buckets.size() - 1;
const double expected = trials / static_cast<double>(buckets.size());
const double threshold = absl::random_internal::ChiSquareValue(dof, 0.98);
double chi_square = absl::random_internal::ChiSquareWithExpected(
std::begin(buckets), std::end(buckets), expected);
const double p = absl::random_internal::ChiSquarePValue(chi_square, dof);
if (chi_square > threshold) {
LOG(INFO) << "values";
for (size_t i = 0; i < buckets.size(); i++) {
LOG(INFO) << i << ": " << buckets[i];
}
LOG(INFO) << "trials=" << trials << "\n"
<< kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
<< p << ")\n"
<< kChiSquared << " @ 0.98 = " << threshold;
}
return p;
}
TEST_P(LogUniformIntChiSquaredTest, MultiTest) {
const int kTrials = 5;
int failures = 0;
for (int i = 0; i < kTrials; i++) {
double p_value = ChiSquaredTestImpl();
if (p_value < 0.005) {
failures++;
}
}
EXPECT_LE(failures, 4);
}
std::vector<log_uniform_i32::param_type> GenParams() {
using Param = log_uniform_i32::param_type;
using Limits = std::numeric_limits<int32_t>;
return std::vector<Param>{
Param{0, 1, 2},
Param{1, 1, 2},
Param{0, 2, 2},
Param{0, 3, 2},
Param{0, 4, 2},
Param{0, 9, 10},
Param{0, 10, 10},
Param{0, 11, 10},
Param{1, 10, 10},
Param{0, (1 << 8) - 1, 2},
Param{0, (1 << 8), 2},
Param{0, (1 << 30) - 1, 2},
Param{-1000, 1000, 10},
Param{0, Limits::max(), 2},
Param{0, Limits::max(), 3},
Param{0, Limits::max(), 10},
Param{Limits::min(), 0},
Param{Limits::min(), Limits::max(), 2},
};
}
std::string ParamName(
const ::testing::TestParamInfo<log_uniform_i32::param_type>& info) {
const auto& p = info.param;
std::string name =
absl::StrCat("min_", p.min(), "__max_", p.max(), "__base_", p.base());
return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_SUITE_P(All, LogUniformIntChiSquaredTest,
::testing::ValuesIn(GenParams()), ParamName);
TEST(LogUniformIntDistributionTest, StabilityTest) {
using testing::ElementsAre;
absl::random_internal::sequence_urbg urbg(
{0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});
std::vector<int> output(6);
{
absl::log_uniform_int_distribution<int32_t> dist(0, 256);
std::generate(std::begin(output), std::end(output),
[&] { return dist(urbg); });
EXPECT_THAT(output, ElementsAre(256, 66, 4, 6, 57, 103));
}
urbg.reset();
{
absl::log_uniform_int_distribution<int32_t> dist(0, 256, 10);
std::generate(std::begin(output), std::end(output),
[&] { return dist(urbg); });
EXPECT_THAT(output, ElementsAre(8, 4, 0, 0, 0, 69));
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/log_uniform_int_distribution.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/log_uniform_int_distribution_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
934905c1-51aa-405d-a72f-d36f8d32e236 | cpp | google/arolla | qtype_utils | arolla/codegen/qtype_utils.cc | arolla/codegen/qtype_utils_test.cc | #include "arolla/codegen/qtype_utils.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/qtype/qtype.h"
namespace arolla::codegen {
std::vector<std::pair<std::string, QTypePtr>>
NamedQTypeVectorBuilder::Build() && {
return std::move(types_);
}
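// Appends a (name, qtype) entry whose name shares a common prefix with the
// previously added name: the new name is the first (length - strlen(suffix))
// characters of the previous name followed by `suffix`, where `length` is
// the total length of the new name.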
void NamedQTypeVectorBuilder::AddFromCommonPrefixWithPrevious(
size_t length, const char* suffix, QTypePtr qtype) {
std::string suffix_str(suffix);
CHECK_LE(suffix_str.size(), length);
size_t prefix_length = length - suffix_str.size();
absl::string_view previous_name =
types_.empty() ? "" : absl::string_view(types_.back().first);
CHECK_LE(prefix_length, previous_name.size());
types_.emplace_back(
absl::StrCat(previous_name.substr(0, prefix_length), suffix_str), qtype);
}
} | #include "arolla/codegen/qtype_utils.h"
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla::codegen {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
TEST(NamedQTypeVectorBuilderTest, NamedQTypeVectorBuilder) {
{
SCOPED_TRACE("Empty builder");
NamedQTypeVectorBuilder builder;
EXPECT_THAT(std::move(builder).Build(), IsEmpty());
}
{
SCOPED_TRACE("Single element");
NamedQTypeVectorBuilder builder;
builder.AddFromCommonPrefixWithPrevious(3, "foo", GetQType<int32_t>());
EXPECT_THAT(std::move(builder).Build(),
ElementsAre(Pair("foo", GetQType<int32_t>())));
}
{
SCOPED_TRACE("Many elements no prefix");
NamedQTypeVectorBuilder builder;
builder.AddFromCommonPrefixWithPrevious(3, "abc", GetQType<int32_t>());
builder.AddFromCommonPrefixWithPrevious(4, "defx", GetQType<double>());
builder.AddFromCommonPrefixWithPrevious(2, "gh",
GetQType<OptionalValue<float>>());
EXPECT_THAT(std::move(builder).Build(),
ElementsAre(Pair("abc", GetQType<int32_t>()),
Pair("defx", GetQType<double>()),
Pair("gh", GetQType<OptionalValue<float>>())));
}
{
SCOPED_TRACE("Many elements common prefix");
NamedQTypeVectorBuilder builder;
builder.AddFromCommonPrefixWithPrevious(3, "abc", GetQType<int32_t>());
builder.AddFromCommonPrefixWithPrevious(4, "de", GetQType<double>());
builder.AddFromCommonPrefixWithPrevious(5, "gh",
GetQType<OptionalValue<float>>());
EXPECT_THAT(std::move(builder).Build(),
ElementsAre(Pair("abc", GetQType<int32_t>()),
Pair("abde", GetQType<double>()),
Pair("abdgh", GetQType<OptionalValue<float>>())));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/qtype_utils.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/qtype_utils_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
2e1a0094-cd3f-49b9-9457-3114eaec0af7 | cpp | tensorflow/tensorflow | lstm_utils | tensorflow/compiler/mlir/lite/utils/lstm_utils.cc | tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <algorithm>
#include <optional>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/utils/utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
namespace mlir {
namespace TFL {
namespace {
Value CreateI32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
int32_t val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateF32SplatConst(OpBuilder* builder, ArrayRef<int64_t> shape,
float val, mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto attr = DenseElementsAttr::get(type, val);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreatTfF32ConstOp(OpBuilder* builder, ArrayRef<int64_t> shape, float val,
mlir::Location location) {
auto type = RankedTensorType::get(shape, builder->getF32Type());
auto ele_type = RankedTensorType::get({1}, builder->getF32Type());
auto attr = DenseElementsAttr::get(ele_type, val);
return builder->create<TF::ConstOp>(location, type, attr);
}
Value CreateI64DenseConst(OpBuilder* builder, ArrayRef<int64_t> shape,
ArrayRef<int64_t> values, mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(shape.size()),
builder->getIntegerType(64));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateI32DenseConst(OpBuilder* builder, ArrayRef<int32_t> values,
mlir::Location location) {
auto type = RankedTensorType::get(static_cast<int>(values.size()),
builder->getIntegerType(32));
auto attr = DenseElementsAttr::get(type, values);
return builder->create<arith::ConstantOp>(location, type, attr);
}
Value CreateNoneValue(OpBuilder* builder, mlir::Location location) {
return builder->create<TFL::NoValueOp>(location, builder->getNoneType(),
builder->getUnitAttr());
}
Value Transpose(OpBuilder* builder, Value value_to_transpose,
SmallVector<int32_t, 4> perm, RankedTensorType original_type,
mlir::Location location) {
auto perm_op = CreateI32DenseConst(builder, perm, location);
auto transpose_type = original_type;
auto transpose_shape =
llvm::to_vector<8>(llvm::map_range(perm, [transpose_type](int32_t dim) {
return transpose_type.getDimSize(dim);
}));
auto elem_type = transpose_type.getElementType();
auto result_type = RankedTensorType::get(transpose_shape, elem_type);
return builder->create<TF::TransposeOp>(location, result_type,
value_to_transpose, perm_op);
}
Value Transpose2D(OpBuilder* builder, Value value_to_transpose,
RankedTensorType type, mlir::Location location) {
SmallVector<int32_t, 4> perm = {1, 0};
return Transpose(builder, value_to_transpose, perm, type, location);
}
Value Reverse(OpBuilder* builder, Value value_to_reverse, int axis,
RankedTensorType type, mlir::Location location) {
auto axis_op = CreateI32SplatConst(builder, {1}, axis, location);
return builder->create<TF::ReverseV2Op>(location, type, value_to_reverse,
axis_op);
}
ArrayRef<int64_t> GetRankedTensorShape(Value value) {
return mlir::cast<RankedTensorType>(value.getType()).getShape();
}
Value SliceRankedTensor(OpBuilder* builder, Value input,
ArrayRef<int64_t> begin_shape,
ArrayRef<int64_t> begin_values,
ArrayRef<int64_t> size_shape,
ArrayRef<int64_t> size_values,
mlir::Location location) {
ArrayRef<int64_t> input_shape = GetRankedTensorShape(input);
for (int i = 0, end = input_shape.size(); i < end; i++) {
if (begin_values[i] < 0 ||
(begin_values[i] + size_values[i] > input_shape[i])) {
return CreateF32SplatConst(builder, size_shape, 0, location);
}
}
auto slice_i2c_begin =
CreateI64DenseConst(builder, begin_shape, begin_values, location);
auto slice_i2c_size =
CreateI64DenseConst(builder, size_shape, size_values, location);
return builder->create<TF::SliceOp>(
location,
RankedTensorType::get(
size_values,
mlir::cast<RankedTensorType>(input.getType()).getElementType()),
input, slice_i2c_begin, slice_i2c_size);
}
Value CreateStridedSliceOp(mlir::Location loc, ArrayRef<int64_t> output_shape,
Value input, ArrayRef<int32_t> begin,
ArrayRef<int32_t> end, ArrayRef<int32_t> strides,
int64_t begin_mask, int64_t end_mask,
int64_t ellipsis_mask, int64_t new_axis_mask,
int64_t shrink_axis_mask, OpBuilder* builder) {
auto output_type = RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input.getType()).getElementType());
auto begin_tensor = CreateI32DenseConst(builder, begin, loc);
auto end_tensor = CreateI32DenseConst(builder, end, loc);
auto strides_tensor = CreateI32DenseConst(builder, strides, loc);
return builder->create<TF::StridedSliceOp>(
loc, output_type, input, begin_tensor, end_tensor, strides_tensor,
builder->getI64IntegerAttr(begin_mask),
builder->getI64IntegerAttr(end_mask),
builder->getI64IntegerAttr(ellipsis_mask),
builder->getI64IntegerAttr(new_axis_mask),
builder->getI64IntegerAttr(shrink_axis_mask));
}
}
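// After transposition the combined kernel has shape
// [num_gates * n_cell, n_input + n_output]; the gate blocks are stacked in
// the order [cell, input, forget, output] (the input block is omitted when
// the input and forget gates are coupled). The first n_input columns hold
// the input weights and the remaining n_output columns hold the recurrent
// weights; the Set* helpers below slice out the corresponding sub-tensors.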
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToCellGate() {
SmallVector<int64_t, 2> begin_i2c_values = {0, 0};
input2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2c_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToInputGate() {
SmallVector<int64_t, 2> begin_i2i_values = {n_cell_, 0};
input2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_i2i_values,
weight_slice_shape_,
weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToForgetGate() {
int input_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_i2f_values = {input_forget_start, 0};
input2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2f_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForInputToOutputGate() {
int input_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_i2o_values = {input_output_start, 0};
input2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_i2o_values,
weight_slice_shape_, weight_slice_size_input_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToCellGate() {
SmallVector<int64_t, 2> begin_rec2c_values = {0, n_input_};
rec2cell_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2c_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToInputGate() {
SmallVector<int64_t, 2> begin_rec2i_values = {n_cell_, n_input_};
rec2input_ = couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, weight_transposed_,
weight_slice_shape_, begin_rec2i_values,
weight_slice_shape_,
weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToForgetGate() {
int rec_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 2> begin_rec2f_values = {rec_forget_start, n_input_};
rec2forget_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2f_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetWeightForRecurrentToOutputGate() {
int rec_output_start = couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 2> begin_rec2o_values = {rec_output_start, n_input_};
rec2output_ = SliceRankedTensor(
&builder_, weight_transposed_, weight_slice_shape_, begin_rec2o_values,
weight_slice_shape_, weight_slice_size_recurrent_values_,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToCellGate() {
SmallVector<int64_t, 1> begin_bias2c_values = {0};
bias2cell_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2c_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToInputGate() {
SmallVector<int64_t, 1> begin_bias2i_values = {n_cell_};
bias2input_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2i_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToForgetGate() {
int bias_forget_start = couple_input_forget_gates_ ? n_cell_ : 2 * n_cell_;
SmallVector<int64_t, 1> begin_bias2f_values = {bias_forget_start};
bias2forget_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2f_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetBiasToOutputGate() {
int bias_output_start =
couple_input_forget_gates_ ? 2 * n_cell_ : 3 * n_cell_;
SmallVector<int64_t, 1> begin_bias2o_values = {bias_output_start};
bias2output_ = SliceRankedTensor(&builder_, bias_, bias_slice_shape_,
begin_bias2o_values, bias_slice_shape_,
bias_size_values_, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjection() {
SmallVector<int64_t, 2> projection_slice_shape = {
1, num_cols_projection_transposed_};
SmallVector<int64_t, 2> projection_slice_size_values = {n_output_, n_cell_};
SmallVector<int64_t, 2> projection_slice_begin_values = {0, 0};
proj_weight_ =
!projection_
? none_
: SliceRankedTensor(
&builder_, projection_transposed_, projection_slice_shape,
projection_slice_begin_values, projection_slice_shape,
projection_slice_size_values, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetProjectionBias() {
proj_bias_ = !projection_type_
? none_
: CreateF32SplatConst(&builder_, {n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputActivationState() {
input_activation_state_ = CreateF32SplatConst(&builder_, {1, n_output_}, 0,
fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputCellState() {
input_cell_state_ =
CreateF32SplatConst(&builder_, {1, n_cell_}, 0, fused_func_op_.getLoc());
}
void ConvertLSTMCellSimpleToFusedLSTM::SetCellLayerNormCoefficients() {
cell_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetInputLayerNormCoefficients() {
input_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetForgetLayerNormCoefficients() {
forget_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::SetOutputLayerNormCoefficients() {
output_layer_norm_coefficients_ = none_;
}
void ConvertLSTMCellSimpleToFusedLSTM::GenerateFusedOpOperands() {
weight_transposed_ =
Transpose2D(&builder_, weight_, weight_type_, fused_func_op_.getLoc());
projection_transposed_ = Transpose2D(&builder_, projection_, projection_type_,
fused_func_op_.getLoc());
none_ = CreateNoneValue(&builder_, fused_func_op_.getLoc());
SetWeightForInputToCellGate();
SetWeightForInputToInputGate();
SetWeightForInputToForgetGate();
SetWeightForInputToOutputGate();
SetWeightForRecurrentToCellGate();
SetWeightForRecurrentToInputGate();
SetWeightForRecurrentToForgetGate();
SetWeightForRecurrentToOutputGate();
SetBiasToCellGate();
SetBiasToInputGate();
SetBiasToForgetGate();
SetBiasToOutputGate();
SetProjection();
SetProjectionBias();
SetInputActivationState();
SetInputCellState();
SetCellLayerNormCoefficients();
SetInputLayerNormCoefficients();
SetForgetLayerNormCoefficients();
SetOutputLayerNormCoefficients();
}
void ConvertLSTMCellSimpleToFusedLSTM::UpdateFuncSignature() {
SmallVector<int64_t, 2> output_shape{1, tensorflow::kTFDynamicSize};
auto input_types = fused_func_op_.getFunctionType().getInputs();
auto output_type = tensorflow::GetTypeFromTFTensorShape(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
fused_func_op_.setType(mlir::FunctionType::get(fused_func_op_.getContext(),
input_types, output_type));
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::RewriteFunc() {
LogicalResult result = Initialize();
if (failed(result)) {
return result;
}
UpdateFuncSignature();
GenerateFusedOpOperands();
SmallVector<int64_t, 2> output_shape = {1, n_output_};
auto result_type = mlir::RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
lstm_ = builder_.create<mlir::TFL::LSTMOp>(
fused_func_op_.getLoc(), result_type, input_, input2input_, input2forget_,
input2cell_, input2output_, rec2input_, rec2forget_, rec2cell_,
rec2output_, none_,
none_,
none_, bias2input_, bias2forget_, bias2cell_,
bias2output_, proj_weight_, proj_bias_, input_activation_state_,
input_cell_state_, input_layer_norm_coefficients_,
forget_layer_norm_coefficients_, cell_layer_norm_coefficients_,
output_layer_norm_coefficients_, builder_.getStringAttr("TANH"),
builder_.getF32FloatAttr(10.0), builder_.getF32FloatAttr(0.0),
mlir::TFL::LSTMKernelTypeAttr::get(builder_.getContext(),
mlir::TFL::LSTMKernelType::FULL),
mlir::BoolAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr());
SmallVector<int64_t, 2> func_output_shape = {1, tensorflow::kTFDynamicSize};
auto func_result_type = tensorflow::GetTypeFromTFTensorShape(
func_output_shape,
mlir::cast<RankedTensorType>(input_.getType()).getElementType());
auto tensor_cast = builder_.create<mlir::tensor::CastOp>(
fused_func_op_.getLoc(), func_result_type, lstm_.getResult());
builder_.create<mlir::func::ReturnOp>(fused_func_op_.getLoc(),
tensor_cast.getResult());
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::InitializeFromFuncAttributes() {
auto attr = fused_func_op_->getAttrOfType<StringAttr>(kTFImplements);
if (!attr) {
return fused_func_op_.emitError()
<< "Invalid function attribute, expected " << kTFImplements
<< " attribute "
"not found";
}
llvm::SmallVector<llvm::StringRef, 4> attr_tokens;
attr.getValue().split(attr_tokens, ",");
if (attr_tokens.empty()) {
return fused_func_op_.emitError()
<< kTFImplements << " attribute should be set";
}
if (GetCompositeOpName().str() != attr_tokens[0]) {
return fused_func_op_.emitError()
<< "Unexpected interface for the composite op. Expected: "
<< GetCompositeOpName() << " Actual: " << attr_tokens[0];
}
couple_input_forget_gates_ =
std::find(attr_tokens.begin() + 1, attr_tokens.end(),
kCoupleInputForgetGates) != attr_tokens.end();
return success();
}
LogicalResult ConvertLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(InitializeFromFuncAttributes())) {
return fused_func_op_.emitError()
<< "Expected function attributes were not set on the function "
"encapsulating the composite op";
}
num_gates_ = couple_input_forget_gates_ ? 3 : 4;
input_ = fused_func_op_.getArgument(0);
bias_ = fused_func_op_.getArgument(2);
weight_ = fused_func_op_.getArgument(1);
weight_type_ = mlir::cast<RankedTensorType>(weight_.getType());
if (weight_type_.getRank() != 2) {
return fused_func_op_.emitError() << "The weight tensor was not of rank 2";
}
if (weight_type_.getDimSize(1) % num_gates_ != 0) {
return fused_func_op_.emitError()
<< "Invalid dimension 1 of weight tensor, "
"should be divisible by the number of gates";
}
n_cell_ = weight_type_.getDimSize(1) / num_gates_;
projection_ = fused_func_op_.getArgument(3);
projection_type_ = mlir::cast<RankedTensorType>(projection_.getType());
if (projection_type_.getRank() != 2) {
n_output_ = n_cell_;
} else {
n_output_ = projection_type_.getDimSize(1);
}
n_input_ = weight_type_.getDimSize(0) - n_output_;
num_cols_weight_transposed_ = weight_type_.getDimSize(0);
num_cols_projection_transposed_ = projection_type_.getDimSize(0);
bias_slice_shape_ = {n_cell_};
bias_size_values_ = {n_cell_};
weight_slice_shape_ = {1, num_cols_weight_transposed_};
weight_slice_size_input_values_ = {n_cell_, n_input_};
weight_slice_size_recurrent_values_ = {n_cell_, n_output_};
return success();
}
LogicalResult ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::Initialize() {
if (failed(ConvertLSTMCellSimpleToFusedLSTM::Initialize())) {
return fused_func_op_.emitError()
<< "Specified LayerNormalizedLSTMCellSimple was not of the expected "
"interface and cannot not be converted to the fused LSTM op";
}
layer_norm_scale_ = fused_func_op_.getArgument(4);
layer_norm_scale_type_ =
mlir::cast<RankedTensorType>(layer_norm_scale_.getType());
if (layer_norm_scale_type_.getRank() != 1) {
return fused_func_op_.emitError()
<< "The layer_norm_scale tensor was not of rank 1";
}
layer_norm_slice_shape_ = {n_cell_};
layer_norm_size_values_ = {n_cell_};
return success();
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetCellLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_cell_layer_norm_values = {0};
cell_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_cell_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetInputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_input_layer_norm_values = {n_cell_};
input_layer_norm_coefficients_ =
couple_input_forget_gates_
? none_
: SliceRankedTensor(
&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_input_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetForgetLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_forget_layer_norm_values = {2 * n_cell_};
forget_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_forget_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
void ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM::
SetOutputLayerNormCoefficients() {
SmallVector<int64_t, 1> begin_output_layer_norm_values = {3 * n_cell_};
output_layer_norm_coefficients_ =
SliceRankedTensor(&builder_, layer_norm_scale_, layer_norm_slice_shape_,
begin_output_layer_norm_values, layer_norm_slice_shape_,
layer_norm_size_values_, fused_func_op_.getLoc());
}
TF::ConstOp Create1DConstantOp(const std::vector<int>& value, Location loc,
OpBuilder* builder) {
auto type =
mlir::RankedTensorType::get(value.size(), builder->getIntegerType(32));
auto dense_values = mlir::DenseIntElementsAttr::get(type, value);
return builder->create<TF::ConstOp>(loc, dense_values);
}
TF::ConstOp CreateScalarConstantOp(int value, Location loc,
OpBuilder* builder) {
return builder->create<TF::ConstOp>(loc, builder->getI32IntegerAttr(value));
}
TF::ReshapeOp CreateFlattenOP(const Value& input, Location loc,
OpBuilder* builder) {
auto output_shape = Create1DConstantOp({-1}, loc, builder);
return builder->create<mlir::TF::ReshapeOp>(
loc,
input,
output_shape.getResult());
}
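// Splits `input` into `splits` equally sized tensors along `axis` using a
// tf.SplitV op; fails when the axis is invalid or the dimension size is not
// divisible by `splits`.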
LogicalResult CreateEqualSizeSplitVOp(Value input, int axis, int splits,
Location loc, OpBuilder* builder,
Operation** result) {
auto input_type = mlir::cast<RankedTensorType>(input.getType());
SmallVector<int64_t, 4> output_shape;
int size_of_splits;
if (input_type.getRank() < axis || axis < 0) return failure();
for (int i = 0; i < input_type.getRank(); ++i) {
int64_t dim = input_type.getDimSize(i);
if (i == axis) {
if (dim % splits != 0) {
return failure();
}
size_of_splits = dim / splits;
output_shape.push_back(size_of_splits);
} else {
output_shape.push_back(dim);
}
}
SmallVector<mlir::Type, 4> output_types;
for (int i = 0; i < splits; ++i) {
output_types.push_back(
mlir::RankedTensorType::get(output_shape, input_type.getElementType()));
}
auto size_of_splits_op = Create1DConstantOp(
{size_of_splits, size_of_splits, size_of_splits, size_of_splits}, loc,
builder);
auto axis_op = CreateScalarConstantOp(axis, loc, builder);
*result = builder->create<TF::SplitVOp>(loc, output_types, input,
size_of_splits_op.getResult(),
axis_op.getResult());
return success();
}
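// Convenience overload: converts the Keras LSTM layer with the indy
// (independent/diagonal recurrent weights) variant disabled.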
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder) {
return ConvertKerasLSTMLayer(func_op, builder, false);
}
LogicalResult ConvertKerasLSTMLayer(mlir::func::FuncOp func_op,
OpBuilder* builder, bool indy) {
Value input = func_op.getArgument(0);
Value output_init_state = func_op.getArgument(1);
Value hidden_init_state = func_op.getArgument(2);
Value weight_kernel = func_op.getArgument(3);
Value recurrent_kernel = func_op.getArgument(4);
Value bias = func_op.getArgument(5);
if (func_op.getNumResults() != 5) return failure();
auto time_major_attr = func_op->getAttrOfType<BoolAttr>("tf.time_major");
if (time_major_attr == nullptr) return failure();
bool time_majored = time_major_attr.getValue();
auto input_type = mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
if (!input_type) {
func_op.emitError() << "Input type is not a ranked tensor type";
return failure();
}
auto final_inputs = input;
auto final_input_type = input_type;
auto go_backwards_attr = func_op->getAttrOfType<BoolAttr>("tf.go_backwards");
if (go_backwards_attr != nullptr && go_backwards_attr.getValue()) {
int time_dim = time_majored ? 0 : 1;
final_inputs = Reverse(builder, final_inputs, time_dim, final_input_type,
func_op.getLoc());
}
int64_t batch = time_majored ? final_input_type.getDimSize(1)
: final_input_type.getDimSize(0);
int64_t time = time_majored ? final_input_type.getDimSize(0)
: final_input_type.getDimSize(1);
RankedTensorType weight_type =
mlir::cast<RankedTensorType>(weight_kernel.getType());
if (weight_type.getRank() != 2)
return func_op.emitError() << "The weight should be rank of 2";
Value transposed_weight_kernel =
Transpose2D(builder, weight_kernel, weight_type, func_op.getLoc());
RankedTensorType recurrent_kernel_type =
mlir::cast<RankedTensorType>(recurrent_kernel.getType());
const int64_t n_output = recurrent_kernel_type.getDimSize(0);
Value transpose_recurrent_kernel = Transpose2D(
builder, recurrent_kernel, recurrent_kernel_type, func_op.getLoc());
const int splits = 4;
Operation* weights_array;
if (failed(CreateEqualSizeSplitVOp(transposed_weight_kernel, 0, splits,
func_op.getLoc(), builder,
&weights_array)))
return failure();
Operation* recurrent_weights_array;
if (failed(CreateEqualSizeSplitVOp(transpose_recurrent_kernel, 0, splits,
func_op.getLoc(), builder,
&recurrent_weights_array)))
return failure();
Value recurrent_to_input_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(0),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(0);
Value recurrent_to_forget_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(1),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(1);
Value recurrent_to_cell_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(2),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(2);
Value recurrent_to_output_weights =
indy ? mlir::cast<Value>(
CreateFlattenOP(recurrent_weights_array->getResult(3),
func_op.getLoc(), builder)
.getResult())
: recurrent_weights_array->getResult(3);
Operation* bias_array;
if (failed(CreateEqualSizeSplitVOp(bias, 0, splits, func_op.getLoc(), builder,
&bias_array)))
return failure();
SmallVector<int64_t, 3> output_shape;
if (time_majored) {
output_shape = {time, batch, n_output};
} else {
output_shape = {batch, time, n_output};
}
auto result_type = mlir::RankedTensorType::get(
output_shape,
mlir::cast<RankedTensorType>(final_inputs.getType()).getElementType());
Value none = CreateNoneValue(builder, func_op.getLoc());
auto lstm = builder->create<mlir::TFL::UnidirectionalSequenceLSTMOp>(
func_op.getLoc(), result_type, final_inputs,
weights_array->getResult(0),
weights_array->getResult(1),
weights_array->getResult(2),
weights_array->getResult(3),
recurrent_to_input_weights,
recurrent_to_forget_weights,
recurrent_to_cell_weights,
recurrent_to_output_weights,
none,
none,
none,
bias_array->getResult(0),
bias_array->getResult(1),
bias_array->getResult(2),
bias_array->getResult(3),
none,
none,
output_init_state,
hidden_init_state,
none,
none,
none,
none,
builder->getStringAttr("TANH"),
builder->getF32FloatAttr(10.0),
builder->getF32FloatAttr(0.0),
builder->getBoolAttr(time_majored),
mlir::BoolAttr(),
builder->getBoolAttr(indy),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr(),
mlir::TypeAttr());
auto final_output_full_sequences = lstm.getResult();
SmallVector<int64_t, 2> last_output_shape({batch, n_output});
SmallVector<int32_t, 3> end({0, 0, 0});
SmallVector<int32_t, 3> strides({1, 1, 1});
SmallVector<int32_t, 3> begin;
int64_t new_axis_mask = 0;
int64_t ellipsis_mask = 0;
int64_t begin_mask;
int64_t end_mask;
int64_t shrink_axis_mask;
if (time_majored) {
begin_mask = 6;
end_mask = 6;
shrink_axis_mask = 1;
begin = {-1, 0, 0};
} else {
begin_mask = 5;
end_mask = 5;
shrink_axis_mask = 2;
begin = {0, -1, 0};
}
auto last_output = CreateStridedSliceOp(
func_op.getLoc(), last_output_shape, final_output_full_sequences, begin,
end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask,
shrink_axis_mask, builder);
SmallVector<Value, 5> outputs;
SmallVector<Type, 5> output_types;
outputs.push_back(last_output);
output_types.push_back(last_output.getType());
outputs.push_back(final_output_full_sequences);
output_types.push_back(final_output_full_sequences.getType());
for (int i = 2; i < 5; ++i) {
auto result_type =
mlir::dyn_cast<RankedTensorType>(func_op.getResultTypes()[i]);
outputs.push_back(CreatTfF32ConstOp(builder, result_type.getShape(), 0.0f,
func_op.getLoc()));
output_types.push_back(result_type);
}
func_op.setType(mlir::FunctionType::get(func_op.getContext(),
func_op.getFunctionType().getInputs(),
output_types));
builder->create<mlir::func::ReturnOp>(func_op.getLoc(), outputs);
return success();
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/lstm_utils.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
func::FuncOp createLstmCompositeFunc(mlir::Builder* builder, bool ln,
bool cifg) {
SmallVector<int64_t, 2> input_shape{1, 2};
SmallVector<int64_t, 2> weight_shape{3, 12};
SmallVector<int64_t, 1> bias_shape{2};
SmallVector<int64_t, 2> projection_shape{1, 2};
SmallVector<int64_t, 1> layer_norm_scale{4};
SmallVector<int64_t, 2> output_shape{1, 2};
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto weight_type = RankedTensorType::get(weight_shape, builder->getF32Type());
auto bias_type = RankedTensorType::get(bias_shape, builder->getF32Type());
auto projection_type =
RankedTensorType::get(projection_shape, builder->getF32Type());
auto layer_norm_scale_type =
RankedTensorType::get(layer_norm_scale, builder->getF32Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 4> input_types{input_type, weight_type, bias_type,
projection_type,
layer_norm_scale_type};
auto func_type = builder->getFunctionType(input_types, output_type);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
std::vector<std::string> attributes;
if (ln) {
attributes.push_back(kLayerNormalizedLstmCellSimple);
} else {
attributes.push_back(kLstmCellSimple);
}
if (cifg) {
attributes.push_back(kCoupleInputForgetGates);
}
mlir::StringAttr attr_values =
builder->getStringAttr(llvm::join(attributes, ","));
func->setAttr(kTFImplements, attr_values);
return func;
}
class LstmUtilsTest : public ::testing::Test {
protected:
LstmUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<arith::ArithDialect, mlir::func::FuncDialect,
tensor::TensorDialect, mlir::TF::TensorFlowDialect,
TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
fused_lstm_func_ = createLstmCompositeFunc(builder_.get(), false, false);
fused_lstm_func_cifg_ =
createLstmCompositeFunc(builder_.get(), false, true);
fused_ln_lstm_func_ = createLstmCompositeFunc(builder_.get(), true, false);
}
void TearDown() override {
fused_lstm_func_.erase();
fused_lstm_func_cifg_.erase();
fused_ln_lstm_func_.erase();
builder_.reset();
}
func::FuncOp fused_lstm_func_;
func::FuncOp fused_lstm_func_cifg_;
func::FuncOp fused_ln_lstm_func_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(LstmUtilsTest, ConvertLSTMCellSimple) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_.dump();
EXPECT_EQ(
fused_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
auto transpose_op = fused_lstm_func_.getBody().front().begin();
transpose_op++;
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(0),
3);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getOperand(0).getType())
.getDimSize(1),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(0),
12);
EXPECT_EQ(mlir::cast<RankedTensorType>(transpose_op->getResult(0).getType())
.getDimSize(1),
3);
auto it = fused_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType())
.getElementType()
.isF32());
EXPECT_TRUE(
mlir::cast<ElementsAttr>(mlir::cast<mlir::arith::ConstantOp>(
it->getOpOperand(15).get().getDefiningOp())
.getValue())
.getValues<FloatAttr>()[0]
.getValue()
.isExactlyValue(0.0f));
EXPECT_EQ(fused_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
TEST_F(LstmUtilsTest, ConvertLSTMCellSimpleToFusedLSTMCoupleInputForget) {
mlir::TFL::ConvertLSTMCellSimpleToFusedLSTM convert(fused_lstm_func_cifg_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_lstm_func_cifg_.dump();
llvm::SmallVector<std::string, 2> attributes{kLstmCellSimple,
kCoupleInputForgetGates};
EXPECT_EQ(fused_lstm_func_cifg_->getAttrOfType<StringAttr>(kTFImplements)
.getValue(),
llvm::join(attributes, ","));
auto it = fused_lstm_func_cifg_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(1).getType()));
}
TEST_F(LstmUtilsTest, ConvertLayerNormLSTMCellSimpleToFusedLSTM) {
mlir::TFL::ConvertLayerNormalizedLSTMCellSimpleToFusedLSTM convert(
fused_ln_lstm_func_);
auto result = convert.RewriteFunc();
EXPECT_FALSE(failed(result));
fused_ln_lstm_func_.dump();
EXPECT_EQ(
fused_ln_lstm_func_->getAttrOfType<StringAttr>(kTFImplements).getValue(),
convert.GetCompositeOpName());
EXPECT_EQ(fused_ln_lstm_func_.getNumArguments(), 5);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto it = fused_ln_lstm_func_.getBody().back().rbegin();
EXPECT_EQ(it->getName().getStringRef(),
mlir::func::ReturnOp::getOperationName());
it++;
it++;
EXPECT_EQ(it->getName().getStringRef(),
mlir::TFL::LSTMOp::getOperationName());
EXPECT_EQ(it->getNumOperands(), 24);
EXPECT_EQ(it->getNumResults(), 1);
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(1).getType()));
EXPECT_FALSE(mlir::isa<NoneType>(it->getOperand(20).getType()));
EXPECT_EQ(mlir::cast<RankedTensorType>(it->getOperand(20).getType())
.getShape()
.size(),
1);
EXPECT_EQ(
mlir::cast<RankedTensorType>(it->getOperand(20).getType()).getDimSize(0),
3);
EXPECT_EQ(fused_ln_lstm_func_.getFunctionType().getNumResults(), 1);
auto output_types = fused_ln_lstm_func_.getFunctionType().getResults();
SmallVector<int64_t, 2> output_shape{1, mlir::ShapedType::kDynamic};
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getShape().size(),
output_shape.size());
for (int i = 0; i < output_shape.size(); i++) {
EXPECT_EQ(mlir::cast<RankedTensorType>(output_types[0]).getDimSize(i),
output_shape[i]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/lstm_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa95446b-57f9-41a9-b640-95014161ac37 | cpp | google/libphonenumber | phonenumber_offline_geocoder | cpp/src/phonenumbers/geocoding/phonenumber_offline_geocoder.cc | cpp/test/phonenumbers/geocoding/phonenumber_offline_geocoder_test.cc | #include "phonenumbers/geocoding/phonenumber_offline_geocoder.h"
#include <algorithm>
#include <string>
#include <unicode/unistr.h>
#include "phonenumbers/geocoding/area_code_map.h"
#include "phonenumbers/geocoding/geocoding_data.h"
#include "phonenumbers/geocoding/mapping_file_provider.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/stl_util.h"
#include "absl/synchronization/mutex.h"
namespace i18n {
namespace phonenumbers {
using icu::UnicodeString;
using std::string;
namespace {
bool IsLowerThan(const char* s1, const char* s2) {
return strcmp(s1, s2) < 0;
}
}
PhoneNumberOfflineGeocoder::PhoneNumberOfflineGeocoder() {
Init(get_country_calling_codes(), get_country_calling_codes_size(),
get_country_languages, get_prefix_language_code_pairs(),
get_prefix_language_code_pairs_size(), get_prefix_descriptions);
}
PhoneNumberOfflineGeocoder::PhoneNumberOfflineGeocoder(
const int* country_calling_codes, int country_calling_codes_size,
country_languages_getter get_country_languages,
const char** prefix_language_code_pairs,
int prefix_language_code_pairs_size,
prefix_descriptions_getter get_prefix_descriptions) {
Init(country_calling_codes, country_calling_codes_size,
get_country_languages, prefix_language_code_pairs,
prefix_language_code_pairs_size, get_prefix_descriptions);
}
void PhoneNumberOfflineGeocoder::Init(
const int* country_calling_codes, int country_calling_codes_size,
country_languages_getter get_country_languages,
const char** prefix_language_code_pairs,
int prefix_language_code_pairs_size,
prefix_descriptions_getter get_prefix_descriptions) {
phone_util_ = PhoneNumberUtil::GetInstance();
provider_.reset(new MappingFileProvider(country_calling_codes,
country_calling_codes_size,
get_country_languages));
prefix_language_code_pairs_ = prefix_language_code_pairs;
prefix_language_code_pairs_size_ = prefix_language_code_pairs_size;
get_prefix_descriptions_ = get_prefix_descriptions;
}
PhoneNumberOfflineGeocoder::~PhoneNumberOfflineGeocoder() {
absl::MutexLock l(&mu_);
gtl::STLDeleteContainerPairSecondPointers(
available_maps_.begin(), available_maps_.end());
}
const AreaCodeMap* PhoneNumberOfflineGeocoder::GetPhonePrefixDescriptions(
int prefix, const string& language, const string& script,
const string& region) const {
string filename;
provider_->GetFileName(prefix, language, script, region, &filename);
if (filename.empty()) {
return NULL;
}
AreaCodeMaps::const_iterator it = available_maps_.find(filename);
if (it == available_maps_.end()) {
return LoadAreaCodeMapFromFile(filename);
}
return it->second;
}
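// Lazily loads the area-code map stored under `filename` and caches it in
// available_maps_; the filename is located by binary search over the sorted
// prefix/language-code pairs, returning NULL when it is unknown.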
const AreaCodeMap* PhoneNumberOfflineGeocoder::LoadAreaCodeMapFromFile(
const string& filename) const {
const char** const prefix_language_code_pairs_end =
prefix_language_code_pairs_ + prefix_language_code_pairs_size_;
const char** const prefix_language_code_pair =
std::lower_bound(prefix_language_code_pairs_,
prefix_language_code_pairs_end,
filename.c_str(), IsLowerThan);
if (prefix_language_code_pair != prefix_language_code_pairs_end &&
filename.compare(*prefix_language_code_pair) == 0) {
AreaCodeMap* const m = new AreaCodeMap();
m->ReadAreaCodeMap(get_prefix_descriptions_(
prefix_language_code_pair - prefix_language_code_pairs_));
return available_maps_.insert(AreaCodeMaps::value_type(filename, m))
.first->second;
}
return NULL;
}
string PhoneNumberOfflineGeocoder::GetCountryNameForNumber(
const PhoneNumber& number, const Locale& language) const {
string region_code;
phone_util_->GetRegionCodeForNumber(number, &region_code);
return GetRegionDisplayName(&region_code, language);
}
string PhoneNumberOfflineGeocoder::GetRegionDisplayName(
const string* region_code, const Locale& language) const {
if (region_code == NULL || region_code->compare("ZZ") == 0 ||
region_code->compare(
PhoneNumberUtil::kRegionCodeForNonGeoEntity) == 0) {
return "";
}
UnicodeString udisplay_country;
icu::Locale("", region_code->c_str()).getDisplayCountry(
language, udisplay_country);
string display_country;
udisplay_country.toUTF8String(display_country);
return display_country;
}
string PhoneNumberOfflineGeocoder::GetDescriptionForValidNumber(
const PhoneNumber& number, const Locale& language) const {
const char* const description = GetAreaDescription(
number, language.getLanguage(), "", language.getCountry());
return *description != '\0'
? description
: GetCountryNameForNumber(number, language);
}
string PhoneNumberOfflineGeocoder::GetDescriptionForValidNumber(
const PhoneNumber& number, const Locale& language,
const string& user_region) const {
string region_code;
phone_util_->GetRegionCodeForNumber(number, &region_code);
if (user_region.compare(region_code) == 0) {
return GetDescriptionForValidNumber(number, language);
}
return GetRegionDisplayName(&region_code, language);
}
string PhoneNumberOfflineGeocoder::GetDescriptionForNumber(
const PhoneNumber& number, const Locale& locale) const {
PhoneNumberUtil::PhoneNumberType number_type =
phone_util_->GetNumberType(number);
if (number_type == PhoneNumberUtil::UNKNOWN) {
return "";
} else if (!phone_util_->IsNumberGeographical(number_type,
number.country_code())) {
return GetCountryNameForNumber(number, locale);
}
return GetDescriptionForValidNumber(number, locale);
}
string PhoneNumberOfflineGeocoder::GetDescriptionForNumber(
const PhoneNumber& number, const Locale& language,
const string& user_region) const {
PhoneNumberUtil::PhoneNumberType number_type =
phone_util_->GetNumberType(number);
if (number_type == PhoneNumberUtil::UNKNOWN) {
return "";
} else if (!phone_util_->IsNumberGeographical(number_type,
number.country_code())) {
return GetCountryNameForNumber(number, language);
}
return GetDescriptionForValidNumber(number, language, user_region);
}
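// Returns the area description for `number` in the requested language,
// falling back to the English mapping when nothing is found and
// MayFallBackToEnglish() permits it; returns "" when no description exists.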
const char* PhoneNumberOfflineGeocoder::GetAreaDescription(
const PhoneNumber& number, const string& lang, const string& script,
const string& region) const {
const int country_calling_code = number.country_code();
const int phone_prefix = country_calling_code;
absl::MutexLock l(&mu_);
const AreaCodeMap* const descriptions = GetPhonePrefixDescriptions(
phone_prefix, lang, script, region);
const char* description = descriptions ? descriptions->Lookup(number) : NULL;
if ((!description || *description == '\0') && MayFallBackToEnglish(lang)) {
const AreaCodeMap* default_descriptions = GetPhonePrefixDescriptions(
phone_prefix, "en", "", "");
if (!default_descriptions) {
return "";
}
description = default_descriptions->Lookup(number);
}
return description ? description : "";
}
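// English descriptions are used as a fallback for every language except
// Chinese ("zh"), Japanese ("ja") and Korean ("ko").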
bool PhoneNumberOfflineGeocoder::MayFallBackToEnglish(
const string& lang) const {
return lang.compare("zh") && lang.compare("ja") && lang.compare("ko");
}
}
} | #include "phonenumbers/geocoding/phonenumber_offline_geocoder.h"
#include <gtest/gtest.h>
#include <unicode/locid.h>
#include "phonenumbers/geocoding/geocoding_test_data.h"
#include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumber.pb.h"
namespace i18n {
namespace phonenumbers {
using icu::Locale;
namespace {
PhoneNumber MakeNumber(int32 country_code, uint64 national_number) {
PhoneNumber n;
n.set_country_code(country_code);
n.set_national_number(national_number);
return n;
}
const Locale kEnglishLocale = Locale("en", "GB");
const Locale kFrenchLocale = Locale("fr", "FR");
const Locale kGermanLocale = Locale("de", "DE");
const Locale kItalianLocale = Locale("it", "IT");
const Locale kKoreanLocale = Locale("ko", "KR");
const Locale kSimplifiedChineseLocale = Locale("zh", "CN");
}
class PhoneNumberOfflineGeocoderTest : public testing::Test {
protected:
PhoneNumberOfflineGeocoderTest() :
KO_NUMBER1(MakeNumber(82, 22123456UL)),
KO_NUMBER2(MakeNumber(82, 322123456UL)),
KO_NUMBER3(MakeNumber(82, uint64{6421234567})),
KO_INVALID_NUMBER(MakeNumber(82, 1234UL)),
KO_MOBILE(MakeNumber(82, uint64{101234567})),
US_NUMBER1(MakeNumber(1, uint64{6502530000})),
US_NUMBER2(MakeNumber(1, uint64{6509600000})),
US_NUMBER3(MakeNumber(1, 2128120000UL)),
US_NUMBER4(MakeNumber(1, uint64{6174240000})),
US_INVALID_NUMBER(MakeNumber(1, 123456789UL)),
BS_NUMBER1(MakeNumber(1, 2423651234UL)),
AU_NUMBER(MakeNumber(61, 236618300UL)),
NUMBER_WITH_INVALID_COUNTRY_CODE(MakeNumber(999, 2423651234UL)),
INTERNATIONAL_TOLL_FREE(MakeNumber(800, 12345678UL)) {
}
virtual void SetUp() {
geocoder_.reset(
new PhoneNumberOfflineGeocoder(
get_test_country_calling_codes(),
get_test_country_calling_codes_size(),
get_test_country_languages,
get_test_prefix_language_code_pairs(),
get_test_prefix_language_code_pairs_size(),
get_test_prefix_descriptions));
}
protected:
scoped_ptr<PhoneNumberOfflineGeocoder> geocoder_;
const PhoneNumber KO_NUMBER1;
const PhoneNumber KO_NUMBER2;
const PhoneNumber KO_NUMBER3;
const PhoneNumber KO_INVALID_NUMBER;
const PhoneNumber KO_MOBILE;
const PhoneNumber US_NUMBER1;
const PhoneNumber US_NUMBER2;
const PhoneNumber US_NUMBER3;
const PhoneNumber US_NUMBER4;
const PhoneNumber US_INVALID_NUMBER;
const PhoneNumber BS_NUMBER1;
const PhoneNumber AU_NUMBER;
const PhoneNumber NUMBER_WITH_INVALID_COUNTRY_CODE;
const PhoneNumber INTERNATIONAL_TOLL_FREE;
};
TEST_F(PhoneNumberOfflineGeocoderTest,
TestGetDescriptionForNumberWithNoDataFile) {
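  // The zh-CN expectation is the Chinese country name for the United States,
  // written as UTF-8 byte escapes.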
EXPECT_EQ("\xe7""\xbe""\x8e""\xe5""\x9b""\xbd",
geocoder_->GetDescriptionForNumber(US_NUMBER1,
kSimplifiedChineseLocale));
EXPECT_EQ("Bahamas",
geocoder_->GetDescriptionForNumber(BS_NUMBER1, Locale("en", "US")));
EXPECT_EQ("Australia",
geocoder_->GetDescriptionForNumber(AU_NUMBER, Locale("en", "US")));
EXPECT_EQ("",
geocoder_->GetDescriptionForNumber(NUMBER_WITH_INVALID_COUNTRY_CODE,
Locale("en", "US")));
EXPECT_EQ("",
geocoder_->GetDescriptionForNumber(INTERNATIONAL_TOLL_FREE,
Locale("en", "US")));
}
TEST_F(PhoneNumberOfflineGeocoderTest,
TestGetDescriptionForNumberWithMissingPrefix) {
EXPECT_EQ("United States",
geocoder_->GetDescriptionForNumber(US_NUMBER4, Locale("en", "US")));
}
TEST_F(PhoneNumberOfflineGeocoderTest, TestGetDescriptionForNumber_en_US) {
EXPECT_EQ("CA",
geocoder_->GetDescriptionForNumber(US_NUMBER1, Locale("en", "US")));
EXPECT_EQ("Mountain View, CA",
geocoder_->GetDescriptionForNumber(US_NUMBER2, Locale("en", "US")));
EXPECT_EQ("New York, NY",
geocoder_->GetDescriptionForNumber(US_NUMBER3, Locale("en", "US")));
}
TEST_F(PhoneNumberOfflineGeocoderTest, TestGetDescriptionForKoreanNumber) {
EXPECT_EQ("Seoul",
geocoder_->GetDescriptionForNumber(KO_NUMBER1, kEnglishLocale));
EXPECT_EQ("Incheon",
geocoder_->GetDescriptionForNumber(KO_NUMBER2, kEnglishLocale));
EXPECT_EQ("Jeju",
geocoder_->GetDescriptionForNumber(KO_NUMBER3, kEnglishLocale));
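  // Korean spellings of Seoul and Incheon, written as UTF-8 byte escapes.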
EXPECT_EQ("\xec""\x84""\x9c""\xec""\x9a""\xb8",
geocoder_->GetDescriptionForNumber(KO_NUMBER1, kKoreanLocale));
EXPECT_EQ("\xec""\x9d""\xb8""\xec""\xb2""\x9c",
geocoder_->GetDescriptionForNumber(KO_NUMBER2, kKoreanLocale));
}
TEST_F(PhoneNumberOfflineGeocoderTest, TestGetDescriptionForFallBack) {
EXPECT_EQ("Kalifornien",
geocoder_->GetDescriptionForNumber(US_NUMBER1, kGermanLocale));
EXPECT_EQ("New York, NY",
geocoder_->GetDescriptionForNumber(US_NUMBER3, kGermanLocale));
EXPECT_EQ("CA",
geocoder_->GetDescriptionForNumber(US_NUMBER1, kItalianLocale));
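  // With the Korean locale this falls back to the Korean country name for
  // South Korea, written as UTF-8 byte escapes.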
EXPECT_EQ("\xeb""\x8c""\x80""\xed""\x95""\x9c""\xeb""\xaf""\xbc""\xea""\xb5"
"\xad",
geocoder_->GetDescriptionForNumber(KO_NUMBER3, kKoreanLocale));
}
TEST_F(PhoneNumberOfflineGeocoderTest,
TestGetDescriptionForNumberWithUserRegion) {
EXPECT_EQ("Estados Unidos",
geocoder_->GetDescriptionForNumber(US_NUMBER1, Locale("es", "ES"),
"IT"));
EXPECT_EQ("Estados Unidos",
geocoder_->GetDescriptionForNumber(US_NUMBER1, Locale("es", "ES"),
"ZZ"));
EXPECT_EQ("Kalifornien",
geocoder_->GetDescriptionForNumber(US_NUMBER1, kGermanLocale,
"US"));
EXPECT_EQ("CA",
geocoder_->GetDescriptionForNumber(US_NUMBER1, kFrenchLocale,
"US"));
EXPECT_EQ("",
geocoder_->GetDescriptionForNumber(US_INVALID_NUMBER,
kEnglishLocale,
"US"));
}
TEST_F(PhoneNumberOfflineGeocoderTest, TestGetDescriptionForInvalidNumber) {
EXPECT_EQ("", geocoder_->GetDescriptionForNumber(KO_INVALID_NUMBER,
kEnglishLocale));
EXPECT_EQ("", geocoder_->GetDescriptionForNumber(US_INVALID_NUMBER,
kEnglishLocale));
}
TEST_F(PhoneNumberOfflineGeocoderTest,
TestGetDescriptionForNonGeographicalNumberWithGeocodingPrefix) {
EXPECT_EQ("South Korea",
geocoder_->GetDescriptionForNumber(KO_MOBILE, kEnglishLocale));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/geocoding/phonenumber_offline_geocoder.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/geocoding/phonenumber_offline_geocoder_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
20dd759b-3844-48bd-aad9-73b89823041d | cpp | tensorflow/tensorflow | finalize_dataset_op | tensorflow/core/kernels/data/finalize_dataset_op.cc | tensorflow/core/kernels/data/finalize_dataset_op_test.cc | #include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/experimental/threadpool_dataset_op.h"
#include "tensorflow/core/kernels/data/model_dataset_op.h"
#include "tensorflow/core/kernels/data/optimize_dataset_op.h"
namespace tensorflow {
namespace data {
constexpr const char* const FinalizeDatasetOp::kDatasetType;
constexpr const char* const FinalizeDatasetOp::kInputDataset;
constexpr const char* const FinalizeDatasetOp::kOutputTypes;
constexpr const char* const FinalizeDatasetOp::kOutputShapes;
constexpr const char* const FinalizeDatasetOp::kHasCapturedRef;
namespace {
void GetModelDatasetParams(const Options& options,
model::AutotuneAlgorithm* algorithm,
int64_t* cpu_budget, int64_t* ram_budget) {
*algorithm = model::AutotuneAlgorithm::HILL_CLIMB;
*cpu_budget = options.autotune_options().cpu_budget();
*ram_budget = options.autotune_options().ram_budget();
}
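// Wraps |input| with the MaxIntraOpParallelism, PrivateThreadPool, Model and
// Optimize datasets requested by the finalized Options, moving the reference
// count to each new wrapper; graph rewrites are skipped with a warning when
// the pipeline captures reference variables.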
void MakeDatasetHelper(OpKernelContext* ctx, bool has_captured_ref,
DatasetBase* input, DatasetBase** output) {
*output = input;
input->Ref();
const Options& options = input->options();
if (ShouldConfigureMaxIntraOpParallelism(options)) {
experimental::MaxIntraOpParallelismDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().max_intra_op_parallelism(),
output);
input->Unref();
input = *output;
}
if (ShouldUsePrivateThreadPool(options)) {
experimental::PrivateThreadPoolDatasetOp::MakeDatasetFromOptions(
ctx, input, options.threading_options().private_threadpool_size(),
output);
input->Unref();
input = *output;
}
if (ShouldUseAutotuning(options)) {
model::AutotuneAlgorithm algorithm;
int64_t cpu_budget;
int64_t ram_budget;
GetModelDatasetParams(options, &algorithm, &cpu_budget, &ram_budget);
ModelDatasetOp::MakeDatasetFromOptions(ctx, input, algorithm, cpu_budget,
ram_budget, output);
input->Unref();
input = *output;
}
absl::flat_hash_set<tstring> optimizations_enabled;
absl::flat_hash_set<tstring> optimizations_disabled;
absl::flat_hash_set<tstring> optimizations_default;
GetOptimizations(options, &optimizations_enabled, &optimizations_disabled,
&optimizations_default);
if (ShouldApplyOptimizations(options, optimizations_enabled,
optimizations_default)) {
if (has_captured_ref &&
(!optimizations_enabled.empty() || !optimizations_default.empty())) {
LOG(WARNING)
<< "tf.data graph rewrites are not compatible with reference "
"variables. The following rewrites will be disabled: "
<< absl::StrJoin(optimizations_enabled, ", ") << ", "
<< absl::StrJoin(optimizations_default, ", ") << ". "
<< "To enable rewrites, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program.";
} else {
auto optimization_configs = CreateGraphRewriteConfigs(options);
OptimizeDatasetOp::MakeDatasetFromOptions(
ctx, input, optimizations_enabled, optimizations_disabled,
optimizations_default, optimization_configs, output);
input->Unref();
input = *output;
}
}
}
}
FinalizeDatasetOp::FinalizeDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kHasCapturedRef)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kHasCapturedRef, &has_captured_ref_));
} else {
has_captured_ref_ = false;
}
}
void FinalizeDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
MakeDatasetHelper(ctx, has_captured_ref_, input, output);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset").Device(DEVICE_CPU).Priority(2),
FinalizeDatasetOp);
REGISTER_KERNEL_BUILDER(Name("FinalizeDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
FinalizeDatasetNoopOp);
}
}
} | #include "tensorflow/core/kernels/data/finalize_dataset_op.h"
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
class FinalizeDatasetParams : public DatasetParams {
public:
template <typename T>
FinalizeDatasetParams(T input_dataset_params, DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
has_captured_ref_(false) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
}
std::vector<Tensor> GetInputTensors() const override { return {}; }
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->emplace_back(FinalizeDatasetOp::kInputDataset);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{FinalizeDatasetOp::kHasCapturedRef, has_captured_ref_},
{FinalizeDatasetOp::kOutputTypes, output_dtypes_},
{FinalizeDatasetOp::kOutputShapes, output_shapes_}};
return absl::OkStatus();
}
string dataset_type() const override { return "Finalize"; }
private:
bool has_captured_ref_;
};
class FinalizeDatasetOpTest : public DatasetOpsTestBase {
public:
void CheckDatasetPipelineTypeStrings(
const std::vector<std::string>& type_strings) {
CheckDatasetPipelineTypeString(dataset_, type_strings, 0);
}
void CheckDatasetPipelineTypeString(
const DatasetBase* dataset, const std::vector<std::string>& type_strings,
int index) {
EXPECT_GT(type_strings.size(), index);
EXPECT_EQ(dataset->type_string(), type_strings[index]);
std::vector<const DatasetBase*> input_datasets;
TF_ASSERT_OK(dataset->InputDatasets(&input_datasets));
if (input_datasets.empty()) {
return;
}
EXPECT_EQ(1, input_datasets.size());
CheckDatasetPipelineTypeString(input_datasets[0], type_strings, index + 1);
}
};
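// Text-proto tf.data Options controlling which wrapper datasets
// FinalizeDataset is expected to insert in each test case below.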
constexpr char kNoOptimizationOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kMaxIntraOpParallelismOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { max_intra_op_parallelism: 10 }
)pb";
constexpr char kPrivateThreadPoolOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: false }
threading_options { private_threadpool_size: 10 }
)pb";
constexpr char kModelOptions[] = R"pb(
optimization_options { apply_default_optimizations: false }
)pb";
constexpr char kOptimizationsDefaultOptions[] = R"pb(
autotune_options { enabled: false }
optimization_options { apply_default_optimizations: true }
)pb";
constexpr char kAllChainedDatasetsOptions[] = R"pb(
autotune_options { enabled: true }
optimization_options { apply_default_optimizations: true }
threading_options { max_intra_op_parallelism: 10 private_threadpool_size: 10 }
)pb";
OptionsDatasetParams NoOptimizationOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kNoOptimizationOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams MaxIntraOpParallelismOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kMaxIntraOpParallelismOptions,
&options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams PrivateThreadPoolOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kPrivateThreadPoolOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams ModelOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kModelOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptimizationsDefaultOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kOptimizationsDefaultOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams AllChainedDatasetsOptionsParams() {
Options options;
protobuf::TextFormat::ParseFromString(kAllChainedDatasetsOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams NoOptimizationFinalizeParams() {
return FinalizeDatasetParams(NoOptimizationOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
FinalizeDatasetParams MaxIntraOpParallelismParams() {
return FinalizeDatasetParams(MaxIntraOpParallelismOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"MaxIntraOpParallelismDatasetOp");
}
FinalizeDatasetParams PrivateThreadPoolParams() {
return FinalizeDatasetParams(PrivateThreadPoolOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"PrivateThreadPoolDatasetOp");
}
FinalizeDatasetParams ModelParams() {
return FinalizeDatasetParams(ModelOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"ModelDatasetOp");
}
FinalizeDatasetParams OptimizationsDefaultParams() {
return FinalizeDatasetParams(OptimizationsDefaultOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"private_thread_pool");
}
FinalizeDatasetParams AllChainedDatasetsParams() {
return FinalizeDatasetParams(AllChainedDatasetsOptionsParams(),
{DT_INT64},
{PartialTensorShape({})},
"inject/prefetch_ModelDataset/_9");
}
TEST_F(FinalizeDatasetOpTest, NoOptimizationNodeName) {
auto test_case_params = NoOptimizationFinalizeParams();
TF_ASSERT_OK(Initialize(test_case_params));
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"OptionsDataset", "RangeDataset"});
}
std::vector<GetNextTestCase<FinalizeDatasetParams>> GetNextTestCases() {
return {{NoOptimizationFinalizeParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{MaxIntraOpParallelismParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{PrivateThreadPoolParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{ModelParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptimizationsDefaultParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{AllChainedDatasetsParams(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(FinalizeDatasetOpTest, FinalizeDatasetParams,
GetNextTestCases())
TEST_F(FinalizeDatasetOpTest, MaxIntraOpParallelismNodeName) {
auto test_case_params = MaxIntraOpParallelismParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, PrivateThreadPoolNodeName) {
auto test_case_params = PrivateThreadPoolParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrivateThreadPoolDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, ModelNodeName) {
auto test_case_params = ModelParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"ModelDataset", "OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, OptimizationsDefaultNodeName) {
auto test_case_params = OptimizationsDefaultParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings({"PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset",
"OptionsDataset", "RangeDataset"});
}
TEST_F(FinalizeDatasetOpTest, AllChainedDatasetsNodeName) {
auto test_case_params = AllChainedDatasetsParams();
TF_ASSERT_OK(Initialize(test_case_params));
std::vector<const DatasetBase*> inputs;
Status s = dataset_->InputDatasets(&inputs);
TF_ASSERT_OK(CheckDatasetNodeName(test_case_params.node_name()));
CheckDatasetPipelineTypeStrings(
{"PrefetchDataset", "ModelDataset", "PrivateThreadPoolDataset",
"MaxIntraOpParallelismDataset", "OptionsDataset", "RangeDataset"});
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/finalize_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/finalize_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1551d810-6514-45c0-92cb-2a4bb061fe08 | cpp | tensorflow/tensorflow | identity_n_op | tensorflow/core/kernels/identity_n_op.cc | tensorflow/core/kernels/identity_n_op_test.cc | #include "tensorflow/core/kernels/identity_n_op.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
namespace tensorflow {
REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_DEFAULT), IdentityNOp);
REGISTER_KERNEL_BUILDER(Name("IdentityN").Device(DEVICE_TPU_SYSTEM),
IdentityNOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("IdentityN");
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class IdentityNOpTest : public OpsTestBase {
protected:
Status Init(DataType input0_type, DataType input1_type) {
TF_CHECK_OK(NodeDefBuilder("op", "IdentityN")
.Input(FakeInput({input0_type, input1_type}))
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(IdentityNOpTest, Int32DoubleSuccess_6) {
TF_ASSERT_OK(Init(DT_INT32, DT_DOUBLE));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<double>(TensorShape({6}),
{7.3, 8.3, 9.3, 10.3, 11.3, 12.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected0(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected0, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected0, *GetOutput(0));
Tensor expected1(allocator(), DT_DOUBLE, TensorShape({6}));
test::FillValues<double>(&expected1, {7.3, 8.3, 9.3, 10.3, 11.3, 12.3});
test::ExpectTensorEqual<double>(expected1, *GetOutput(1));
}
TEST_F(IdentityNOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({2, 3}), {7, 8, 9, 10, 11, 12});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
test::FillValues<int32>(&expected, {7, 8, 9, 10, 11, 12});
test::ExpectTensorEqual<int32>(expected, *GetOutput(1));
}
TEST_F(IdentityNOpTest, StringInt32Success) {
TF_ASSERT_OK(Init(DT_STRING, DT_INT32));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
AddInputFromArray<int32>(TensorShape({8}), {1, 3, 5, 7, 9, 11, 13, 15});
TF_ASSERT_OK(RunOpKernel());
Tensor expected0(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected0, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected0, *GetOutput(0));
Tensor expected1(allocator(), DT_INT32, TensorShape({8}));
test::FillValues<int32>(&expected1, {1, 3, 5, 7, 9, 11, 13, 15});
test::ExpectTensorEqual<int32>(expected1, *GetOutput(1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_n_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/identity_n_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cc0b3645-c11e-4cfb-b58e-5f062405c65f | cpp | tensorflow/tensorflow | hlo_replication_analysis | third_party/xla/xla/service/hlo_replication_analysis.cc | third_party/xla/xla/service/hlo_replication_analysis_test.cc | #include "xla/service/hlo_replication_analysis.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
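// Determines the replication of the value produced by `hlo` at `index`:
// whether it is known to be identical across partitions (when
// cross_partition_spmd is true) or across replicas, based on the replication
// of its operands and on collective semantics such as all-reduce and
// all-gather replica groups.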
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::DetermineHloInstructionIsReplicated(
const HloInstruction* hlo, const ShapeIndex& index,
bool cross_partition_spmd,
const absl::flat_hash_map<const HloInstruction*, ShapeTree<HloReplication>>&
hlo_replication,
bool support_partial_replication) {
const auto merge_operand_replication = [&hlo_replication](
const HloInstruction* inst) {
HloReplication replication = HloReplication::ReplicatedOnAllDevices();
for (auto operand : inst->operands()) {
auto operand_it = hlo_replication.find(operand);
if (operand_it == hlo_replication.end()) {
replication = replication.Merge(HloReplication::UniqueOnAllDevices());
} else {
replication = replication.Merge(operand_it->second.element({}));
}
}
return replication;
};
if (hlo->opcode() == HloOpcode::kAllReduce ||
hlo->opcode() == HloOpcode::kAllGather) {
HloReplication replication = merge_operand_replication(hlo);
if (replication.IsReplicatedOnAllDevices()) {
return replication;
}
if (!hlo->channel_id().has_value()) {
if (cross_partition_spmd) {
return replication;
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
}
if (support_partial_replication) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const ReplicaGroup& replica_group : hlo->replica_groups()) {
device_sets.push_back(replica_group.replica_ids());
}
return HloReplication::PartiallyReplicated(device_sets);
} else {
return HloReplication::UniqueOnAllDevices();
}
} else {
bool global_id;
if (hlo->opcode() == HloOpcode::kAllReduce) {
global_id = Cast<HloAllReduceInstruction>(hlo)->use_global_device_ids();
} else {
global_id = Cast<HloAllGatherInstruction>(hlo)->use_global_device_ids();
}
if (global_id) {
bool replicated_across_partitions = true;
bool replicated_across_replicas = true;
const int64_t num_partitions =
hlo->GetModule()->config().num_partitions();
absl::flat_hash_set<int64_t> visited_partitions;
absl::flat_hash_set<int64_t> visited_replicas;
for (const auto& group : hlo->replica_groups()) {
visited_partitions.clear();
visited_replicas.clear();
visited_replicas.reserve(group.replica_ids().size());
visited_partitions.reserve(group.replica_ids().size());
for (int64_t id : group.replica_ids()) {
int64_t rid = id / num_partitions;
int64_t pid = id % num_partitions;
visited_partitions.insert(pid);
visited_replicas.insert(rid);
}
replicated_across_partitions &=
visited_partitions.size() == num_partitions;
replicated_across_replicas &=
visited_replicas.size() ==
hlo->GetModule()->config().replica_count();
}
if ((cross_partition_spmd && replicated_across_partitions) ||
(!cross_partition_spmd && replicated_across_replicas)) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
if (cross_partition_spmd) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->replica_groups().empty() || hlo->replica_groups().size() == 1) {
return HloReplication::ReplicatedOnAllDevices();
} else {
return HloReplication::UniqueOnAllDevices();
}
}
}
if (hlo->HasSideEffectNoRecurse()) {
return HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kReplicaId) {
return cross_partition_spmd ? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kPartitionId) {
return cross_partition_spmd ? HloReplication::UniqueOnAllDevices()
: HloReplication::ReplicatedOnAllDevices();
}
auto it = hlo_replication.find(hlo);
if (hlo->opcode() == HloOpcode::kParameter) {
CHECK(it != hlo_replication.end());
return it->second.element(index);
}
if (it != hlo_replication.end() &&
it->second.element(index).IsUniqueOnAllDevices()) {
return it->second.element(index);
}
if (hlo->opcode() == HloOpcode::kConstant) {
return HloReplication::ReplicatedOnAllDevices();
}
if (hlo->opcode() == HloOpcode::kCustomCall &&
(hlo->custom_call_target() == "X64SplitLow" ||
hlo->custom_call_target() == "X64SplitHigh" ||
hlo->custom_call_target() == "X64Combine")) {
return merge_operand_replication(hlo);
}
if (support_partial_replication) {
if (hlo->opcode() == HloOpcode::kDynamicSlice) {
const HloInstruction* ds_buffer = hlo->operand(0);
if (hlo->dynamic_slice_sizes().size() == 1 &&
hlo->dynamic_slice_sizes()[0] == 1 &&
ds_buffer->opcode() == HloOpcode::kConstant &&
ds_buffer->shape().rank() == 1 &&
ds_buffer->shape().element_type() == PrimitiveType::S32 &&
((cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kPartitionId) ||
(!cross_partition_spmd &&
hlo->operand(1)->opcode() == HloOpcode::kReplicaId))) {
const HloModule* hlo_module = hlo->GetModule();
int64_t num_devices = cross_partition_spmd
? hlo_module->config().num_partitions()
: hlo_module->config().replica_count();
absl::flat_hash_map<int64_t, std::vector<int64_t>> value_to_device_set;
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
std::optional<int64_t> value =
ds_buffer->literal().GetIntegralAsS64({device_id});
value_to_device_set[*value].push_back(device_id);
}
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.second));
}
return HloReplication::PartiallyReplicated(device_sets);
}
}
}
if (hlo->IsElementwise() ||
hlo->opcode() == HloOpcode::kConcatenate ||
hlo->opcode() == HloOpcode::kConvolution ||
hlo->opcode() == HloOpcode::kDot ||
hlo->opcode() == HloOpcode::kReduce ||
hlo->opcode() == HloOpcode::kBroadcast ||
hlo->opcode() == HloOpcode::kTranspose ||
hlo->opcode() == HloOpcode::kReshape ||
hlo->opcode() == HloOpcode::kBitcast ||
hlo->opcode() == HloOpcode::kReverse ||
hlo->opcode() == HloOpcode::kGather ||
hlo->opcode() == HloOpcode::kScatter ||
hlo->opcode() == HloOpcode::kIota ||
hlo->opcode() == HloOpcode::kPad ||
hlo->opcode() == HloOpcode::kSlice ||
hlo->opcode() == HloOpcode::kDynamicSlice ||
hlo->opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo->opcode() == HloOpcode::kReduceWindow ||
hlo->opcode() == HloOpcode::kCopy) {
return merge_operand_replication(hlo);
}
return HloReplication::UniqueOnAllDevices();
}
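// Propagates replication information through `computation` in post order,
// with dedicated handling for while, call/fusion, conditional, tuple,
// get-tuple-element and infeed; returns true if any entry in
// hlo_replication_ changed.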
bool HloReplicationAnalysis::ComputeHloReplicationOnComputation(
const HloComputation* computation, bool mark_everything_not_replicated) {
bool changed = false;
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
auto assign_or_combine_shapetree =
[&](ShapeTree<HloReplication>&& to_combine,
const HloInstruction* dest) {
auto it = hlo_replication_.find(dest);
if (it == hlo_replication_.end()) {
hlo_replication_[dest] = std::move(to_combine);
return true;
}
bool updated = false;
it->second.ForEachMutableElement(
[&](const ShapeIndex& index, HloReplication* element) {
HloReplication new_replication =
element->Merge(to_combine.element(index));
if (!element->Equal(new_replication)) {
*element = std::move(new_replication);
updated = true;
}
});
return updated;
};
auto propagate_shapetree = [&](const HloInstruction* source,
const HloInstruction* dest) {
auto source_it = hlo_replication_.find(source);
if (source_it == hlo_replication_.end()) {
return false;
}
return assign_or_combine_shapetree(
ShapeTree<HloReplication>(source_it->second), dest);
};
if (inst->opcode() == HloOpcode::kWhile) {
while (true) {
bool updated = propagate_shapetree(
inst->operand(0),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->while_body()->root_instruction(),
inst->while_condition()->parameter_instruction(0));
updated |= propagate_shapetree(
inst->operand(0), inst->while_body()->parameter_instruction(0));
updated |=
propagate_shapetree(inst->while_body()->root_instruction(),
inst->while_body()->parameter_instruction(0));
updated |= ComputeHloReplicationOnComputation(
inst->while_condition(), mark_everything_not_replicated);
if (!ContainsKey(loops_known_with_same_iterations_, inst) &&
!hlo_replication_[inst->while_condition()->root_instruction()]
.element({})
.IsReplicatedOnAllDevices()) {
updated |= ComputeHloReplicationOnComputation(
inst->while_body(), true);
} else {
updated |= ComputeHloReplicationOnComputation(
inst->while_body(), mark_everything_not_replicated);
}
if (!updated) {
break;
}
changed = true;
}
changed |= propagate_shapetree(inst->operand(0), inst);
changed |=
propagate_shapetree(inst->while_body()->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kCall ||
inst->opcode() == HloOpcode::kFusion) {
auto called = inst->called_computations().front();
for (int64_t i = 0; i < inst->operand_count(); ++i) {
changed |= propagate_shapetree(inst->operand(i),
called->parameter_instruction(i));
}
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
} else if (inst->opcode() == HloOpcode::kConditional) {
for (int64_t i = 0; i < inst->called_computations().size(); ++i) {
changed |= propagate_shapetree(
inst->operand(i + 1),
inst->called_computations()[i]->parameter_instruction(0));
}
if (!hlo_replication_[inst->operand(0)]
.element({})
.IsReplicatedOnAllDevices()) {
for (auto called : inst->called_computations()) {
          changed |= ComputeHloReplicationOnComputation(
              called, true);
}
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
for (auto called : inst->called_computations()) {
changed |= ComputeHloReplicationOnComputation(
called, mark_everything_not_replicated);
changed |= propagate_shapetree(called->root_instruction(), inst);
}
}
} else if (inst->opcode() == HloOpcode::kTuple) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
for (int64_t i = 0; i < inst->operand_count(); ++i) {
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(i)], {}, {i});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kOptimizationBarrier) {
ShapeTree<HloReplication> shape_tree = hlo_replication_[inst->operand(0)];
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kGetTupleElement) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
shape_tree.CopySubtreeFrom(hlo_replication_[inst->operand(0)],
{inst->tuple_index()}, {});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else if (inst->opcode() == HloOpcode::kInfeed && cross_partition_spmd_) {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::UniqueOnAllDevices());
if (inst->has_sharding()) {
auto sharding = inst->sharding().GetAsShapeTree(inst->shape());
shape_tree.ForEachMutableElement(
[&sharding](const ShapeIndex& index, HloReplication* data) {
*data = sharding.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
});
}
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
} else {
if (mark_everything_not_replicated) {
changed |= assign_or_combine_shapetree(
ShapeTree<HloReplication>(inst->shape(),
HloReplication::UniqueOnAllDevices()),
inst);
} else {
ShapeTree<HloReplication> shape_tree(
inst->shape(), HloReplication::ReplicatedOnAllDevices());
ShapeUtil::ForEachSubshape(
inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
*shape_tree.mutable_element(index) =
DetermineHloInstructionIsReplicated(
inst, index, cross_partition_spmd_, hlo_replication_,
support_partial_replication_);
});
changed |= assign_or_combine_shapetree(std::move(shape_tree), inst);
}
}
}
return changed;
}
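// Seeds the replication of the entry parameters from their
// parameter_replicated_at_leaf_buffers annotation (and their sharding in
// SPMD mode), then propagates through the entry computation.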
absl::Status HloReplicationAnalysis::ComputeHloReplication() {
auto entry = module_->entry_computation();
for (int i = 0; i < entry->num_parameters(); ++i) {
auto param = entry->parameter_instruction(i);
ShapeTree<HloReplication> shape_tree(param->shape(),
HloReplication::UniqueOnAllDevices());
const auto& replication = param->parameter_replicated_at_leaf_buffers();
int leaf_index = 0;
absl::Status status = ShapeUtil::ForEachSubshapeWithStatus(
param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!ShapeUtil::IsLeafIndex(param->shape(), index)) {
return absl::OkStatus();
}
if (cross_partition_spmd_ && param->has_sharding()) {
TF_ASSIGN_OR_RETURN(auto sharding_tree,
param->sharding().AsShapeTree(param->shape()));
*shape_tree.mutable_element(index) =
sharding_tree.element(index).IsReplicated()
? HloReplication::ReplicatedOnAllDevices()
: HloReplication::UniqueOnAllDevices();
}
if (replication) {
if (!cross_partition_spmd_ && (*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::ReplicatedOnAllDevices();
}
if (cross_partition_spmd_ && !(*replication)[leaf_index]) {
*shape_tree.mutable_element(index) =
HloReplication::UniqueOnAllDevices();
}
++leaf_index;
}
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
hlo_replication_[param] = std::move(shape_tree);
}
ComputeHloReplicationOnComputation(entry,
false);
return absl::OkStatus();
}
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
return it->second.element(index).IsReplicatedOnAllDevices();
}
bool HloReplicationAnalysis::HloInstructionIsReplicatedAt(
const HloInstruction* inst, const ShapeIndex& index,
absl::Span<const ReplicaGroup> replica_groups) const {
auto it = hlo_replication_.find(inst);
if (it == hlo_replication_.end()) {
return false;
}
VLOG(5) << "HloInstructionIsReplicatedAt is called on " << inst->name()
<< ", index: " << index.ToString()
<< ", replication: " << it->second.element(index).ToString();
if (replica_groups.empty()) {
return it->second.element(index).IsReplicatedOnAllDevices();
}
if (it->second.element(index).IsReplicatedOnAllDevices()) {
return true;
}
if (it->second.element(index).IsUniqueOnAllDevices()) {
return false;
}
for (const ReplicaGroup& replica_group : replica_groups) {
if (!it->second.element(index).IsReplicatedWithinSubgroup(
replica_group.replica_ids())) {
return false;
}
}
return true;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
return Run(module, cross_partition_spmd, &empty);
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::Run(const HloModule* module, bool cross_partition_spmd,
const absl::flat_hash_set<const HloInstruction*>*
loops_known_with_same_iterations) {
auto analysis = absl::WrapUnique(new HloReplicationAnalysis(
module, cross_partition_spmd, loops_known_with_same_iterations,
false));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
absl::StatusOr<std::unique_ptr<HloReplicationAnalysis>>
HloReplicationAnalysis::RunWithPartialReplication(const HloModule* module,
bool cross_partition_spmd) {
const absl::flat_hash_set<const HloInstruction*> empty;
auto analysis = absl::WrapUnique(
new HloReplicationAnalysis(module, cross_partition_spmd, &empty,
true));
TF_RETURN_IF_ERROR(analysis->ComputeHloReplication());
return analysis;
}
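// HloReplication has three states: replicated on all devices, unique on all
// devices, or partially replicated, in which case device_set_root_ maps every
// device id to the smallest device id in its replication group.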
HloReplicationAnalysis::HloReplication::HloReplication()
: state_(State::kReplicatedOnAllDevices) {}
HloReplicationAnalysis::HloReplication::HloReplication(
HloReplicationAnalysis::HloReplication::State state,
absl::Span<const int64_t> device_set_root)
: state_(state),
device_set_root_(device_set_root.begin(), device_set_root.end()) {
CHECK(state == State::kPartiallyReplicated || device_set_root_.empty());
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::ReplicatedOnAllDevices() {
return HloReplication(State::kReplicatedOnAllDevices, {});
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::UniqueOnAllDevices() {
return HloReplication(State::kUniqueOnAllDevices, {});
}
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::PartiallyReplicated(
absl::Span<const absl::Span<const int64_t>> device_sets) {
int64_t max_device_id = 0;
for (const absl::Span<const int64_t>& device_set : device_sets) {
for (int64_t device_id : device_set) {
max_device_id = std::max(max_device_id, device_id);
}
}
std::vector<int64_t> device_set_root;
device_set_root.resize(max_device_id + 1);
for (const absl::Span<const int64_t>& device_set : device_sets) {
int64_t min_device_id = *absl::c_min_element(device_set);
for (int64_t device_id : device_set) {
device_set_root[device_id] = min_device_id;
}
}
return HloReplication(State::kPartiallyReplicated, device_set_root);
}
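// Merge returns the replication guaranteed when both inputs' guarantees must
// hold at once; merging two partial replications pairs the group roots per
// device, producing the common refinement (intersection) of the groupings.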
HloReplicationAnalysis::HloReplication
HloReplicationAnalysis::HloReplication::Merge(
const HloReplication& other) const {
switch (state_) {
case State::kReplicatedOnAllDevices:
return other;
case State::kUniqueOnAllDevices:
return *this;
case State::kPartiallyReplicated: {
switch (other.state_) {
case State::kReplicatedOnAllDevices:
return *this;
case State::kUniqueOnAllDevices:
return other;
case State::kPartiallyReplicated: {
absl::flat_hash_map<int64_t, std::vector<int64_t>>
value_to_device_set;
size_t num_devices = device_set_root_.size();
for (int64_t device_id = 0; device_id < num_devices; ++device_id) {
int64_t new_value = device_set_root_[device_id] * num_devices +
other.device_set_root_[device_id];
value_to_device_set[new_value].push_back(device_id);
}
CHECK_LE(value_to_device_set.size(), num_devices);
if (value_to_device_set.size() == 1) {
return ReplicatedOnAllDevices();
} else if (value_to_device_set.size() < num_devices) {
std::vector<absl::Span<const int64_t>> device_sets;
for (const auto& value_and_device_set : value_to_device_set) {
device_sets.push_back(
absl::Span<const int64_t>(value_and_device_set.second));
}
return PartiallyReplicated(device_sets);
} else {
return UniqueOnAllDevices();
}
}
}
}
}
}
bool HloReplicationAnalysis::HloReplication::Equal(
const HloReplication& other) const {
if (state_ != other.state_) {
return false;
}
return absl::c_equal(device_set_root_, other.device_set_root_);
}
bool HloReplicationAnalysis::HloReplication::IsReplicatedOnAllDevices() const {
return state_ == State::kReplicatedOnAllDevices;
}
bool HloReplicationAnalysis::HloReplication::IsUniqueOnAllDevices() const {
return state_ == State::kUniqueOnAllDevices;
}
bool HloReplicationAnalysis::HloReplication::IsReplicatedWithinSubgroup(
absl::Span<const int64_t> device_ids) const {
if (device_ids.empty()) return true;
return absl::c_all_of(device_ids, [this, &device_ids](int device_id) {
return device_set_root_[device_id] == device_set_root_[device_ids.front()];
});
}
std::string HloReplicationAnalysis::HloReplication::ToString() const {
switch (state_) {
case State::kReplicatedOnAllDevices:
return "ReplicatedOnAllDevices";
case State::kUniqueOnAllDevices:
return "UniqueOnAllDevices";
case State::kPartiallyReplicated:
return absl::StrCat("PartiallyReplicated{",
absl::StrJoin(device_set_root_, ","), "}");
}
}
} | #include "xla/service/hlo_replication_analysis.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class HloReplicationAnalysisTest : public HloTestBase {};
TEST_F(HloReplicationAnalysisTest, NoControlFlow) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}) parameter(0)
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3, all-reduce)
all-reduce-partitions = u32[] all-reduce(replica-id), channel_id=1,
to_apply=sum.u32, replica_groups={{0},{1},{2},{3}}
all-reduce-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
ROOT add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
module_str, 4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-subgroup"), {}));
}
TEST_F(HloReplicationAnalysisTest, NoControlFlowSPMD) {
const std::string module_str = R"(
HloModule NoControlFlow
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add.2 = f32[] add(a, b)
}
sum.u32 {
a = u32[] parameter(0)
b = u32[] parameter(1)
ROOT add.2 = u32[] add(a, b)
}
ENTRY entry {
param = (f32[4096,4096]{1,0}, f32[4096,4096]{1,0}, f32[4096,4096]{1,0})
parameter(0), sharding={{maximal device=0}, {replicated}, {replicated}}
get-tuple-element.2 = f32[4096,4096]{1,0} get-tuple-element(param), index=0
get-tuple-element.3 = f32[4096,4096]{1,0} get-tuple-element(param), index=1
get-tuple-element.4 = f32[4096,4096]{1,0} get-tuple-element(param), index=2
after-all.1 = token[] after-all()
replica-id = u32[] replica-id()
partition-id = u32[] partition-id()
infeed = ((f32[4096,4096]{1,0}, f32[8,8]{1,0}), token[]) infeed(after-all.1),
sharding={{maximal device=0}, {replicated}, {maximal device=0}}
infeed-data = (f32[4096,4096]{1,0}, f32[8,8]{1,0}) get-tuple-element(infeed),
index=0
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed-data),
index=0
get-tuple-element.6 = f32[8,8]{1,0} get-tuple-element(infeed-data), index=1
dot = f32[4096,4096]{1,0} dot(get-tuple-element.5, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
dot.2 = f32[4096,4096]{1,0} dot(get-tuple-element.4, get-tuple-element.3),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
all-reduce = f32[4096,4096]{1,0} all-reduce(dot), replica_groups={},
to_apply=sum
all-reduce.2 = f32[4096,4096]{1,0} all-reduce(dot.2), replica_groups={},
to_apply=sum
all-reduce-subgroup = f32[4096,4096]{1,0} all-reduce(dot),
replica_groups={{0,1},{2,3}}, to_apply=sum
all-reduce-partitions = f32[4096,4096]{1,0} all-reduce(get-tuple-element.2),
channel_id=1, to_apply=sum
all-reduce-partitions.2 = f32[4096,4096]{1,0} all-reduce(get-tuple-element.4),
channel_id=1, to_apply=sum
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions)
subtract.2 = f32[4096,4096]{1,0} subtract(get-tuple-element.3,
all-reduce-partitions.2)
all-reduce-same-operand = u32[] all-reduce(replica-id), to_apply=sum.u32
all-reduce-same-operand-subgroup = u32[] all-reduce(replica-id),
replica_groups={{0,1},{2,3}}, to_apply=sum.u32
all-reduce-different-operand = u32[] all-reduce(partition-id),
to_apply=sum.u32
add = f32[4096,4096]{1,0} add(get-tuple-element.2, subtract)
ROOT add.2 = f32[4096,4096]{1,0} add(get-tuple-element.4, subtract.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
module_str, 4));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{false, true, false});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(module.get(), true));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.3"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.4"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.5"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.6"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dot.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-partitions.2"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "subtract.2"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "replica-id"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "partition-id"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-same-operand-subgroup"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "all-reduce-different-operand"), {}));
}
TEST_F(HloReplicationAnalysisTest, NestedCall) {
const std::string module_str = R"(
HloModule NestedCall
fusion_computation {
fusion_p0 = f32[] parameter(0)
fusion_p1 = f32[] parameter(1)
add = f32[] add(fusion_p0, fusion_p0)
multiply = f32[] multiply(add, fusion_p1)
ROOT tuple = (f32[], f32[]) tuple(add, multiply)
}
call_body {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT fusion = (f32[], f32[]) fusion(a, b), kind=kLoop, calls=fusion_computation
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ROOT call = (f32[], f32[]) call(get-tuple-element, get-tuple-element.1), to_apply=call_body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, false});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "get-tuple-element.1"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "add"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "fusion"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "call"), {1}));
}
TEST_F(HloReplicationAnalysisTest, SimpleWhileLoop) {
const std::string module_str = R"(
HloModule SimpleWhileLoop
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY SimpleWhileLoop {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
TEST_F(HloReplicationAnalysisTest,
WhileLoopParameterAliasingNonReplicatedOutput) {
const std::string module_str = R"(
HloModule WhileLoopParameterAliasingNonReplicatedOutput
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
after-all.1 = token[] after-all()
infeed = (f32[4096,4096]{1,0}, token[]) infeed(after-all.1)
get-tuple-element.5 = f32[4096,4096]{1,0} get-tuple-element(infeed), index=0
subtract = f32[4096,4096]{1,0} subtract(get-tuple-element.5, multiply)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
constant.1 = u32[] constant(1)
add = u32[] add(get-tuple-element.6, constant.1)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(subtract, add)
}
ENTRY WhileLoopParameterAliasingNonReplicatedOutput {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "multiply"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
TEST_F(HloReplicationAnalysisTest, WhileLoopDifferentCondition) {
const std::string module_str = R"(
HloModule WhileLoopDifferentCondition
cond {
cond_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(cond_param), index=1
constant.3 = u32[] constant(5)
ROOT greater-than = pred[] compare(get-tuple-element, constant.3), direction=LT
}
body {
body_param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
get-tuple-element.1 = f32[4096,4096]{1,0} get-tuple-element(body_param), index=0
multiply = f32[4096,4096]{1,0} multiply(get-tuple-element.1, get-tuple-element.1)
get-tuple-element.6 = u32[] get-tuple-element(body_param), index=1
replica-id = u32[] replica-id()
add = u32[] add(get-tuple-element.6, replica-id)
ROOT tuple = (f32[4096,4096]{1,0}, u32[]) tuple(multiply, add)
}
ENTRY WhileLoopDifferentCondition {
param = (f32[4096,4096]{1,0}, u32[]) parameter(0)
ROOT while = (f32[4096,4096]{1,0}, u32[]) while(param), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "while"), {1}));
}
TEST_F(HloReplicationAnalysisTest, SimpleConditional) {
const std::string module_str = R"(
HloModule SimpleConditional
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[]), s32[]) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
get-tuple-element.7 = s32[] get-tuple-element(param), index=3
ROOT conditional = (f32[], f32[]) conditional(get-tuple-element.7, get-tuple-element.4, get-tuple-element.5, get-tuple-element.6), branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, false, true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
TEST_F(HloReplicationAnalysisTest, ConditionalWithDifferentPredicates) {
const std::string module_str = R"(
HloModule ConditionalWithDifferentPredicates
Negate {
x = (f32[], f32[]) parameter(0)
get-tuple-element = f32[] get-tuple-element(x), index=0
negate = f32[] negate(get-tuple-element)
get-tuple-element.1 = f32[] get-tuple-element(x), index=1
negate.1 = f32[] negate(get-tuple-element.1)
ROOT tuple = (f32[], f32[]) tuple(negate, negate.1)
}
Identity {
ROOT y = (f32[], f32[]) parameter(0)
}
Floor {
z = (f32[], f32[]) parameter(0)
get-tuple-element.2 = f32[] get-tuple-element(z), index=0
floor = f32[] floor(get-tuple-element.2)
get-tuple-element.3 = f32[] get-tuple-element(z), index=1
floor.1 = f32[] floor(get-tuple-element.3)
ROOT tuple.1 = (f32[], f32[]) tuple(floor, floor.1)
}
ENTRY entry {
param = ((f32[], f32[]), (f32[], f32[]), (f32[], f32[])) parameter(0)
get-tuple-element.4 = (f32[], f32[]) get-tuple-element(param), index=0
get-tuple-element.5 = (f32[], f32[]) get-tuple-element(param), index=1
get-tuple-element.6 = (f32[], f32[]) get-tuple-element(param), index=2
replica-id = u32[] replica-id()
id = s32[] bitcast-convert(replica-id)
ROOT conditional = (f32[], f32[]) conditional(id, get-tuple-element.4,
get-tuple-element.5, get-tuple-element.6),
branch_computations={Negate, Identity, Floor}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool>{true, true, true, true, true, true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "y"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "tuple.1"), {1}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {0}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "conditional"), {1}));
}
TEST_F(HloReplicationAnalysisTest, X64SplitCombine) {
const std::string module_str = R"(
HloModule SimpleX64SplitCombine
ENTRY entry {
param = (f64[]) parameter(0)
gte = f64[] get-tuple-element(param), index=0
param-low = f32[] custom-call(gte), custom_call_target="X64SplitLow"
param-high = f32[] custom-call(gte), custom_call_target="X64SplitHigh"
ROOT result-combine = f64[] custom-call(param-low, param-high), custom_call_target="X64Combine"
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_str));
auto param = module->entry_computation()->parameter_instruction(0);
param->set_parameter_replicated_at_leaf_buffers(absl::Span<const bool>{true});
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-low"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "param-high"), {}));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "result-combine"), {}));
}
TEST_F(HloReplicationAnalysisTest, CrossModuleAndReplicaAllReduce) {
const std::string module_str = R"(
HloModule CrossModuleAndReplicaAllReduce
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
ROOT tuple = (f32[], f32[]) tuple(ar0, ar1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
module_str, 2));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ar1"), {}));
}
TEST_F(HloReplicationAnalysisTest, GlobalIdAllGather) {
const std::string module_str = R"(
HloModule GlobalIdAllGather
ENTRY entry {
param = f32[1] parameter(0)
ag1 = f32[2] all-gather(param), replica_groups={{0,1},{2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=1
ag2 = f32[2] all-gather(param), replica_groups={{0,2},{1,3}}, dimensions={0},
use_global_device_ids=true, channel_id=2
ag3 = f32[4] all-gather(param), replica_groups={{0,1,2,3}}, dimensions={0},
use_global_device_ids=true, channel_id=3
ROOT tuple = (f32[2], f32[2], f32[4]) tuple(ag1, ag2, ag3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/2,
                                   /*num_partitions=*/2));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> replica_analysis,
      HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/false));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> partition_analysis,
      HloReplicationAnalysis::Run(module.get(),
                                  /*cross_partition_spmd=*/true));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag1"), {}));
EXPECT_FALSE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag2"), {}));
EXPECT_TRUE(partition_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "ag3"), {}));
}
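// Each replica slices a different entry of the constant, so the result is not
// globally replicated, but it is replicated within groups whose members read
// identical entries (e.g. {0,4}, {1,5}, {2,6}, {3,7}).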
TEST_F(HloReplicationAnalysisTest, PartiallyReplicatedDynamicSlice) {
const std::string module_str = R"(
HloModule PartiallyReplicatedDynamicSlice
ENTRY entry {
constant = s32[8] constant({1, 3, 9, 10, 1, 3, 9, 10})
replica-id = u32[] replica-id()
ROOT dynamic-slice = s32[1] dynamic-slice(constant, replica-id), dynamic_slice_sizes={1}
}
)";
  TF_ASSERT_OK_AND_ASSIGN(
      auto module,
      ParseAndReturnVerifiedModule(module_str, /*replica_count=*/8,
                                   /*num_partitions=*/1));
  TF_ASSERT_OK_AND_ASSIGN(
      std::unique_ptr<HloReplicationAnalysis> replica_analysis,
      HloReplicationAnalysis::RunWithPartialReplication(
          module.get(),
          /*cross_partition_spmd=*/false));
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}));
std::vector<ReplicaGroup> replica_groups(4);
replica_groups[0].add_replica_ids(0);
replica_groups[0].add_replica_ids(4);
replica_groups[1].add_replica_ids(1);
replica_groups[1].add_replica_ids(5);
replica_groups[2].add_replica_ids(2);
replica_groups[2].add_replica_ids(6);
replica_groups[3].add_replica_ids(3);
replica_groups[3].add_replica_ids(7);
EXPECT_TRUE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups));
std::vector<ReplicaGroup> replica_groups_2(2);
replica_groups_2[0].add_replica_ids(0);
replica_groups_2[0].add_replica_ids(1);
replica_groups_2[0].add_replica_ids(2);
replica_groups_2[0].add_replica_ids(3);
replica_groups_2[1].add_replica_ids(4);
replica_groups_2[1].add_replica_ids(5);
replica_groups_2[1].add_replica_ids(6);
replica_groups_2[1].add_replica_ids(7);
EXPECT_FALSE(replica_analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "dynamic-slice"), {}, replica_groups_2));
}
TEST_F(HloReplicationAnalysisTest, OptimizationBarrier) {
const std::string module_str = R"(
HloModule OptimizationBarrier
sum {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry {
param = (f32[], f32[]) parameter(0)
get-tuple-element.0 = f32[] get-tuple-element(param), index=0
get-tuple-element.1 = f32[] get-tuple-element(param), index=1
ar0 = f32[] all-reduce(get-tuple-element.0), to_apply=sum, replica_groups={{0,1}}
ar1 = f32[] all-reduce(get-tuple-element.1), to_apply=sum, replica_groups={{0},{1}}
tuple = (f32[], f32[]) tuple(ar0, ar1)
opt-barrier = (f32[], f32[]) opt-barrier(tuple)
gte.0 = f32[] get-tuple-element(opt-barrier), index=0
gte.1 = f32[] get-tuple-element(opt-barrier), index=1
ROOT tuple.1 = (f32[], f32[]) tuple(gte.0, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
module_str, 2));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloReplicationAnalysis> analysis,
HloReplicationAnalysis::Run(
module.get(), false));
EXPECT_TRUE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.0"), {}));
EXPECT_FALSE(analysis->HloInstructionIsReplicatedAt(
FindInstruction(module.get(), "gte.1"), {}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_replication_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_replication_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
05aaaaf2-d827-4812-8afe-3e128f7a72e5 | cpp | tensorflow/tensorflow | gpu_model | tensorflow/lite/delegates/gpu/common/gpu_model.cc | tensorflow/lite/delegates/gpu/cl/testing/gpu_model_test.cc | #include "tensorflow/lite/delegates/gpu/common/gpu_model.h"
#include <algorithm>
#include <any>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/operation_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/special_selector.h"
#include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h"
#include "tensorflow/lite/delegates/gpu/common/task/serialization_base.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/add_bias.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h"
namespace tflite {
namespace gpu {
namespace {
bool IsReady(const absl::flat_hash_set<ValueId>& ready_tensors,
const GpuNode& node) {
for (const ValueId in_id : node.inputs) {
if (ready_tensors.find(in_id) == ready_tensors.end()) {
return false;
}
}
return true;
}
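// Folds the linkable node `src` into `dst`: `dst` takes over src's extra
// inputs and its output, and the two GPUOperations are fused via AddOperation.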
absl::Status MergeGpuNodes(const GpuInfo& gpu_info, GpuNode* src,
GpuNode* dst) {
for (int j = 1; j < src->inputs.size(); ++j) {
dst->inputs.push_back(src->inputs[j]);
}
dst->outputs[0] = src->outputs[0];
dst->name += " -> " + src->name;
return dst->gpu_operation->AddOperation(gpu_info, src->gpu_operation.get());
}
flatbuffers::Offset<data::TensorDescWithId> Encode(
const TensorDescriptor& desc, const ValueId& id,
flatbuffers::FlatBufferBuilder* builder) {
auto desc_fb = Encode(desc, builder);
data::TensorDescWithIdBuilder desc_builder(*builder);
desc_builder.add_desc(desc_fb);
desc_builder.add_id(id);
return desc_builder.Finish();
}
flatbuffers::Offset<data::GpuNode> Encode(
const GpuNode& node, flatbuffers::FlatBufferBuilder* builder) {
auto op_fb = Encode(*node.gpu_operation, builder);
std::vector<int32_t> in_ids(node.inputs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = node.inputs[i];
}
std::vector<int32_t> out_ids(node.outputs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = node.outputs[i];
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto out_ids_fb = builder->CreateVector(out_ids);
auto name_fb = builder->CreateString(node.name);
data::GpuNodeBuilder node_builder(*builder);
node_builder.add_gpu_op(op_fb);
node_builder.add_input_ids(in_ids_fb);
node_builder.add_output_ids(out_ids_fb);
node_builder.add_name(name_fb);
return node_builder.Finish();
}
absl::Status Decode(const data::GpuNode* fb_node, GpuNode* node) {
GPUOperation op;
RETURN_IF_ERROR(Decode(fb_node->gpu_op(), &op));
node->gpu_operation = std::make_unique<GPUOperation>(std::move(op));
for (auto in_fb : *fb_node->input_ids()) {
node->inputs.push_back(in_fb);
}
for (auto out_fb : *fb_node->output_ids()) {
node->outputs.push_back(out_fb);
}
node->name = std::string(fb_node->name()->c_str(), fb_node->name()->size());
return absl::OkStatus();
}
bool IsAssociativeLinkableOp(const Node& node,
const std::vector<Value*>& inputs,
const std::vector<Value*>& outputs) {
if (inputs.size() == 1) {
return false;
}
const OperationType op_type = OperationTypeFromString(node.operation.type);
if (op_type != OperationType::ADD && op_type != OperationType::MUL) {
return false;
}
const auto dst_shape = outputs[0]->tensor.shape;
for (int i = 0; i < inputs.size(); ++i) {
const auto src_shape = inputs[i]->tensor.shape;
if (dst_shape.b != src_shape.b && src_shape.b == 1) {
return false;
}
if (dst_shape.h != src_shape.h && src_shape.h == 1) {
return false;
}
if (dst_shape.w != src_shape.w && src_shape.w == 1) {
return false;
}
if (dst_shape.c != src_shape.c && src_shape.c == 1) {
return false;
}
}
return true;
}
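// Validates that a predefined/external tensor descriptor matches the requested
// data type, uses only supported axes, agrees with the batch dimension of
// `shape`, and can actually be allocated on the current device.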
absl::Status CheckExternalTensorDescription(const GpuInfo& gpu_info,
const TensorDescriptor& tensor_desc,
const BHWC& shape,
DataType data_type) {
if (tensor_desc.GetDataType() != data_type) {
return absl::InvalidArgumentError(
"Global precision and precision of predefined/external tensors must be "
"synchronized.");
}
if (tensor_desc.HasAxis(Axis::DEPTH)) {
return absl::InvalidArgumentError(
"Currently no support of Depth dimension in predefined/external "
"tensors.");
}
if (tensor_desc.HasAxis(Axis::BATCH) && shape.b == 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.HasAxis(Axis::BATCH) && shape.b != 1) {
return absl::InvalidArgumentError("Wrong layout, batch mismatch.");
}
if (!tensor_desc.CanCreateTensorWithShape(gpu_info, shape).ok()) {
return absl::UnavailableError(
"Current device can not allocate tensor with this shape for "
"predefined/external descriptor.");
}
return absl::OkStatus();
}
class TensorReserver {
public:
TensorReserver() : next_(0) {}
ValueId Add(const TensorDescriptor& dummy) {
reservations_[next_] = dummy;
return next_++;
}
void Add(ValueId id, const TensorDescriptor& dummy) {
reservations_[id] = dummy;
}
ValueId GetNewId() { return next_++; }
void SetNext(ValueId id) { next_ = id; }
TensorDescriptor Get(ValueId id) { return reservations_[id]; }
 public:
  // Kept public: GraphToGpuModel moves reservations_ out of the reserver
  // directly once all tensors are registered.
absl::flat_hash_map<ValueId, TensorDescriptor> reservations_;
ValueId next_;
};
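// Picks a TensorDescriptor for every value in the graph. Descriptors for
// predefined/external tensors come from `create_info` and are validated; all
// other tensors use the default storage type, upgraded to SINGLE_TEXTURE_2D
// when the channel count is small enough for it.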
absl::Status ReserveGraphTensors(const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info,
const GraphFloat32& graph,
TensorReserver* tensor_reserver) {
ValueId max_id = 0;
auto tensors = graph.values();
for (auto& t : tensors) {
auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
if (t->tensor.type != DataType::FLOAT32 &&
t->tensor.type != DataType::FLOAT16) {
data_type = t->tensor.type;
}
const auto shape = graph.GetValue(t->id)->tensor.shape;
auto it_predefined = create_info.predefined.find(t->id);
auto it_immutable_external =
create_info.external_immutable_tensors.find(t->id);
auto it_mutable_external = create_info.external_mutable_tensors.find(t->id);
int external_categories_count = 0;
TensorDescriptor tensor_desc;
if (it_predefined != create_info.predefined.end()) {
external_categories_count++;
tensor_desc = it_predefined->second;
}
if (it_immutable_external != create_info.external_immutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_immutable_external->second->GetDescriptor();
}
if (it_mutable_external != create_info.external_mutable_tensors.end()) {
external_categories_count++;
tensor_desc = it_mutable_external->second;
}
if (external_categories_count > 1) {
return absl::InvalidArgumentError(
"Tensors ids from predefined / external_immutable_tensors / "
"external_mutable_tensors should not intersect.");
}
if (external_categories_count == 1) {
if (!(graph.IsGraphInput(t->id) || graph.IsGraphOutput(t->id))) {
return absl::InvalidArgumentError(
"Currently external can be used only for graph inputs/outputs");
}
RETURN_IF_ERROR(CheckExternalTensorDescription(gpu_info, tensor_desc,
shape, data_type));
} else {
TensorStorageType storage_type = create_info.storage_type;
Layout layout = shape.b == 1 ? Layout::HWC : Layout::BHWC;
const bool can_use_single_texture =
storage_type == TensorStorageType::TEXTURE_2D ||
storage_type == TensorStorageType::TEXTURE_3D ||
storage_type == TensorStorageType::TEXTURE_ARRAY;
if (shape.c < 4 && can_use_single_texture &&
TensorDescriptor{data_type, TensorStorageType::SINGLE_TEXTURE_2D,
layout}
.CanCreateTensorWithShape(gpu_info, shape)
.ok()) {
storage_type = TensorStorageType::SINGLE_TEXTURE_2D;
}
tensor_desc = TensorDescriptor{data_type, storage_type, layout};
RETURN_IF_ERROR(
tensor_desc.UpdateToSupportedStorageType(gpu_info, shape));
if (gpu_info.IsApiMetal() &&
storage_type == TensorStorageType::TEXTURE_2D) {
if (!(gpu_info.IsApple() && gpu_info.apple_info.IsFamilyApple1())) {
tensor_desc.SetUseBufferForWriteOnlyTexture2d(true);
}
}
}
tensor_desc.SetBHWCShape(shape);
tensor_reserver->Add(t->id, tensor_desc);
max_id = std::max(max_id, t->id);
}
tensor_reserver->SetNext(max_id + 1);
return absl::OkStatus();
}
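// Lowers graph nodes to GPU operations: constants become const tensors,
// multi-node patterns go through GPUSubgraphFromGraph when possible, and the
// remaining nodes are converted with GPUOperationFromNode. Newly created
// intermediate tensors are registered with the tensor reserver.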
absl::Status ConvertOperations(const GpuInfo& gpu_info,
const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
TensorReserver* tensor_reserver,
GpuModel* gpu_model) {
std::map<ValueId, TensorDescriptor> tensor_descriptors;
const auto values = graph.values();
for (auto value : values) {
tensor_descriptors[value->id] = tensor_reserver->Get(value->id);
}
std::set<NodeId> consumed_nodes;
std::vector<Node*> graph_nodes = graph.nodes();
  // For every tensor id, the index of the operation that last wrote it;
  // graph inputs are marked with -1.
  std::map<ValueId, int> tensor_usages;
for (const auto& input : gpu_model->input_ids_and_refs) {
tensor_usages[input.first] = -1;
}
std::vector<SharedWeightsConvDesc> shared_conv_weights;
std::vector<SharedWeightsConvDesc>* shared_conv_weights_ptr =
create_info.hints.Check(ModelHints::kReuseConvWeights)
? &shared_conv_weights
: nullptr;
for (int i = 0; i < graph_nodes.size(); ++i) {
const Node& node = *graph_nodes[i];
if (consumed_nodes.find(node.id) != consumed_nodes.end()) {
continue;
}
auto op_type = OperationTypeFromString(node.operation.type);
if (op_type == OperationType::CONSTANT) {
auto attr =
std::any_cast<ConstTensorAttributes>(node.operation.attributes);
auto outputs = graph.FindOutputs(node.id);
gpu_model->const_tensors[outputs[0]->id] =
tensor_reserver->Get(outputs[0]->id);
gpu_model->const_tensors[outputs[0]->id].UploadData(attr.tensor);
continue;
}
GPUOperationsSubgraph gpu_subgraph;
if (GPUSubgraphFromGraph(create_info.hints, gpu_info, create_info.precision,
graph, node.id, tensor_descriptors,
&consumed_nodes, &gpu_subgraph)
            .ok()) {
      // A multi-node pattern was mapped by the special subgraph selector;
      // the consumed graph nodes are already recorded in consumed_nodes.
    } else {
auto inputs = graph.FindInputs(node.id);
auto outputs = graph.FindOutputs(node.id);
if (IsAssociativeLinkableOp(node, inputs, outputs)) {
int latest_written_tensor_index = 0;
int last_usage = tensor_usages[inputs[0]->id];
for (int j = 1; j < inputs.size(); ++j) {
if (tensor_usages[inputs[j]->id] > last_usage) {
last_usage = tensor_usages[inputs[j]->id];
latest_written_tensor_index = j;
}
}
std::swap(inputs[0], inputs[latest_written_tensor_index]);
}
consumed_nodes.insert(node.id);
OperationDef op_def;
op_def.precision = create_info.precision;
for (int j = 0; j < inputs.size(); ++j) {
op_def.src_tensors.push_back(tensor_reserver->Get(inputs[j]->id));
}
for (int j = 0; j < outputs.size(); ++j) {
op_def.dst_tensors.push_back(tensor_reserver->Get(outputs[j]->id));
}
RETURN_IF_ERROR(GPUOperationFromNode(
gpu_info, op_def, create_info.hints, inputs, outputs, node,
shared_conv_weights_ptr, &gpu_subgraph));
}
absl::flat_hash_map<int, ValueId> mapping_to_global_ids;
for (int j = 0; j < gpu_subgraph.new_tensors.size(); ++j) {
const auto& t = gpu_subgraph.new_tensors[j];
if (!t.GetData().empty()) {
auto global_id = tensor_reserver->GetNewId();
gpu_model->const_tensors[global_id] =
std::move(gpu_subgraph.new_tensors[j]);
mapping_to_global_ids[j] = global_id;
} else {
auto global_id = tensor_reserver->Add(t);
mapping_to_global_ids[j] = global_id;
}
}
if (!shared_conv_weights.empty() && !mapping_to_global_ids.empty()) {
shared_conv_weights.back().RemapIds(mapping_to_global_ids);
}
for (auto& gpu_op : gpu_subgraph.operations) {
GpuNode gpu_node;
gpu_node.gpu_operation = std::move(gpu_op.operation);
gpu_node.inputs.resize(gpu_op.input_ids.size());
for (int j = 0; j < gpu_op.input_ids.size(); ++j) {
int id = gpu_op.input_ids[j];
if (id >= 0) {
gpu_node.inputs[j] = id;
} else {
gpu_node.inputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.outputs.resize(gpu_op.output_ids.size());
for (int j = 0; j < gpu_op.output_ids.size(); ++j) {
int id = gpu_op.output_ids[j];
if (id >= 0) {
gpu_node.outputs[j] = id;
tensor_usages[id] = i;
} else {
gpu_node.outputs[j] = mapping_to_global_ids[-(id + 1)];
}
}
gpu_node.name = gpu_op.name;
gpu_model->nodes.push_back(std::move(gpu_node));
}
}
return absl::OkStatus();
}
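// Fuses chains and small diamonds of linkable elementwise nodes (one- and
// two-input roots) into single GPUOperations, but only when every fused
// intermediate result has exactly one consumer.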
absl::Status MergeElementwiseNodes(const GpuInfo& gpu_info,
GpuModel* gpu_model) {
auto& nodes = gpu_model->nodes;
for (int elem_root_index = 1; elem_root_index < nodes.size();
++elem_root_index) {
auto& elem_root = nodes[elem_root_index];
if (!(elem_root.inputs.size() == 1 || elem_root.inputs.size() == 2) ||
elem_root.outputs.size() != 1 ||
!elem_root.gpu_operation->IsLinkable()) {
continue;
}
std::map<int, int> prev_nodes;
for (int j = elem_root_index - 1; j >= 0; --j) {
for (int k = 0; k < elem_root.inputs.size(); ++k) {
if (elem_root.inputs[k] == nodes[j].outputs[0]) {
prev_nodes[k] = j;
break;
}
}
}
if (prev_nodes.size() == 1) {
if (elem_root.inputs.size() != 1) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
auto& prev_node = nodes[prev_first_node_index];
if (prev_node.inputs.size() != 1 || prev_node.outputs.size() != 1 ||
!prev_node.gpu_operation->IsLinkable()) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(FuseSimpleElemWithSimpleElem(
gpu_info, std::move(*prev_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (prev_nodes.size() == 2) {
if (elem_root.inputs.size() != 2 ||
elem_root.gpu_operation->GetElementwiseInputsCount() != 2) {
continue;
}
const int prev_first_node_index = prev_nodes[0];
const int prev_second_node_index = prev_nodes[1];
auto& prev_first_node = nodes[prev_first_node_index];
auto& prev_second_node = nodes[prev_second_node_index];
if (prev_first_node.gpu_operation->IsLinkable() &&
!prev_second_node.gpu_operation->IsLinkable() &&
prev_second_node.outputs.size() == 1 &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 ||
first_node_parent_index != prev_second_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsFirstInput(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_first_node_index] = std::move(new_node);
elem_root_index = prev_first_node_index;
continue;
}
if (!prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (second_node_parent_index == -1 ||
second_node_parent_index != prev_first_node_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWithSimpleElemAsSecondInput(
gpu_info, std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_second_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_second_node.name + " -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
nodes.erase(nodes.begin() + elem_root_index);
nodes[prev_second_node_index] = std::move(new_node);
elem_root_index = prev_second_node_index;
continue;
}
if (prev_first_node.gpu_operation->IsLinkable() &&
prev_second_node.gpu_operation->IsLinkable() &&
prev_first_node.inputs.size() == 1 &&
prev_first_node.outputs.size() == 1 &&
prev_second_node.inputs.size() == 1 &&
prev_second_node.outputs.size() == 1) {
int first_node_parent_index = -1;
for (int j = prev_first_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_first_node.inputs[0]) {
first_node_parent_index = j;
break;
}
}
int second_node_parent_index = -1;
for (int j = prev_second_node_index - 1; j >= 0; --j) {
if (nodes[j].outputs[0] == prev_second_node.inputs[0]) {
second_node_parent_index = j;
break;
}
}
if (first_node_parent_index == -1 || second_node_parent_index == -1 ||
first_node_parent_index != second_node_parent_index) {
continue;
}
int consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[1]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
consumers_count = 0;
for (const auto& node : nodes) {
for (const auto& input : node.inputs) {
if (input == elem_root.inputs[0]) {
consumers_count++;
}
}
}
if (consumers_count != 1) {
continue;
}
GPUOperation new_operation;
RETURN_IF_ERROR(Fuse2InputElemWith2SimpleElem(
gpu_info, std::move(*prev_first_node.gpu_operation.get()),
std::move(*prev_second_node.gpu_operation.get()),
std::move(*elem_root.gpu_operation.get()), &new_operation));
GpuNode new_node;
new_node.inputs.push_back(prev_first_node.inputs[0]);
new_node.outputs.push_back(elem_root.outputs[0]);
new_node.name = prev_first_node.name + " -> " + prev_second_node.name +
" -> " + elem_root.name;
new_node.gpu_operation =
std::make_unique<GPUOperation>(std::move(new_operation));
int first_prev_node_index =
std::min(prev_first_node_index, prev_second_node_index);
int second_prev_node_index =
std::max(prev_first_node_index, prev_second_node_index);
nodes.erase(nodes.begin() + elem_root_index);
nodes.erase(nodes.begin() + second_prev_node_index);
nodes[first_prev_node_index] = std::move(new_node);
elem_root_index = first_prev_node_index - 1;
continue;
}
}
}
return absl::OkStatus();
}
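// Greedily merges each node with its single linkable consumer, skipping nodes
// that produce graph outputs, have multiple consumers, or whose output is not
// the consumer's first input.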
absl::Status MergeNodes(const GpuInfo& gpu_info, GpuModel* gpu_model) {
absl::flat_hash_set<ValueId> ready_tensors;
absl::flat_hash_set<ValueId> output_tensors;
for (const auto& input : gpu_model->input_ids_and_refs) {
ready_tensors.insert(input.first);
}
for (const auto& output : gpu_model->output_ids_and_refs) {
output_tensors.insert(output.first);
}
auto& nodes = gpu_model->nodes;
for (int i = 0; i < nodes.size(); ++i) {
auto& node = nodes[i];
bool node_has_graph_output = false;
for (const auto& out_id : node.outputs) {
ready_tensors.insert(out_id);
if (output_tensors.find(out_id) != output_tensors.end()) {
node_has_graph_output = true;
}
}
if (node_has_graph_output || node.outputs.size() != 1) {
continue;
}
std::vector<int> next_nodes;
int link_index = 0;
for (int j = i + 1; j < nodes.size(); ++j) {
for (int k = 0; k < nodes[j].inputs.size(); ++k) {
if (nodes[j].inputs[k] == node.outputs[0]) {
next_nodes.push_back(j);
link_index = k;
}
}
}
if (next_nodes.size() != 1 || link_index != 0) {
continue;
}
auto& linkable_node = nodes[next_nodes[0]];
if (!linkable_node.gpu_operation->IsLinkable() ||
linkable_node.outputs.size() != 1 ||
!IsReady(ready_tensors, linkable_node)) {
continue;
}
RETURN_IF_ERROR(MergeGpuNodes(gpu_info, &linkable_node, &node));
nodes.erase(nodes.begin() + next_nodes[0]);
i -= 1;
}
return absl::OkStatus();
}
void CopyExternals(const GraphFloat32& graph, GpuModel* gpu_model) {
const auto inputs = graph.inputs();
for (const auto& value : inputs) {
gpu_model->input_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto variable_inputs = graph.variable_inputs();
for (const auto& value : variable_inputs) {
gpu_model->variable_ids_and_refs.push_back({value->id, value->tensor.ref});
}
const auto outputs = graph.outputs();
for (const auto& value : outputs) {
gpu_model->output_ids_and_refs.push_back({value->id, value->tensor.ref});
}
}
void RemoveUnusedTensors(GpuModel* gpu_model) {
absl::flat_hash_set<ValueId> used_tensors;
for (const auto& node : gpu_model->nodes) {
for (const auto& id : node.inputs) {
used_tensors.insert(id);
}
for (const auto& id : node.outputs) {
used_tensors.insert(id);
}
}
for (const auto& inputs : gpu_model->input_ids_and_refs) {
used_tensors.insert(inputs.first);
}
for (const auto& outputs : gpu_model->output_ids_and_refs) {
used_tensors.insert(outputs.first);
}
for (auto it = gpu_model->tensors.begin(); it != gpu_model->tensors.end();) {
if (used_tensors.find(it->first) == used_tensors.end()) {
gpu_model->tensors.erase(it++);
} else {
++it;
}
}
}
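// Binds lightweight dummy spatial tensors (shape and descriptor only) to every
// operation so that BindArguments and RecalculateGridSize can run without
// allocating real device memory.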
absl::Status ResolvePolymorphicArgs(GpuModel* gpu_model) {
class DummySpatialTensor : public GpuSpatialTensor {
public:
DummySpatialTensor() = default;
explicit DummySpatialTensor(const BHWDC& shape,
const TensorDescriptor& tensor_desc)
: shape_(shape), tensor_desc_(tensor_desc) {}
~DummySpatialTensor() override = default;
int Width() const override { return shape_.w; }
int Height() const override { return shape_.h; }
int Depth() const override { return shape_.d; }
int Channels() const override { return shape_.c; }
int Slices() const override { return DivideRoundUp(shape_.c, 4); }
int Batch() const override { return shape_.b; }
TensorDescriptor GetDescriptor() const override { return tensor_desc_; }
private:
BHWDC shape_;
TensorDescriptor tensor_desc_;
};
for (auto& node : gpu_model->nodes) {
std::vector<DummySpatialTensor> src_tensors(node.inputs.size());
for (int i = 0; i < node.inputs.size(); ++i) {
const auto& tensor_desc = gpu_model->tensors[node.inputs[i]];
src_tensors[i] =
DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
node.gpu_operation->SetSrc(&src_tensors[i], i);
}
std::vector<DummySpatialTensor> dst_tensors(node.outputs.size());
for (int i = 0; i < node.outputs.size(); ++i) {
const auto& tensor_desc = gpu_model->tensors[node.outputs[i]];
dst_tensors[i] =
DummySpatialTensor(tensor_desc.GetBHWDCShape(), tensor_desc);
node.gpu_operation->SetDst(&dst_tensors[i], i);
}
RETURN_IF_ERROR(
node.gpu_operation->BindArguments(&node.gpu_operation->args_));
node.gpu_operation->RecalculateGridSize();
}
return absl::OkStatus();
}
}
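// Converts a prepared GraphFloat32 into a GpuModel: reserves tensor
// descriptors, lowers operations, fuses linkable nodes, assembles code and
// resolves per-node arguments.
//
// Minimal usage sketch (assumes `graph`, `create_info` and `gpu_info` were
// already set up by the caller; the surrounding code is illustrative):
//   GpuModel gpu_model;
//   RETURN_IF_ERROR(RunGraphTransformsForGpuModel(&graph));
//   RETURN_IF_ERROR(GraphToGpuModel(graph, create_info, gpu_info, &gpu_model));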
absl::Status GraphToGpuModel(const GraphFloat32& graph,
const CreateGpuModelInfo& create_info,
const GpuInfo& gpu_info, GpuModel* gpu_model) {
TensorReserver tensor_reserver;
RETURN_IF_ERROR(
ReserveGraphTensors(create_info, gpu_info, graph, &tensor_reserver));
CopyExternals(graph, gpu_model);
RETURN_IF_ERROR(ConvertOperations(gpu_info, graph, create_info,
&tensor_reserver, gpu_model));
RETURN_IF_ERROR(MergeElementwiseNodes(gpu_info, gpu_model));
RETURN_IF_ERROR(MergeNodes(gpu_info, gpu_model));
gpu_model->tensors = std::move(tensor_reserver.reservations_);
RemoveUnusedTensors(gpu_model);
for (auto& node : gpu_model->nodes) {
RETURN_IF_ERROR(node.gpu_operation->AssembleCode(gpu_info));
}
return ResolvePolymorphicArgs(gpu_model);
}
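// Serializes the GpuModel (nodes, tensors, const tensors and id/ref mappings)
// into the flatbuffer schema; the Decode overload below restores it.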
flatbuffers::Offset<data::GpuModel> Encode(
const GpuModel& gpu_model, flatbuffers::FlatBufferBuilder* builder) {
std::vector<int32_t> in_ids(gpu_model.input_ids_and_refs.size());
std::vector<int64_t> in_refs(gpu_model.input_ids_and_refs.size());
for (int i = 0; i < in_ids.size(); ++i) {
in_ids[i] = gpu_model.input_ids_and_refs[i].first;
in_refs[i] = gpu_model.input_ids_and_refs[i].second;
}
auto in_ids_fb = builder->CreateVector(in_ids);
auto in_refs_fb = builder->CreateVector(in_refs);
std::vector<int32_t> out_ids(gpu_model.output_ids_and_refs.size());
std::vector<int64_t> out_refs(gpu_model.output_ids_and_refs.size());
for (int i = 0; i < out_ids.size(); ++i) {
out_ids[i] = gpu_model.output_ids_and_refs[i].first;
out_refs[i] = gpu_model.output_ids_and_refs[i].second;
}
auto out_ids_fb = builder->CreateVector(out_ids);
auto out_refs_fb = builder->CreateVector(out_refs);
std::vector<flatbuffers::Offset<data::GpuNode>> nodes_fb;
for (int i = 0; i < gpu_model.nodes.size(); ++i) {
auto node_fb = Encode(gpu_model.nodes[i], builder);
nodes_fb.push_back(node_fb);
}
auto nodes_fb_vec = builder->CreateVector(nodes_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> tensors_fb;
for (const auto& tensor : gpu_model.tensors) {
auto tensor_fb = Encode(tensor.second, tensor.first, builder);
tensors_fb.push_back(tensor_fb);
}
auto tensors_fb_vec = builder->CreateVector(tensors_fb);
std::vector<flatbuffers::Offset<data::TensorDescWithId>> const_tensors_fb;
for (const auto& tensor : gpu_model.const_tensors) {
auto tensor_fb = Encode(tensor.second, tensor.first, builder);
const_tensors_fb.push_back(tensor_fb);
}
auto const_tensors_fb_vec = builder->CreateVector(const_tensors_fb);
std::vector<flatbuffers::Offset<data::PairOfValueIds>>
variable_ids_and_refs_fb;
for (auto& pair : gpu_model.variable_ids_and_refs) {
data::PairOfValueIdsBuilder pair_builder(*builder);
pair_builder.add_first(pair.first);
pair_builder.add_second(pair.second);
variable_ids_and_refs_fb.push_back(pair_builder.Finish());
}
auto variable_ids_and_refs_fb_vec =
builder->CreateVector(variable_ids_and_refs_fb);
data::GpuModelBuilder gpu_model_builder(*builder);
gpu_model_builder.add_nodes(nodes_fb_vec);
gpu_model_builder.add_tensors(tensors_fb_vec);
gpu_model_builder.add_const_tensors(const_tensors_fb_vec);
gpu_model_builder.add_input_ids(in_ids_fb);
gpu_model_builder.add_output_ids(out_ids_fb);
gpu_model_builder.add_variable_ids_and_refs(variable_ids_and_refs_fb_vec);
gpu_model_builder.add_input_refs(in_refs_fb);
gpu_model_builder.add_output_refs(out_refs_fb);
return gpu_model_builder.Finish();
}
absl::Status Decode(const data::GpuModel* fb_gpu_model, GpuModel* gpu_model) {
gpu_model->nodes.resize(fb_gpu_model->nodes()->size());
int counter = 0;
for (auto node_fb : *fb_gpu_model->nodes()) {
RETURN_IF_ERROR(Decode(node_fb, &gpu_model->nodes[counter]));
counter++;
}
for (const auto& tensor_fb : *fb_gpu_model->tensors()) {
TensorDescriptor desc;
Decode(tensor_fb->desc(), &desc);
gpu_model->tensors[tensor_fb->id()] = std::move(desc);
}
for (const auto& tensor_fb : *fb_gpu_model->const_tensors()) {
TensorDescriptor desc;
Decode(tensor_fb->desc(), &desc);
gpu_model->const_tensors[tensor_fb->id()] = std::move(desc);
}
for (int i = 0; i < fb_gpu_model->input_ids()->size(); ++i) {
gpu_model->input_ids_and_refs.push_back(
{(*fb_gpu_model->input_ids())[i], (*fb_gpu_model->input_refs())[i]});
}
for (int i = 0; i < fb_gpu_model->output_ids()->size(); ++i) {
gpu_model->output_ids_and_refs.push_back(
{(*fb_gpu_model->output_ids())[i], (*fb_gpu_model->output_refs())[i]});
}
for (auto variable_id : *fb_gpu_model->variable_ids_and_refs()) {
gpu_model->variable_ids_and_refs.push_back(
{variable_id->first(), variable_id->second()});
}
return absl::OkStatus();
}
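// Graph-level transformations that are typically applied before
// GraphToGpuModel: bias insertion, padding merging and rewriting of global
// pooling into reduce operations.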
absl::Status RunGraphTransformsForGpuModel(GraphFloat32* graph) {
auto merge_padding_transform = NewMergePaddingWithAdd();
auto add_bias_transform = NewAddBias();
auto pooling_to_reduce_op = NewGlobalPoolingToReduceOp();
ModelTransformer transformer(graph);
if (!transformer.Apply("add_bias", add_bias_transform.get())) {
return absl::InternalError("Invalid add_bias transform");
}
if (!transformer.Apply("merge_padding", merge_padding_transform.get())) {
return absl::InternalError("Invalid merge_padding transform");
}
if (!transformer.Apply("global pooling to mean",
pooling_to_reduce_op.get())) {
return absl::InternalError("Invalid global pooling to mean transform");
}
return absl::OkStatus();
}
}
} | #include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_model_test_util.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, LinkingConvolutionAndCosOp) {
auto status = TestLinkingConvolutionAndCosOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMul) {
auto status = TestLinkingConvolution2InputMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputBroadcastMul2InputMul) {
auto status = TestLinkingConvolution2InputBroadcastMul2InputMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputBroadcastMul) {
auto status = TestLinkingConvolution2InputMul2InputBroadcastMul(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolution2InputMul2InputMulCos) {
auto status = TestLinkingConvolution2InputMul2InputMulCos(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanh2InputDiff) {
auto status = TestLinkingConvolutionFirstTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionSecondTanh2InputDiff) {
auto status = TestLinkingConvolutionSecondTanh2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvolutionFirstTanhSecondCos2InputDiff) {
auto status = TestLinkingConvolutionFirstTanhSecondCos2InputDiff(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingComplex0) {
auto status = TestLinkingComplex0(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingConvElem2InputAddElemsOp) {
auto status = TestLinkingConvElem2InputAddElemsOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingSliceCastOp) {
auto status = TestLinkingSliceCastOp(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddAddMulOp) {
  auto status = TestLinkingAddAddMulOp(&exec_env_, true);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, LinkingAddMulOp) {
  auto status = TestLinkingAddAddMulOp(&exec_env_, false);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/gpu_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/testing/gpu_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3e95488-5275-405c-a295-dbce3315969e | cpp | tensorflow/tensorflow | graph_to_functiondef | tensorflow/core/framework/graph_to_functiondef.cc | tensorflow/core/framework/graph_to_functiondef_test.cc | #include "tensorflow/core/framework/graph_to_functiondef.h"
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/base64.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace {
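// Maps graph node names to unique, normalized names that are legal inside a
// FunctionDef ([a-z][a-z0-9_]*), keeping track of which names are taken.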
class NodeNameMapping {
public:
NodeNameMapping() = default;
string GetInputName(const string& name);
string GetOutputName(const string& name);
string Uniquify(const string& name);
Status UseOutputName(const string& name);
string Lookup(const string& name) const;
private:
string UniquifyHelper(const string& name);
static string Normalize(string name);
absl::flat_hash_map<string, uint64> used_names_;
absl::flat_hash_map<string, string> name_mapping_;
};
string NodeNameMapping::Normalize(string name) {
if (name.empty()) return "unknown";
const int n = name.size();
for (int i = 0; i < n; ++i) {
char c = name[i];
if (isalnum(c)) {
if (isupper(c)) {
name[i] = tolower(c);
}
} else {
name[i] = '_';
}
}
int i = 0;
for (; i < n; ++i) {
if (isalpha(name[i])) break;
}
return i == n ? "unknown" : name.substr(i);
}
string NodeNameMapping::UniquifyHelper(const string& name) {
auto it = used_names_.emplace(name, 0);
if (it.second) return name;
while (true) {
const string candidate = strings::StrCat(name, "_", it.first->second);
it.first->second++;
if (used_names_.emplace(candidate, 0).second) return candidate;
}
}
string NodeNameMapping::GetInputName(const string& name) {
const string& input_name = UniquifyHelper(Normalize(name));
name_mapping_[name] = input_name;
return input_name;
}
string NodeNameMapping::GetOutputName(const string& name) {
const string& input_name = UniquifyHelper(Normalize(name));
return input_name;
}
string NodeNameMapping::Uniquify(const string& name) {
const string uniqued = UniquifyHelper(name);
name_mapping_[name] = uniqued;
return uniqued;
}
Status NodeNameMapping::UseOutputName(const string& name) {
const auto& iter = used_names_.find(name);
if (iter != used_names_.end()) {
return errors::InvalidArgument(
"Cannot have duplicate output names. Name '", name,
"' appears more than once in 'output_names' array.");
}
used_names_.emplace(name, 0);
return absl::OkStatus();
}
string NodeNameMapping::Lookup(const string& name) const {
const auto iter = name_mapping_.find(name);
if (iter == name_mapping_.end()) return string();
return iter->second;
}
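// Copies `body_nodes` into fdef's node_defs, rewriting data inputs through
// `tensor_renaming` and control inputs through `node_names`. When
// `allow_destructive_reads` is set, attributes are moved out of the source
// graph instead of being copied.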
Status FillFunctionBody(
const string& fn_name, const NodeNameMapping& node_names,
const std::vector<const Node*>& body_nodes,
const absl::flat_hash_map<string, string>& tensor_renaming,
bool set_stateful_from_nodes, bool copy_placeholder_attrs_from_nodes,
bool allow_destructive_reads, FunctionDef* fdef) {
absl::flat_hash_set<string> func_attr_names;
for (const auto& func_attr : fdef->signature().attr()) {
func_attr_names.insert(func_attr.name());
}
std::vector<const Edge*> in_edges;
std::vector<const Edge*> control_edges;
for (const Node* node : body_nodes) {
NodeDef* node_def = fdef->add_node_def();
NodeDebugInfo debug_info(node->def());
if (allow_destructive_reads) {
Node* mutable_node = const_cast<Node*>(node);
      *node_def->mutable_op() = node->def().op();
*node_def->mutable_attr() =
std::move(*mutable_node->mutable_def()->mutable_attr());
if (node->def().has_experimental_debug_info()) {
*node_def->mutable_experimental_debug_info() = std::move(
*mutable_node->mutable_def()->mutable_experimental_debug_info());
}
if (node->def().has_experimental_type()) {
*node_def->mutable_experimental_type() = std::move(
*mutable_node->mutable_def()->mutable_experimental_type());
}
} else {
*node_def = node->def();
MergeDebugInfo(NodeDebugInfo(node->def()), node_def);
node_def->clear_input();
}
if (!node->assigned_device_name().empty()) {
node_def->set_device(node->assigned_device_name());
}
node_def->set_name(node_names.Lookup(node->name()));
in_edges.clear();
in_edges.resize(node->num_inputs(), nullptr);
control_edges.clear();
for (const Edge* edge : node->in_edges()) {
if (edge->src()->IsSource()) continue;
if (edge->IsControlEdge()) {
control_edges.push_back(edge);
} else {
in_edges[edge->dst_input()] = edge;
}
}
std::sort(control_edges.begin(), control_edges.end(),
[](const Edge* a, const Edge* b) {
return a->src()->name() < b->src()->name();
});
for (size_t i = 0; i < in_edges.size(); ++i) {
const Edge* edge = in_edges[i];
std::string original_input_name;
if (edge == nullptr) {
if (i >= node->requested_inputs().size()) {
return errors::InvalidArgument(
"Graph to be converted to function appears to be malformed. ",
"Node ", node->name(), " is missing input edge ", i);
}
original_input_name =
ParseTensorName(node->requested_inputs()[i]).ToString();
} else {
original_input_name =
strings::StrCat(edge->src()->name(), ":", edge->src_output());
}
const auto iter = tensor_renaming.find(original_input_name);
if (iter == tensor_renaming.end()) {
return errors::InvalidArgument(
"Input ", i, ", '", original_input_name, "', of node '",
node->name(), "' in function '", fn_name,
"' is not available. You might need to include it in inputs "
"or include its source node in the body");
}
node_def->add_input(iter->second);
}
for (const Edge* edge : control_edges) {
const string normalized = node_names.Lookup(edge->src()->name());
if (normalized.empty()) {
return errors::InvalidArgument(
"The source of control edge ", edge->DebugString(),
" is not in the body. Encountered while creating function '",
fn_name, "'");
}
node_def->add_input(strings::StrCat("^", normalized));
}
if (set_stateful_from_nodes && node->op_def().is_stateful()) {
fdef->mutable_signature()->set_is_stateful(true);
}
if (!copy_placeholder_attrs_from_nodes) {
continue;
}
for (const auto& iter : node_def->attr()) {
if (iter.second.placeholder().empty()) {
continue;
}
const std::string& func_attr_name = iter.second.placeholder();
if (func_attr_names.find(func_attr_name) != func_attr_names.end()) {
continue;
}
const std::string& node_attr_name = iter.first;
const OpDef::AttrDef* node_attr_def = nullptr;
for (const auto& node_attr : node->op_def().attr()) {
if (node_attr.name() == node_attr_name) {
node_attr_def = &node_attr;
}
}
if (!node_attr_def) {
return errors::Unimplemented(
"Placeholder value is not supported for attributes not in OpDef. "
"Attribute: ",
node_attr_name, ", OpDef: ", node->op_def().DebugString());
}
OpDef::AttrDef* attr_def = fdef->mutable_signature()->add_attr();
attr_def->set_name(func_attr_name);
attr_def->set_type(node_attr_def->type());
func_attr_names.insert(func_attr_name);
}
}
return absl::OkStatus();
}
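// Shared implementation behind the public GraphToFunctionDef overloads: builds
// the signature from `inputs`/`outputs`, fills the function body, wires up the
// return values and control outputs, and optionally appends a hash of the
// resulting FunctionDef to the function name.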
Status GraphToFunctionDefHelper(
const Graph& fn_body, const string& fn_name, bool append_hash_to_fn_name,
bool set_stateful_from_nodes, bool copy_placeholder_attrs_from_nodes,
const std::vector<const Node*>& body_nodes,
const std::vector<OutputTensor>& inputs,
const std::vector<OutputTensor>& outputs,
const std::vector<string>& output_names,
const std::vector<const Node*>& control_outputs,
const std::vector<string>& control_output_names, const char* description,
bool allow_destructive_reads, FunctionDef* fdef) {
if (!output_names.empty()) {
DCHECK_EQ(output_names.size(), outputs.size());
}
if (description != nullptr) {
fdef->mutable_signature()->set_description(description);
}
NodeNameMapping node_names;
absl::flat_hash_map<string, string> tensor_renaming;
for (size_t i = 0; i < outputs.size(); ++i) {
const Node* node = outputs[i].node;
int idx = outputs[i].index;
OpDef::ArgDef* argdef = fdef->mutable_signature()->add_output_arg();
if (node->IsRetval()) {
argdef->set_type(node->input_type(idx));
} else {
argdef->set_type(node->output_type(idx));
}
if (!output_names.empty()) {
TF_RETURN_IF_ERROR(node_names.UseOutputName(output_names[i]));
argdef->set_name(output_names[i]);
} else {
argdef->set_name(node_names.GetOutputName(node->name()));
}
}
for (size_t i = 0; i < inputs.size(); ++i) {
const Node* node = inputs[i].node;
int idx = inputs[i].index;
OpDef::ArgDef* argdef = fdef->mutable_signature()->add_input_arg();
argdef->set_type(node->output_type(idx));
const string& input_name = node_names.GetInputName(node->name());
argdef->set_name(input_name);
FunctionDef::ArgAttrs arg_attrs;
int64_t resource_arg_unique_id = -1;
for (const auto& attr : node->attrs()) {
if (absl::StartsWith(attr.first, "_")) {
arg_attrs.mutable_attr()->insert(attr);
} else if (attr.first == "shape" && argdef->type() != DT_RESOURCE) {
AttrValue value;
*(value.mutable_list()->add_shape()) = attr.second.shape();
arg_attrs.mutable_attr()->insert({"_output_shapes", value});
} else if (attr.first == "value" && node->type_string() == "Const") {
AttrValue value;
*(value.mutable_list()->add_shape()) =
attr.second.tensor().tensor_shape();
arg_attrs.mutable_attr()->insert({"_output_shapes", value});
}
if (attr.first == "_resource_arg_unique_id") {
resource_arg_unique_id = attr.second.i();
}
}
if (arg_attrs.attr_size() > 0) {
(*fdef->mutable_arg_attr())[i] = std::move(arg_attrs);
}
if (resource_arg_unique_id >= 0) {
(*fdef->mutable_resource_arg_unique_id())[idx] = resource_arg_unique_id;
}
tensor_renaming[strings::StrCat(node->name(), ":", idx)] = input_name;
}
for (const Node* node : body_nodes) {
const string& node_name = node_names.Uniquify(node->name());
NameRangeMap output_ranges;
TF_RETURN_IF_ERROR(
NameRangesForNode(*node, node->op_def(), nullptr, &output_ranges));
for (const auto& output : output_ranges) {
const StringPiece& output_name = output.first;
int index_start = output.second.first;
int index_end = output.second.second;
for (int i = index_start; i < index_end; ++i) {
const string& original_name = strings::StrCat(node->name(), ":", i);
const string& new_name =
strings::StrCat(node_name, ":", output_name, ":", i - index_start);
if (tensor_renaming.find(original_name) == tensor_renaming.end()) {
tensor_renaming[original_name] = new_name;
}
}
}
}
TF_RETURN_IF_ERROR(FillFunctionBody(
fn_name, node_names, body_nodes, tensor_renaming, set_stateful_from_nodes,
copy_placeholder_attrs_from_nodes, allow_destructive_reads, fdef));
for (int r = 0; r < fdef->signature().output_arg_size(); ++r) {
const string& ret_name = fdef->signature().output_arg(r).name();
string return_value;
if (outputs[r].node->IsRetval()) {
Edge const* edge;
TF_RETURN_IF_ERROR(outputs[r].node->input_edge(0, &edge));
return_value =
strings::StrCat(edge->src()->name(), ":", edge->src_output());
} else {
return_value =
strings::StrCat(outputs[r].node->name(), ":", outputs[r].index);
}
const auto iter = tensor_renaming.find(return_value);
if (iter == tensor_renaming.end()) {
return errors::InvalidArgument(
"TF_Output ", return_value, " is neither in the function body ",
"nor among function inputs. Encountered while creating function '",
fn_name, "'");
}
(*fdef->mutable_ret())[ret_name] = iter->second;
}
if (append_hash_to_fn_name) {
const uint64 hash = FunctionDefHash(*fdef);
string encoded;
TF_RETURN_IF_ERROR(Base64Encode(
StringPiece(reinterpret_cast<const char*>(&hash), sizeof(hash)),
&encoded));
std::replace(encoded.begin(), encoded.end(), '-', 'a');
std::replace(encoded.begin(), encoded.end(), '_', 'A');
fdef->mutable_signature()->set_name(strings::StrCat(fn_name, "_", encoded));
} else {
fdef->mutable_signature()->set_name(fn_name);
}
if (!control_output_names.empty() &&
(control_outputs.size() != control_output_names.size())) {
return errors::InvalidArgument(
"Expected number of control outputs (", control_outputs.size(),
") and the number of control output names (",
control_output_names.size(), ") to match but they do not.");
}
std::set<string> control_output_names_set;
for (int i = 0; i < control_outputs.size(); ++i) {
string signature_name;
if (!control_output_names.empty()) {
signature_name = control_output_names[i];
} else {
signature_name = control_outputs[i]->name();
}
if (signature_name.empty()) {
return errors::InvalidArgument("Control output name must be not empty");
}
if (!control_output_names_set.insert(signature_name).second) {
return errors::InvalidArgument("Repeated control output name: ",
signature_name);
}
const string control_output_node =
node_names.Lookup(control_outputs[i]->name());
if (control_output_node.empty()) {
return errors::InvalidArgument(
"Control output node name must be not empty");
}
(*fdef->mutable_control_ret())[signature_name] = control_output_node;
}
for (const string& control_output : control_output_names_set) {
fdef->mutable_signature()->add_control_output(control_output);
}
return absl::OkStatus();
}
Status GraphToFunctionDefHelper(
const Graph& graph, const string& name,
const std::function<absl::optional<string>(const Node*)>& control_ret,
const std::vector<string>& output_names, bool allow_destructive_reads,
FunctionDef* fdef) {
auto add_arg_or_retval = [](Node* node,
std::vector<OutputTensor>* args_or_retvals) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &index));
if (index >= args_or_retvals->size()) {
args_or_retvals->resize(index + 1);
}
if ((*args_or_retvals)[index].node == nullptr) {
(*args_or_retvals)[index].node = node;
} else {
return errors::InvalidArgument(
"Multiple '", node->type_string(), "' nodes found with index ", index,
"; originally we already have:\n",
(*args_or_retvals)[index].node->DebugString(), "\nNow we have:\n",
node->DebugString());
}
return absl::OkStatus();
};
std::vector<const Node*> body_nodes;
std::vector<OutputTensor> inputs;
std::vector<OutputTensor> outputs;
std::vector<const Node*> control_outputs;
std::vector<string> control_output_names;
for (Node* node : graph.op_nodes()) {
if (node->IsArg()) {
TF_RETURN_IF_ERROR(add_arg_or_retval(node, &inputs));
continue;
}
if (node->IsRetval()) {
TF_RETURN_IF_ERROR(add_arg_or_retval(node, &outputs));
continue;
}
if (control_ret) {
auto control_ret_name = control_ret(node);
if (control_ret_name.has_value()) {
control_outputs.push_back(node);
control_output_names.push_back(control_ret_name.value());
}
}
body_nodes.push_back(node);
}
auto validate_args_retvals =
[](const std::vector<OutputTensor>& args_or_retvals,
const string& op_type) {
for (int i = 0, e = args_or_retvals.size(); i < e; ++i) {
if (args_or_retvals[i].node == nullptr) {
return errors::InvalidArgument("Missing '", op_type,
"' node at index ", i);
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(validate_args_retvals(inputs, "_Arg"));
TF_RETURN_IF_ERROR(validate_args_retvals(outputs, "_Retval"));
return GraphToFunctionDefHelper(
graph, name, false,
false,
false, body_nodes, inputs, outputs,
output_names, control_outputs, control_output_names,
nullptr, allow_destructive_reads, fdef);
}
}
Status GraphToFunctionDef(const Graph& fn_body, const string& fn_name,
bool append_hash_to_fn_name,
bool set_stateful_from_nodes,
bool copy_placeholder_attrs_from_nodes,
const std::vector<const Node*>& body_nodes,
const std::vector<OutputTensor>& inputs,
const std::vector<OutputTensor>& outputs,
const std::vector<string>& output_names,
const std::vector<const Node*>& control_outputs,
const std::vector<string>& control_output_names,
const char* description, FunctionDef* fdef) {
return GraphToFunctionDefHelper(
fn_body, fn_name, append_hash_to_fn_name, set_stateful_from_nodes,
copy_placeholder_attrs_from_nodes, body_nodes, inputs, outputs,
output_names, control_outputs, control_output_names, description,
false, fdef);
}
Status GraphToFunctionDef(
const Graph& graph, const string& name,
const std::function<absl::optional<string>(const Node*)>& control_ret,
FunctionDef* fdef) {
return GraphToFunctionDefHelper(graph, name, control_ret,
{},
false, fdef);
}
Status GraphToFunctionDef(const Graph& graph, const string& name,
FunctionDef* fdef) {
return GraphToFunctionDef(graph, name, nullptr, fdef);
}
Status GraphToFunctionDef(const Graph& graph, const string& name,
const std::vector<std::string>& output_names,
FunctionDef* fdef) {
return GraphToFunctionDefHelper(graph, name, nullptr,
output_names,
false, fdef);
}
Status GraphToFunctionDef(
std::unique_ptr<Graph> graph, const string& name,
const std::function<std::optional<string>(const Node*)>& control_ret,
FunctionDef* fdef) {
return GraphToFunctionDefHelper(*graph, name, control_ret,
{},
true, fdef);
}
} | #include "tensorflow/core/framework/graph_to_functiondef.h"
#include <utility>
#include <vector>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/base64.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
FunctionDef RemoveDebugInfo(const FunctionDef& def) {
FunctionDef copy = def;
for (auto& node_def : *copy.mutable_node_def()) {
node_def.clear_experimental_debug_info();
}
return copy;
}
bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
string* diff) {
if (a.DebugString() != b.DebugString()) {
if (diff) {
*diff = strings::StrCat("Definition mismatch for function ",
a.signature().name(), ":\n", a.DebugString(),
"\n ---- vs. ----\n", b.DebugString());
}
return false;
}
return true;
}
TEST(GraphToFunctionDefTest, Basics) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Arg(root.WithOpName("B"), DT_FLOAT, 1);
auto c = ops::_Arg(root.WithOpName("C"), DT_FLOAT, 2);
auto d = ops::Add(root.WithOpName("D"), a, b);
auto e = ops::Add(root.WithOpName("b"), d, c);
auto f = ops::Neg(root.WithOpName("h"), e);
auto g = ops::AddN(root.WithOpName("G"), std::initializer_list<Output>{e, f});
auto h = ops::_Retval(root.WithOpName("H"), g, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", &fdef));
FunctionDef fdef_expected = FunctionDefHelper::Create(
"test_fn",
{"a: float", "b: float", "c: float"},
{"h: float"},
{},
{
{{"D"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"b_0"}, "Add", {"D:z:0", "c"}, {{"T", DT_FLOAT}}},
{{"h_0"}, "Neg", {"b_0:z:0"}, {{"T", DT_FLOAT}}},
{{"G"}, "AddN", {"b_0:z:0", "h_0:y:0"}, {{"N", 2}, {"T", DT_FLOAT}}},
},
{{"h", "G:sum:0"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, OverrideOutputNames) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Retval(root.WithOpName("H"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", {"b"}, &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, DuplicatedOutputNames) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
auto c = ops::_Retval(root.WithOpName("C"), a, 1);
FunctionDef fdef;
auto status = GraphToFunctionDef(*root.graph(), "test_fn", {"d", "d"}, &fdef);
EXPECT_THAT(status, tensorflow::testing::StatusIs(
error::INVALID_ARGUMENT,
"Cannot have duplicate output names. Name 'd' "
"appears more than once in 'output_names' array."));
}
TEST(GraphToFunctionDefTest, ArgAttrShape) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
AttrValue shape_attr;
*(shape_attr.mutable_shape()) = TensorShape({1, 2}).AsProto();
a.node()->AddAttr("shape", shape_attr);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
FunctionDef::ArgAttrs attrs;
AttrValue output_shapes;
*(output_shapes.mutable_list()->add_shape()) = TensorShape({1, 2}).AsProto();
attrs.mutable_attr()->insert({"_output_shapes", output_shapes});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ArgAttrPrivateAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
AttrValue private_attr;
*(private_attr.mutable_s()) = "value";
a.node()->AddAttr("_name", private_attr);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*root.graph(), "test_fn", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
FunctionDef::ArgAttrs attrs;
attrs.mutable_attr()->insert({"_name", private_attr});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ArgAttrConstInput) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
Tensor t(DT_FLOAT, TensorShape({2, 2}));
TensorProto t_proto;
t.AsProtoField(&t_proto);
AttrValue attr;
*(attr.mutable_tensor()) = std::move(t_proto);
a.node()->AddAttr("value", attr);
a.node()->AddAttr("index", 0);
auto b = ops::_Retval(root.WithOpName("B"), a, 0);
std::vector<OutputTensor> inputs;
std::vector<OutputTensor> outputs;
auto add_arg_or_retval = [](Node* node,
std::vector<OutputTensor>* args_or_retvals) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "index", &index));
if (index >= args_or_retvals->size()) {
args_or_retvals->resize(index + 1);
}
(*args_or_retvals)[index].node = node;
return absl::OkStatus();
};
for (Node* node : root.graph()->op_nodes()) {
if (node->IsConstant()) {
TF_EXPECT_OK(add_arg_or_retval(node, &inputs));
} else {
TF_EXPECT_OK(add_arg_or_retval(node, &outputs));
}
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
false, {}, inputs,
outputs,
{}, {}, {},
"ArgAttrConstInput", &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"b: float"},
{},
{},
{{"b", "a"}});
AttrValue value;
*(value.mutable_list()->add_shape()) = TensorShape({2, 2}).AsProto();
FunctionDef::ArgAttrs attrs;
attrs.mutable_attr()->insert({"_output_shapes", value});
(*fdef_expected.mutable_arg_attr())[0] = std::move(attrs);
(*fdef_expected.mutable_signature()->mutable_description()) =
"ArgAttrConstInput";
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, AppendHashToFnName) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("attr_name_not_found", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", true,
false,
false, body_nodes,
{},
{},
{}, {}, {},
nullptr, &fdef));
EXPECT_TRUE(absl::StartsWith(fdef.signature().name(), "test_fn_"));
}
TEST(GraphToFunctionDefTest, CopyPlaceholderAttrsFromNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::VarHandleOp(root.WithOpName("var"), DT_FLOAT, {});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("shared_name", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
true, body_nodes, {},
{},
{}, {}, {},
nullptr, &fdef));
}
TEST(GraphToFunctionDefTest, CopyPlaceholderAttrsFromNodesUnImplemented) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Const(root.WithOpName("A"), 0.0f, {2, 2});
AttrValue foo;
*foo.mutable_placeholder() = "foo";
a.node()->AddAttr("attr_name_not_found", foo);
std::vector<const Node*> body_nodes;
for (Node* node : root.graph()->op_nodes()) {
body_nodes.push_back(node);
}
FunctionDef fdef;
auto status = GraphToFunctionDef(
*root.graph(), "test_fn", false,
false,
true, body_nodes, {},
{},
{}, {}, {},
nullptr, &fdef);
EXPECT_EQ(status.code(), error::UNIMPLEMENTED);
}
TEST(GraphToFunctionDefTest, ControlDependencies) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("a"), DT_FLOAT, 0);
auto b = ops::Neg(root.WithOpName("b").WithControlDependencies(a), a);
auto c = ops::_Retval(root.WithOpName("c").WithControlDependencies(b), b, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", &fdef));
FunctionDef fdef_expected = FunctionDefHelper::Create(
"test_fn",
{"a: float"},
{"c: float"},
{},
{
{{"b"}, "Neg", {"a", "^a"}, {{"T", DT_FLOAT}}},
},
{{"c", "b:y:0"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
TEST(GraphToFunctionDefTest, ControlOutputs) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(root.WithOpName("a"), DT_FLOAT, 0);
auto b = ops::Neg(root.WithOpName("b"), a);
auto c = ops::_Retval(root.WithOpName("c"), b, 0);
GraphDef graph_def;
TF_EXPECT_OK(root.ToGraphDef(&graph_def));
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphConstructorOptions options;
TF_EXPECT_OK(ConvertGraphDefToGraph(options, graph_def, graph.get()));
const auto control_ret = [](const Node* n) -> absl::optional<string> {
if (n->name() == "b") return absl::make_optional<string>("must_execute");
return absl::nullopt;
};
FunctionDef fdef;
TF_EXPECT_OK(GraphToFunctionDef(*graph, "test_fn", control_ret, &fdef));
FunctionDef fdef_expected =
FunctionDefHelper::Create("test_fn",
{"a: float"},
{"c: float"},
{},
{
{{"b"}, "Neg", {"a"}, {{"T", DT_FLOAT}}},
},
{{"c", "b:y:0"}},
{{"must_execute", "b"}});
string diff;
bool fdefs_equal =
EqualFunctionDef(fdef_expected, RemoveDebugInfo(fdef), &diff);
EXPECT_TRUE(fdefs_equal) << diff;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_to_functiondef.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/graph_to_functiondef_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
82253c7c-0a81-4089-ae97-3fa38916f40c | cpp | tensorflow/tensorflow | nvjitlink | third_party/xla/xla/stream_executor/cuda/nvjitlink.h | third_party/xla/xla/stream_executor/cuda/nvjitlink_test.cc | #ifndef XLA_STREAM_EXECUTOR_CUDA_NVJITLINK_H_
#define XLA_STREAM_EXECUTOR_CUDA_NVJITLINK_H_
#include <cstdint>
#include <tuple>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
namespace stream_executor {
using NvJitLinkVersion = std::tuple<unsigned, unsigned>;
absl::StatusOr<NvJitLinkVersion> GetNvJitLinkVersion();
struct NvJitLinkInput {
enum class Type { kPtx, kCubin };
Type type;
absl::Span<const uint8_t> bytes;
};
absl::StatusOr<std::vector<uint8_t>> CompileAndLinkUsingLibNvJitLink(
int cc_major, int cc_minor, absl::Span<const NvJitLinkInput> inputs,
GpuAsmOpts options, bool cancel_if_reg_spill);
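// Illustrative sketch (not part of this API): compiling and linking a single
// PTX module for sm_90. `ptx` is a placeholder for a null-terminated PTX
// string; see nvjitlink_test.cc for a complete example.
//
//   std::vector<NvJitLinkInput> inputs = {
//       {NvJitLinkInput::Type::kPtx,
//        absl::Span<const uint8_t>(reinterpret_cast<const uint8_t*>(ptx),
//                                  std::strlen(ptx) + 1)}};
//   absl::StatusOr<std::vector<uint8_t>> cubin =
//       CompileAndLinkUsingLibNvJitLink(/*cc_major=*/9, /*cc_minor=*/0, inputs,
//                                       GpuAsmOpts{},
//                                       /*cancel_if_reg_spill=*/false);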
}
#endif | #include "xla/stream_executor/cuda/nvjitlink.h"
#include <sys/types.h>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/stream_executor/cuda/nvjitlink_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace {
constexpr const char kDependeePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .func (.param .b32 func_retval0) _Z5magicv()
{
.reg .b32 %r<2>;
mov.u32 %r1, 42;
st.param.b32 [func_retval0+0], %r1;
ret;
})";
constexpr const char kDependentPtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.extern .func (.param .b32 func_retval0) _Z5magicv
()
;
.visible .entry _Z6kernelPi(
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<2>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
{
.reg .b32 temp_param_reg;
.param .b32 retval0;
call.uni (retval0),
_Z5magicv,
(
);
ld.param.b32 %r1, [retval0+0];
}
st.global.u32 [%rd2], %r1;
ret;
})";
constexpr const char kStandalonePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi (
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<16>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, 42;
st.global.u32 [%rd2], %r15;
ret;
})";
constexpr stream_executor::CudaComputeCapability kDefaultComputeCapability{5,
2};
auto CompileAndLinkHelper(stream_executor::CudaComputeCapability cc,
absl::Span<const char* const> ptx_inputs,
bool disable_gpuasm_optimizations = false,
bool cancel_if_reg_spill = false) {
std::vector<stream_executor::NvJitLinkInput> inputs;
inputs.reserve(ptx_inputs.size());
for (const char* ptx_input : ptx_inputs) {
inputs.emplace_back(stream_executor::NvJitLinkInput{
stream_executor::NvJitLinkInput::Type::kPtx,
absl::Span<const uint8_t>{reinterpret_cast<const uint8_t*>(ptx_input),
std::strlen(ptx_input) + 1}});
}
stream_executor::GpuAsmOpts options{};
options.disable_gpuasm_optimizations = disable_gpuasm_optimizations;
return stream_executor::CompileAndLinkUsingLibNvJitLink(
cc.major, cc.minor, inputs, options, cancel_if_reg_spill);
}
class NvJitLinkTest : public ::testing::Test {
void SetUp() override {
if (!stream_executor::IsLibNvJitLinkSupported()) {
GTEST_SKIP();
}
}
};
TEST_F(NvJitLinkTest, GetVersion) {
EXPECT_THAT(stream_executor::GetNvJitLinkVersion(),
tsl::testing::IsOkAndHolds(
testing::Ge(stream_executor::NvJitLinkVersion{12, 0})));
}
TEST_F(NvJitLinkTest, IdentifiesUnsupportedArchitecture) {
EXPECT_THAT(
CompileAndLinkHelper(stream_executor::CudaComputeCapability{100, 0},
{kStandalonePtx}),
tsl::testing::StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_F(NvJitLinkTest, LinkingTwoCompilationUnitsSucceeds) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{kDependentPtx, kDependeePtx}),
tsl::testing::IsOk());
}
TEST_F(NvJitLinkTest, LinkingFailsWhenDependeeIsMissing) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability, {kDependentPtx}),
tsl::testing::StatusIs(absl::StatusCode::kUnknown));
}
TEST_F(NvJitLinkTest, CanAlsoJustCompileSingleCompilationUnit) {
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability, {kStandalonePtx}),
tsl::testing::IsOk());
}
TEST_F(NvJitLinkTest, CancelsOnRegSpill) {
std::string dependent_ptx = absl::StrReplaceAll(
kDependentPtx, {{"
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{dependent_ptx.c_str(), kDependeePtx},
true,
true),
tsl::testing::StatusIs(absl::StatusCode::kCancelled));
EXPECT_THAT(CompileAndLinkHelper(kDefaultComputeCapability,
{dependent_ptx.c_str(), kDependeePtx},
true,
false),
tsl::testing::IsOk());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/nvjitlink.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/nvjitlink_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a26d4ad-5249-4221-b3ca-2779d8338ce3 | cpp | google/arolla | substitution | arolla/expr/visitors/substitution.cc | arolla/expr/visitors/substitution_test.cc | #include "arolla/expr/visitors/substitution.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
namespace {
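// Shared implementation: post-order traversal that computes a key for every
// node via `key_fn`; when the key is present in `subs` the node is replaced by
// the mapped expression, otherwise it is rebuilt with its (possibly
// substituted) dependencies.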
template <class Key, class KeyFn>
absl::StatusOr<ExprNodePtr> Substitute(
ExprNodePtr expr, const absl::flat_hash_map<Key, ExprNodePtr>& subs,
KeyFn key_fn) {
return PostOrderTraverse(
expr,
[&](const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
-> absl::StatusOr<ExprNodePtr> {
if (auto key = key_fn(node); key.has_value()) {
if (auto it = subs.find(*key); it != subs.end()) {
return it->second;
}
}
return WithNewDependencies(node, DereferenceVisitPointers(visits));
});
}
}
absl::StatusOr<ExprNodePtr> SubstituteByName(
ExprNodePtr expr,
const absl::flat_hash_map<std::string, ExprNodePtr>& subs) {
return Substitute(expr, subs,
[](const auto& expr) -> std::optional<std::string> {
if (IsNameAnnotation(expr)) {
return std::string(ReadNameAnnotation(expr));
}
return std::nullopt;
});
}
absl::StatusOr<ExprNodePtr> SubstituteLeaves(
ExprNodePtr expr,
const absl::flat_hash_map<std::string, ExprNodePtr>& subs) {
return Substitute(expr, subs,
[](const auto& expr) -> std::optional<std::string> {
if (expr->is_leaf()) return expr->leaf_key();
return std::nullopt;
});
}
absl::StatusOr<ExprNodePtr> SubstitutePlaceholders(
ExprNodePtr expr, const absl::flat_hash_map<std::string, ExprNodePtr>& subs,
bool must_substitute_all) {
return PostOrderTraverse(
expr,
[&](const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
-> absl::StatusOr<ExprNodePtr> {
if (node->is_placeholder()) {
if (subs.contains(node->placeholder_key())) {
return subs.at(node->placeholder_key());
} else if (must_substitute_all) {
return absl::InvalidArgumentError(absl::StrFormat(
"No value was provided for P.%s, but substitution of all "
"placeholders was requested.",
node->placeholder_key()));
}
}
return WithNewDependencies(node, DereferenceVisitPointers(visits));
});
}
absl::StatusOr<ExprNodePtr> SubstituteByFingerprint(
ExprNodePtr expr,
const absl::flat_hash_map<Fingerprint, ExprNodePtr>& subs) {
return Substitute(expr, subs,
[](const auto& expr) -> std::optional<Fingerprint> {
return expr->fingerprint();
});
}
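// Illustrative sketch (not part of this file): replacing leaf "x" inside
// `expr` with a new leaf; `expr` is a placeholder for any expression that
// references L.x. See substitution_test.cc for complete examples.
//
//   absl::StatusOr<ExprNodePtr> replaced =
//       SubstituteLeaves(expr, {{"x", Leaf("y")}});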
} | #include "arolla/expr/visitors/substitution.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithNameAnnotation;
TEST(SubstitutionTest, SubsByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "ly"));
ASSERT_OK_AND_ASSIGN(auto z, WithNameAnnotation(Leaf("z"), "lz"));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expr, CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr, CallOp("math.add", {x, z}));
EXPECT_THAT(SubstituteByName(expr, {{"ly", z}}),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
TEST(SubstitutionTest, SubstituteLeavesByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "ly"));
EXPECT_THAT(SubstituteByName(x, {{"lx", y}}), IsOkAndHolds(EqualsExpr(y)));
}
TEST(SubstitutionTest, SubstitutePlaceholdersByName) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Placeholder("x"), "px"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Placeholder("y"), "py"));
EXPECT_THAT(SubstituteByName(x, {{"px", y}}), IsOkAndHolds(EqualsExpr(y)));
EXPECT_THAT(SubstituteByName(x, {{"x", y}}), IsOkAndHolds(EqualsExpr(x)));
}
TEST(SubstitutionTest, SubstitutePlaceholders) {
auto px = Placeholder("x");
auto py = Placeholder("y");
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(px, "name"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(py, "name"));
EXPECT_THAT(SubstitutePlaceholders(x, {{"x", py}}),
IsOkAndHolds(EqualsExpr(y)));
EXPECT_THAT(SubstitutePlaceholders(x, {{"name", py}}),
IsOkAndHolds(EqualsExpr(x)));
}
TEST(SubstitutionTest, SubstituteLeaves) {
auto lx = Leaf("x");
auto ly = Leaf("y");
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(lx, "name"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(ly, "name"));
EXPECT_THAT(SubstituteLeaves(x, {{"x", ly}}), IsOkAndHolds(EqualsExpr(y)));
EXPECT_THAT(SubstituteLeaves(x, {{"name", ly}}), IsOkAndHolds(EqualsExpr(x)));
}
TEST(SubstitutionTest, SubsByFingerprint) {
ASSERT_OK_AND_ASSIGN(auto x, WithNameAnnotation(Leaf("x"), "lx"));
ASSERT_OK_AND_ASSIGN(auto y, WithNameAnnotation(Leaf("y"), "lx"));
ASSERT_OK_AND_ASSIGN(auto z, WithNameAnnotation(Leaf("z"), "lz"));
ASSERT_OK_AND_ASSIGN(auto x_add_expr, CallOp("math.add", {x, x}));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.add", {x_add_expr, y}));
absl::flat_hash_map<Fingerprint, ExprNodePtr> subs = {
{x->fingerprint(), y},
{x_add_expr->fingerprint(), z},
{y->fingerprint(), x}};
ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr, CallOp("math.add", {z, x}));
EXPECT_THAT(SubstituteByFingerprint(expr, subs),
IsOkAndHolds(EqualsExpr(expected_expr)));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/visitors/substitution.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/visitors/substitution_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
f24af8d1-7211-4c76-950a-dff58c823db9 | cpp | tensorflow/tensorflow | windowed_einsum_handler | third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler.cc | third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler_test.cc | #include "xla/service/gpu/transforms/windowed_einsum_handler.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = match;
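// Attempts to move FP8 dequantization (convert followed by a scalar
// multiply/divide) of the two dot operands of a windowed einsum loop from
// outside the loop into the loop body, so the loop carries the narrow FP8
// operands plus their scalar scales instead of the dequantized values.
// Supports both the all-gather and the reduce-scatter loop patterns; returns
// true if the rewrite was applied.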
absl::StatusOr<bool> ShiftDequantizationF8(HloComputation* while_body) {
HloInstruction* while_instr = while_body->WhileCallInstruction();
if (!while_instr || while_instr->operand(0)->user_count() != 1) {
return false;
}
HloInstruction* param_tuple = while_instr->mutable_operand(0);
std::array<HloInstruction*, 2> binaries, operands, scales;
std::array<std::vector<HloInstruction*>, 2> unaries;
for (int k = 0; k < 2; ++k) {
HloInstruction* operand = param_tuple->mutable_operand(k);
while (operand->opcode() == HloOpcode::kBitcast ||
operand->opcode() == HloOpcode::kBroadcast ||
operand->opcode() == HloOpcode::kCopy ||
operand->opcode() == HloOpcode::kReshape ||
operand->opcode() == HloOpcode::kTranspose) {
unaries[k].emplace_back(operand);
operand = operand->mutable_operand(0);
}
std::reverse(unaries[k].begin(), unaries[k].end());
if (!Match(operand,
m::AnyOf<HloInstruction>(
m::Divide(&binaries[k], m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k]))),
m::MultiplyAnyOrder(&binaries[k],
m::Convert(m::Op(&operands[k])),
m::Broadcast(m::Op(&scales[k])))))) {
VLOG(5) << "Unable to identify FP8 dequantization pattern.";
return false;
}
}
std::array<PrimitiveType, 2> operand_types{
operands[0]->shape().element_type(), operands[1]->shape().element_type()};
if (!((operand_types[0] == F8E4M3FN && operand_types[1] == F8E4M3FN) ||
(operand_types[0] == F8E4M3FN && operand_types[1] == F8E5M2) ||
(operand_types[0] == F8E5M2 && operand_types[1] == F8E4M3FN))) {
VLOG(5) << "Unsupported types.";
return false;
}
for (int k = 0; k < 2; ++k) {
if (binaries[k]->shape().element_type() != BF16 &&
binaries[k]->shape().element_type() != F16 &&
binaries[k]->shape().element_type() != F32) {
VLOG(5) << "Unsupported types.";
return false;
}
}
if (!ShapeUtil::IsScalar(scales[0]->shape()) ||
!ShapeUtil::IsScalar(scales[1]->shape())) {
VLOG(5) << "Scaling factors must be scalars.";
return false;
}
HloComputation* while_condition = while_instr->while_condition();
HloInstruction* while_root = while_body->root_instruction();
std::array<HloInstruction*, 2> dots, gtes, dyn_slices{nullptr, nullptr},
coll_perms{nullptr, nullptr};
if (Match(while_root,
m::Tuple(m::CollectivePermute(
&coll_perms[1],
m::CollectivePermute(
&coll_perms[0],
m::GetTupleElement(>es[0], m::Parameter(), 0))),
m::GetTupleElement(>es[1], m::Parameter(), 1),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice().WithOperand(
1, m::Dot(&dots[0], m::Op(), m::Op())),
m::Dot(&dots[1], m::Op(), m::Op()), m::Op(), m::Op(),
m::Op()),
m::Op(), m::Op())) &&
dots[0]->operand(0) == gtes[0] && dots[0]->operand(1) == gtes[1] &&
dots[1]->operand(1) == gtes[1]) {
VLOG(5) << "Identified all-gather windowed einsum pattern.";
} else if (Match(
while_root,
m::Tuple(m::GetTupleElement(>es[0], m::Parameter(), 0),
m::GetTupleElement(>es[1], m::Parameter(), 1),
m::AddAnyOrder(
m::Dot(&dots[0], m::DynamicSlice(&dyn_slices[0]),
m::Op()),
m::Op()),
m::CollectivePermute(m::AddAnyOrder(
m::Dot(&dots[1], m::DynamicSlice(&dyn_slices[1]),
m::Op()),
m::Op())),
m::Op())) &&
dots[0]->operand(1) == gtes[1] && dots[1]->operand(1) == gtes[1]) {
VLOG(5) << "Identified reduce-scatter windowed einsum pattern.";
} else {
VLOG(5) << "Unable to identify valid windowed einsum pattern.";
return false;
}
for (int k = 0; k < 2; ++k) {
for (HloInstruction* unary : unaries[k]) {
      Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout(
          operands[k]->shape().element_type(), unary->shape().dimensions(),
          unary->shape().layout().minor_to_major());
      operands[k] = unary->AddInstruction(
          unary->CloneWithNewOperands(new_shape, {operands[k]}));
}
}
for (int k = 0; k < 2; ++k) {
TF_RETURN_IF_ERROR(
param_tuple->ReplaceOperandWithDifferentShape(k, operands[k]));
ShapeUtil::UpdateTupleShape(operands[k]->shape(), k,
param_tuple->mutable_shape());
param_tuple->AppendOperand(scales[k]);
ShapeUtil::AppendShapeToTuple(scales[k]->shape(),
param_tuple->mutable_shape());
}
for (HloComputation* while_comp : {while_body, while_condition}) {
while_comp->ReplaceParameter(
0, HloInstruction::CreateParameter(
0, param_tuple->shape(),
while_comp->parameter_instruction(0)->name()));
}
HloInstruction* body_param = while_body->parameter_instruction(0);
for (int k = 0; k < 2; ++k) {
TF_ASSIGN_OR_RETURN(HloInstruction * operand_f8,
MakeGetTupleElementHlo(body_param, k));
if (while_root->operand(k) == gtes[k]) {
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(k, operand_f8));
ShapeUtil::UpdateTupleShape(operand_f8->shape(), k,
while_root->mutable_shape());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scale,
MakeGetTupleElementHlo(
body_param, body_param->shape().tuple_shapes_size() - 2 + k));
while_root->AppendOperand(operand_scale);
ShapeUtil::AppendShapeToTuple(operand_scale->shape(),
while_root->mutable_shape());
HloInstruction* operand_f32 =
MakeConvertToHlo(operand_f8, gtes[k]->shape().element_type());
HloInstruction* broadcast_scale =
MakeBroadcastHlo(operand_scale, {}, operand_f32->shape());
TF_ASSIGN_OR_RETURN(
HloInstruction * operand_scaled,
MakeBinaryHlo(binaries[k]->opcode(), operand_f32, broadcast_scale));
for (int l = 0; l < 2; ++l) {
if (dots[l]->operand(k) == gtes[k]) {
TF_RETURN_IF_ERROR(dots[l]->ReplaceOperandWith(k, operand_scaled));
}
if (dyn_slices[l] && dyn_slices[l]->operand(0) == gtes[k]) {
TF_RETURN_IF_ERROR(
dyn_slices[l]->ReplaceOperandWith(0, operand_scaled));
}
}
if (coll_perms[0] && coll_perms[0]->operand(0) == gtes[k]) {
std::array<HloInstruction*, 2> coll_perms_f8{nullptr, nullptr};
coll_perms_f8[0] =
while_body->AddInstruction(coll_perms[0]->CloneWithNewOperands(
operand_f8->shape(), {operand_f8}));
coll_perms_f8[1] =
while_body->AddInstruction(coll_perms[1]->CloneWithNewOperands(
coll_perms_f8[0]->shape(), {coll_perms_f8[0]}));
HloInstruction* coll_perm0_f32 =
MakeConvertToHlo(coll_perms_f8[0], gtes[k]->shape().element_type());
TF_ASSIGN_OR_RETURN(HloInstruction * x_scaled,
MakeBinaryHlo(binaries[k]->opcode(), coll_perm0_f32,
broadcast_scale));
TF_RETURN_IF_ERROR(dots[1]->ReplaceOperandWith(0, x_scaled));
TF_RETURN_IF_ERROR(
while_root->ReplaceOperandWithDifferentShape(0, coll_perms_f8[1]));
ShapeUtil::UpdateTupleShape(coll_perms_f8[1]->shape(), 0,
while_root->mutable_shape());
}
}
HloInstruction* new_while_instr = while_instr->AddInstruction(
while_instr->CloneWithNewShape(while_root->shape()));
TF_RETURN_IF_ERROR(
while_instr->ReplaceAllUsesWithDifferentShape(new_while_instr));
while_instr->while_body()->SetWhileCallInstruction(new_while_instr);
TF_RETURN_IF_ERROR(while_instr->parent()->RemoveInstruction(while_instr));
if (coll_perms[0]) {
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[1]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[0]));
}
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gtes[0]));
TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gtes[1]));
VLOG(5) << "FP8 dequantization moved into while loop.";
return true;
}
int64_t NumberOfInstructionsInComp(const HloComputation* comp, HloOpcode op) {
int64_t total_count = 0;
for (const HloInstruction* inst : comp->instructions()) {
if (inst->opcode() == op) {
++total_count;
}
}
return total_count;
}
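// Assigns `dot` to its own operation queue (compute stream) and adds that
// queue to the wait list of the dot's first user, so the consumer synchronizes
// with the asynchronously scheduled dot.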
absl::Status UpdateDotAndConsumerConfig(HloInstruction* dot,
int64_t stream_id) {
auto dot_gpu_config = dot->backend_config<gpu::GpuBackendConfig>();
HloInstruction* updater = dot->users()[0];
auto updater_gpu_config = updater->backend_config<gpu::GpuBackendConfig>();
dot_gpu_config->set_operation_queue_id(stream_id);
if (!absl::c_linear_search(updater_gpu_config->wait_on_operation_queues(),
stream_id)) {
updater_gpu_config->mutable_wait_on_operation_queues()->Add(stream_id);
}
TF_RETURN_IF_ERROR(dot->set_backend_config(dot_gpu_config.value()));
TF_RETURN_IF_ERROR(updater->set_backend_config(updater_gpu_config.value()));
return absl::OkStatus();
}
absl::Status SetForceDelayForInstruction(HloInstruction* instr,
bool force_delay) {
auto gpu_config = instr->backend_config<gpu::GpuBackendConfig>();
gpu_config->set_force_earliest_schedule(force_delay);
TF_RETURN_IF_ERROR(instr->set_backend_config(gpu_config.value()));
return absl::OkStatus();
}
static int64_t GetAgActivationCacheIndex(const HloInstruction* while_loop) {
const HloInstruction* loop_tuple = while_loop->operand(0);
const Shape& tuple_shape = loop_tuple->shape();
CHECK(tuple_shape.IsTuple());
return tuple_shape.tuple_shapes_size() - 1;
}
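// Rewrites the body of an all-gather windowed einsum loop so that, besides
// computing the einsum, it also writes the gathered LHS slices (including the
// slices received over collective-permute) into a full-size cache buffer that
// was appended to the loop-carried tuple. Consumers can then read the fully
// all-gathered activation from the loop output instead of issuing a separate
// all-gather.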
absl::Status ProcessWindowedEinsumLoopForActivationCaching(
WindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop) {
HloInstruction* loop = ag_loop.loop;
HloComputation* while_body = loop->while_body();
HloInstruction* input_gte;
for (HloInstruction* gte : while_body->parameter_instruction(0)->users()) {
if (gte->tuple_index() == 0) {
input_gte = gte;
}
}
HloInstruction* root = while_body->root_instruction();
HloInstruction* input_tuple = while_body->parameter_instruction(0);
const Shape& input_shape = input_tuple->shape();
int64_t full_cache_buffer_index = GetAgActivationCacheIndex(loop);
HloInstruction* full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
ShapeUtil::GetTupleElementShape(input_shape, full_cache_buffer_index),
input_tuple, full_cache_buffer_index));
HloInstruction* new_full_buffer_output = nullptr;
HloInstruction* dus_boundary_constant;
HloInstruction* first_cp_output;
for (HloInstruction* gte_user : input_gte->users()) {
if (gte_user->opcode() == HloOpcode::kCollectivePermute) {
first_cp_output = gte_user;
break;
}
}
for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) {
HloInstruction* slice_indices;
if (Match(inst,
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter()), m::Op(),
m::Constant(&dus_boundary_constant),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for first slice: "
<< slice_indices->ToString();
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
if (Match(inst,
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(), m::Op(), m::Constant(),
m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())),
m::Op()))) {
slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape(
dus_boundary_constant->shape(), slice_indices));
VLOG(5) << "Created slice op for second slice: "
<< slice_indices->ToString();
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{dus_boundary_constant, slice_indices, dus_boundary_constant}));
}
HloInstruction* slice_index;
HloInstruction* ds_index_constant;
HloInstruction* remainder;
HloInstruction* ds_param;
if (Match(inst, m::Dot(m::Op(), m::DynamicSlice(&ds_param))) &&
Match(ds_param->operand(0), m::GetTupleElement(m::Parameter(), 1))) {
for (int64_t ds_op_i = 1; ds_op_i < ds_param->operands().size();
ds_op_i++) {
if (!Match(
ds_param->mutable_operand(ds_op_i),
m::Reshape(&slice_index, m::DynamicSlice(m::Constant(),
m::Op(&remainder)))) &&
!Match(ds_param->mutable_operand(ds_op_i),
m::Constant(&ds_index_constant))) {
return absl::OkStatus();
}
}
if (Match(remainder,
m::Remainder(m::Add(m::GetTupleElement(), m::Op()), m::Op()))) {
full_buffer_output_gte =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
input_gte,
{ds_index_constant, ds_index_constant, slice_index}));
}
if (Match(remainder,
m::Remainder(
m::Add(m::Add(m::GetTupleElement(), m::Op()), m::Op()),
m::Op()))) {
new_full_buffer_output =
while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_buffer_output_gte->shape(), full_buffer_output_gte,
first_cp_output,
{ds_index_constant, ds_index_constant, slice_index}));
}
}
}
std::vector<HloInstruction*> original_operands(root->operands().begin(),
root->operands().end());
original_operands.push_back(new_full_buffer_output);
HloInstruction* new_output_tuple = while_body->AddInstruction(
HloInstruction::CreateTuple(original_operands));
TF_RETURN_IF_ERROR(
while_body->ReplaceInstructionWithDifferentShape(root, new_output_tuple));
return absl::OkStatus();
}
bool HasReplicaGroups(const HloInstruction* inst) {
return inst->replica_groups().size() > 0;
}
bool ShouldAddToChain(const HloInstruction* inst) {
switch (inst->opcode()) {
case HloOpcode::kTranspose:
case HloOpcode::kReshape:
case HloOpcode::kCopy:
return inst->user_count() == 1;
default:
return false;
}
}
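// Post-processes an unrolled windowed einsum loop body: marks the
// collective-permute that feeds the next step for earliest scheduling and
// places every dot on its own operation queue so communication and compute
// can overlap.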
absl::Status PostProcessUnrolledLoop(HloInstruction* loop, int64_t stream_id) {
HloComputation* while_body = loop->while_body();
int64_t force_delay_cp_gte_index =
while_body->name().find(
WindowedEinsumHandler::kWindowedEinsumRsLoopName) == 0
? 2
: 0;
for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) {
HloInstruction* matched_cp;
if (Match(inst,
m::CollectivePermute(
&matched_cp, m::GetTupleElement(m::Parameter(),
force_delay_cp_gte_index)))) {
TF_RETURN_IF_ERROR(
SetForceDelayForInstruction(matched_cp, true));
}
if (inst->opcode() == HloOpcode::kDot) {
TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(inst, stream_id));
++stream_id;
}
}
return absl::OkStatus();
}
struct MatchedGemmA2aResult {
HloInstruction* producer_gemm;
HloInstruction* lhs;
HloInstruction* rhs;
HloInstruction* a2a_replacement = nullptr;
bool matched = false;
};
class WindowedEinsumVisitor : public DfsHloRewriteVisitor {
public:
explicit WindowedEinsumVisitor(
std::vector<WindowedEinsumHandler::WindowedEinsumAgLoops>& all_ag_loops)
: all_ag_loops_(all_ag_loops) {}
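  // Matches dot(all-to-all(x), y), possibly with a chain of single-use
  // transpose/reshape/copy ops between the all-to-all and the dot. When such a
  // chain is present, the all-to-all is moved past the chain so it feeds the
  // dot directly, remapping its split dimension through the intervening
  // reshapes/transposes. Returns true and sets *lhs/*rhs on a match.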
absl::StatusOr<bool> MatchA2aGemmWithIntermediateReshapes(
HloInstruction* dot, HloInstruction** lhs, HloInstruction** rhs) {
if (Match(dot, m::Dot(m::AllToAll(lhs).WithOneUse().WithPredicate(
HasReplicaGroups),
m::Op(rhs))) &&
!DynCast<HloAllToAllInstruction>((*lhs))->constrain_layout() &&
!(*lhs)->shape().IsTuple()) {
return true;
}
std::vector<HloInstruction*> allowed_intermediate_ops(
{dot->mutable_operand(0)});
HloAllToAllInstruction* matched_a2a = nullptr;
while (true) {
HloInstruction* curr = allowed_intermediate_ops.back();
if (ShouldAddToChain(curr)) {
allowed_intermediate_ops.insert(allowed_intermediate_ops.end(),
std::begin(curr->operands()),
std::end(curr->operands()));
} else if (curr->opcode() == HloOpcode::kAllToAll &&
curr->user_count() == 1) {
matched_a2a = DynCast<HloAllToAllInstruction>(curr);
allowed_intermediate_ops.pop_back();
break;
} else {
return false;
}
}
CHECK(matched_a2a != nullptr);
if (matched_a2a->constrain_layout() || matched_a2a->shape().IsTuple() ||
!HasReplicaGroups(matched_a2a) || !matched_a2a->split_dimension()) {
return false;
}
int64_t split_dimension = *matched_a2a->split_dimension();
for (int64_t i = allowed_intermediate_ops.size() - 1; i >= 0; i--) {
HloInstruction* current_op = allowed_intermediate_ops[i];
if (current_op->opcode() == HloOpcode::kReshape) {
std::vector<std::pair<int64_t, int64_t>> unmodified_dims =
ShapeUtil::DimensionsUnmodifiedByReshape(
current_op->operand(0)->shape(), current_op->shape());
auto it = absl::c_find_if(
unmodified_dims,
[&split_dimension](std::pair<int64_t, int64_t>& dim_pair) {
return dim_pair.first == split_dimension;
});
if (it == unmodified_dims.end()) {
VLOG(5) << "Split dimension of: " << matched_a2a->ToShortString()
<< " has been modified by reshapes. Skip process it for "
"decomposition.";
return false;
}
split_dimension = it->second;
} else if (current_op->opcode() == HloOpcode::kTranspose) {
const auto& transpose_dims = current_op->dimensions();
for (int64_t j = 0; j < transpose_dims.size(); j++) {
if ((int64_t)transpose_dims[j] == split_dimension) {
split_dimension = j;
break;
}
}
}
}
TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith(
0, matched_a2a->mutable_operand(0)));
HloInstruction* new_a2a =
matched_a2a->parent()->AddInstruction(HloInstruction::CreateAllToAll(
allowed_intermediate_ops.front()->shape(),
{allowed_intermediate_ops.front()}, matched_a2a->replica_groups(),
false, hlo_query::NextChannelId(*matched_a2a->GetModule()),
split_dimension));
TF_RETURN_IF_ERROR(dot->ReplaceOperandWith(0, new_a2a));
TF_RETURN_IF_ERROR(
matched_a2a->parent()->RemoveInstructionAndUnusedOperands(matched_a2a));
MarkAsChanged();
*lhs = new_a2a;
*rhs = dot->mutable_operand(1);
return true;
}
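  // Two rewrites are attempted on each dot:
  //  1. If one operand is an all-gather (possibly of an FP8-dequantized value)
  //     of the same activation consumed by a known windowed einsum all-gather
  //     loop, the dot is redirected to the loop's cached all-gather result,
  //     extending the loop to produce that cache on first use.
  //  2. If the dot consumes an all-to-all (matched through intermediate
  //     reshapes), the pair is decomposed into per-group slices of
  //     all-to-all + dot that are accumulated with adds and assigned to
  //     separate operation queues.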
absl::Status HandleDot(HloInstruction* dot) override {
CHECK_EQ(dot->opcode(), HloOpcode::kDot);
HloComputation* comp = dot->parent();
for (WindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop :
all_ag_loops_) {
HloInstruction* loop = ag_loop.loop;
HloInstruction* windowed_lhs =
loop->mutable_operand(0)->mutable_operand(0);
HloInstruction *all_gather, *binary, *scale = nullptr;
auto all_gather_optionally_dequantized = m::AnyOf<HloInstruction>(
m::AllGather(&all_gather,
m::Divide(&binary, m::Convert(m::Op().Is(windowed_lhs)),
m::Broadcast(m::Op(&scale)))),
m::AllGather(
&all_gather,
m::MultiplyAnyOrder(&binary, m::Convert(m::Op().Is(windowed_lhs)),
m::Broadcast(m::Op(&scale)))),
m::AllGather(&all_gather, m::Op().Is(windowed_lhs)));
if (!Match(dot, m::Dot(all_gather_optionally_dequantized, m::Op())) &&
!Match(dot, m::Dot(m::Op(), all_gather_optionally_dequantized))) {
continue;
}
if (scale) {
if (!ShapeUtil::IsScalar(scale->shape())) {
continue;
}
if (windowed_lhs->shape().element_type() != F8E4M3FN &&
windowed_lhs->shape().element_type() != F8E5M2) {
continue;
}
if (binary->shape().element_type() != BF16 &&
binary->shape().element_type() != F16 &&
binary->shape().element_type() != F32) {
continue;
}
}
if (!ag_loop.consumed) {
Literal zero_literal =
LiteralUtil::Zero(windowed_lhs->shape().element_type());
HloInstruction* zero = comp->AddInstruction(
HloInstruction::CreateConstant(std::move(zero_literal)));
Shape zero_bcast_shape = ShapeUtil::ChangeElementType(
all_gather->shape(), windowed_lhs->shape().element_type());
HloInstruction* zero_bcast =
MakeBroadcastHlo(zero, {}, zero_bcast_shape);
loop->mutable_operand(0)->AppendOperand(zero_bcast);
ShapeUtil::AppendShapeToTuple(
zero_bcast->shape(), loop->mutable_operand(0)->mutable_shape());
for (HloComputation* while_comp :
{loop->while_body(), loop->while_condition()}) {
while_comp->ReplaceParameter(
0, HloInstruction::CreateParameter(
0, loop->mutable_operand(0)->shape(),
while_comp->parameter_instruction(0)->name()));
}
*loop->mutable_shape() = loop->operand(0)->shape();
VLOG(5) << "Found all-gather that shares the same operand with a "
"windowed einsum loop : "
<< loop->ToString();
TF_RETURN_IF_ERROR(
ProcessWindowedEinsumLoopForActivationCaching(ag_loop));
ag_loop.consumed = true;
}
int64_t cache_output_index = dot->operand_index(all_gather);
HloInstruction* new_gte =
comp->AddInstruction(HloInstruction::CreateGetTupleElement(
loop, GetAgActivationCacheIndex(loop)));
HloInstruction* new_gte_scaled;
if (scale) {
HloInstruction* new_convert =
MakeConvertToHlo(new_gte, binary->shape().element_type());
HloInstruction* bcast_scale =
MakeBroadcastHlo(scale, {}, new_convert->shape());
TF_ASSIGN_OR_RETURN(
new_gte_scaled,
MakeBinaryHlo(binary->opcode(), new_convert, bcast_scale));
}
TF_RETURN_IF_ERROR(dot->ReplaceOperandWith(
cache_output_index, scale ? new_gte_scaled : new_gte));
if (all_gather->user_count() == 0) {
TF_RETURN_IF_ERROR(comp->RemoveInstruction(all_gather));
}
}
HloInstruction* lhs;
HloInstruction* rhs;
std::vector<xla::ReplicaGroup> replica_groups;
TF_ASSIGN_OR_RETURN(bool matched,
MatchA2aGemmWithIntermediateReshapes(dot, &lhs, &rhs));
if (matched) {
replica_groups = lhs->replica_groups();
int64_t group_size = replica_groups[0].replica_ids_size();
if (absl::c_find_if(replica_groups, [&](ReplicaGroup& group) {
return group.replica_ids_size() != group_size;
}) != replica_groups.end()) {
VLOG(5) << "All-to-all split groups don't have the same number of "
"replicas.";
return absl::OkStatus();
}
const DotDimensionNumbers& original_dot_dnums =
dot->dot_dimension_numbers();
const PrecisionConfig& original_precision = dot->precision_config();
const auto& lhs_contracting_dims =
dot->dot_dimension_numbers().lhs_contracting_dimensions();
const auto& rhs_contracting_dims =
dot->dot_dimension_numbers().rhs_contracting_dimensions();
if (lhs_contracting_dims.size() != 1 ||
rhs_contracting_dims.size() != 1) {
VLOG(5) << "Contracting dimensions have multiple elements, all-to-all "
"sharding will be skipped.";
return absl::OkStatus();
}
int64_t lhs_contracting_dim = lhs_contracting_dims[0];
int64_t rhs_contracting_dim = rhs_contracting_dims[0];
HloAllToAllInstruction* a2a = DynCast<HloAllToAllInstruction>(lhs);
int64_t contracting_dim_value =
rhs->shape().dimensions()[rhs_contracting_dim];
std::vector<int64_t> lhs_slice_sizes(a2a->shape().rank(), 0);
std::vector<int64_t> lhs_slice_increments(a2a->shape().rank(), 1);
std::vector<int64_t> lhs_slice_max_range(
a2a->shape().dimensions().begin(), a2a->shape().dimensions().end());
std::vector<int64_t> rhs_slice_sizes(rhs->shape().rank(), 0);
std::vector<int64_t> rhs_slice_increments(rhs->shape().rank(), 1);
std::vector<int64_t> rhs_slice_max_range(
rhs->shape().dimensions().begin(), rhs->shape().dimensions().end());
HloInstruction* output_buffer =
comp->AddInstruction(HloInstruction::CreateBroadcast(
dot->shape(),
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(dot->shape().element_type()))),
{}));
HloInstruction* a2a_operand = a2a->mutable_operand(0);
if (contracting_dim_value % group_size) {
VLOG(5) << absl::StrFormat(
"Contracting dimension %d needs to be divisible by group_size %d",
contracting_dim_value, group_size);
return absl::OkStatus();
}
int64_t size_per_split = contracting_dim_value / group_size;
lhs_slice_max_range[lhs_contracting_dim] = size_per_split;
rhs_slice_max_range[rhs_contracting_dim] = size_per_split;
Shape lhs_slice_shape = a2a->shape();
Shape rhs_slice_shape = rhs->shape();
lhs_slice_shape.set_dimensions(lhs_contracting_dim, size_per_split);
rhs_slice_shape.set_dimensions(rhs_contracting_dim, size_per_split);
HloInstruction* lhs_slice;
HloInstruction* rhs_slice;
HloInstruction* partial_result = output_buffer;
Shape partial_all_to_all_shape = lhs_slice_shape;
TF_ASSIGN_OR_RETURN(
Shape partial_dot_shape,
ShapeInference::InferDotOpShape(
partial_all_to_all_shape, rhs_slice_shape, original_dot_dnums,
std::nullopt));
int64_t stream_id = hlo_query::NextChannelId(*a2a->GetModule());
for (int64_t i = 0; i < group_size; ++i) {
lhs_slice = comp->AddInstruction(HloInstruction::CreateSlice(
lhs_slice_shape, a2a_operand, lhs_slice_sizes, lhs_slice_max_range,
lhs_slice_increments));
a2a->SetupDerivedInstruction(lhs_slice);
lhs_slice_sizes[lhs_contracting_dim] =
lhs_slice_max_range[lhs_contracting_dim];
lhs_slice_max_range[lhs_contracting_dim] += size_per_split;
rhs_slice = comp->AddInstruction(HloInstruction::CreateSlice(
rhs_slice_shape, rhs, rhs_slice_sizes, rhs_slice_max_range,
rhs_slice_increments));
a2a->SetupDerivedInstruction(rhs_slice);
rhs_slice_sizes[rhs_contracting_dim] =
rhs_slice_max_range[rhs_contracting_dim];
rhs_slice_max_range[rhs_contracting_dim] += size_per_split;
HloInstruction* partial_all_to_all =
comp->AddInstruction(HloInstruction::CreateAllToAll(
partial_all_to_all_shape, {lhs_slice}, a2a->device_list(),
false, hlo_query::NextChannelId(*a2a->GetModule()),
a2a->split_dimension()));
a2a->SetupDerivedInstruction(partial_all_to_all);
HloInstruction* partial_dot =
comp->AddInstruction(HloInstruction::CreateDot(
partial_dot_shape, partial_all_to_all, rhs_slice,
original_dot_dnums, original_precision));
partial_result = comp->AddInstruction(
HloInstruction::CreateBinary(partial_dot->shape(), HloOpcode::kAdd,
partial_dot, partial_result));
a2a->SetupDerivedInstruction(partial_result);
TF_RETURN_IF_ERROR(
UpdateDotAndConsumerConfig(partial_dot, stream_id++));
}
TF_RETURN_IF_ERROR(ReplaceInstruction(dot, partial_result));
}
return absl::OkStatus();
}
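  // Producer-side counterpart of MatchA2aGemmWithIntermediateReshapes: matches
  // all-to-all(dot(lhs, rhs)), possibly through a chain of single-use
  // transpose/reshape/copy ops, and moves the all-to-all down to consume the
  // dot directly while remapping its split dimension through the chain.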
absl::StatusOr<MatchedGemmA2aResult> MatchGemmA2aWithIntermediateReshapes(
HloInstruction* inst) {
MatchedGemmA2aResult result;
HloAllToAllInstruction* a2a = DynCast<HloAllToAllInstruction>(inst);
if (!HasReplicaGroups(a2a) || a2a->constrain_layout() ||
a2a->shape().IsTuple()) {
return result;
}
if (Match(a2a, m::AllToAll(m::Dot(&result.producer_gemm, m::Op(&result.lhs),
m::Op(&result.rhs))
.WithOneUse()))) {
result.matched = true;
return result;
}
std::vector<HloInstruction*> allowed_intermediate_ops(
{a2a->mutable_operand(0)});
HloInstruction* matched_dot = nullptr;
while (true) {
HloInstruction* curr = allowed_intermediate_ops.back();
if (ShouldAddToChain(curr)) {
allowed_intermediate_ops.insert(allowed_intermediate_ops.end(),
std::begin(curr->operands()),
std::end(curr->operands()));
} else if (curr->opcode() == HloOpcode::kDot && curr->user_count() == 1) {
matched_dot = curr;
allowed_intermediate_ops.pop_back();
break;
} else {
return result;
}
}
CHECK(matched_dot != nullptr);
int64_t split_dimension = *a2a->split_dimension();
for (int64_t i = 0; i < allowed_intermediate_ops.size(); i++) {
HloInstruction* current_op = allowed_intermediate_ops[i];
if (current_op->opcode() == HloOpcode::kReshape) {
std::vector<std::pair<int64_t, int64_t>> unmodified_dims =
ShapeUtil::DimensionsUnmodifiedByReshape(
current_op->operand(0)->shape(), current_op->shape());
auto it = absl::c_find_if(
unmodified_dims,
[&split_dimension](std::pair<int64_t, int64_t>& dim_pair) {
return dim_pair.second == split_dimension;
});
if (it == unmodified_dims.end()) {
VLOG(5) << "Split dimension of: " << a2a->ToShortString()
<< " has been modified by reshapes. Skip process it for "
"decomposition.";
return result;
}
split_dimension = it->first;
} else if (current_op->opcode() == HloOpcode::kTranspose) {
const auto& transpose_dims = current_op->dimensions();
split_dimension = transpose_dims[split_dimension];
}
}
result.a2a_replacement =
matched_dot->parent()->AddInstruction(HloInstruction::CreateAllToAll(
matched_dot->shape(), {matched_dot}, a2a->replica_groups(), false,
hlo_query::NextChannelId(*matched_dot->GetModule()),
split_dimension));
TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith(
0, result.a2a_replacement));
inst->SetupDerivedInstruction(result.a2a_replacement);
TF_RETURN_IF_ERROR(
ReplaceInstruction(inst, allowed_intermediate_ops.front()));
result.lhs = matched_dot->mutable_operand(0);
result.rhs = matched_dot->mutable_operand(1);
result.producer_gemm = matched_dot;
result.matched = true;
return result;
}
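  // Decomposes a GEMM followed by an all-to-all (matched via
  // MatchGemmA2aWithIntermediateReshapes) into per-group sliced dots whose
  // partial results go through smaller all-to-alls and are accumulated, with
  // each partial dot assigned to its own operation queue.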
absl::Status HandleAllToAll(HloInstruction* inst) override {
CHECK_EQ(inst->opcode(), HloOpcode::kAllToAll);
HloComputation* comp = inst->parent();
std::vector<xla::ReplicaGroup> replica_groups;
TF_ASSIGN_OR_RETURN(MatchedGemmA2aResult matched_result,
MatchGemmA2aWithIntermediateReshapes(inst));
if (matched_result.matched) {
HloInstruction* a2a = inst;
if (matched_result.a2a_replacement) {
a2a = matched_result.a2a_replacement;
}
replica_groups = a2a->replica_groups();
int64_t group_size = replica_groups[0].replica_ids_size();
if (absl::c_find_if(replica_groups, [&](ReplicaGroup& group) {
return group.replica_ids_size() != group_size;
}) != replica_groups.end()) {
VLOG(5) << "All-to-all split groups don't have the same number of "
"replicas.";
return absl::OkStatus();
}
const DotDimensionNumbers& original_dot_dnums =
matched_result.producer_gemm->dot_dimension_numbers();
const PrecisionConfig& original_precision =
matched_result.producer_gemm->precision_config();
const auto& lhs_contracting_dims =
matched_result.producer_gemm->dot_dimension_numbers()
.lhs_contracting_dimensions();
const auto& rhs_contracting_dims =
matched_result.producer_gemm->dot_dimension_numbers()
.rhs_contracting_dimensions();
if (lhs_contracting_dims.size() != 1 ||
rhs_contracting_dims.size() != 1) {
VLOG(5) << "Contracting dimensions have multiple elements, all-to-all "
"sharding will be skipped.";
return absl::OkStatus();
}
int64_t lhs_contracting_dim = lhs_contracting_dims[0];
int64_t rhs_contracting_dim = rhs_contracting_dims[0];
HloAllToAllInstruction* all_to_all = DynCast<HloAllToAllInstruction>(a2a);
int64_t contracting_dim_value =
matched_result.rhs->shape().dimensions()[rhs_contracting_dim];
std::vector<int64_t> lhs_slice_sizes(matched_result.lhs->shape().rank(),
0);
std::vector<int64_t> lhs_slice_increments(
matched_result.lhs->shape().rank(), 1);
std::vector<int64_t> lhs_slice_max_range(
matched_result.lhs->shape().dimensions().begin(),
matched_result.lhs->shape().dimensions().end());
std::vector<int64_t> rhs_slice_sizes(matched_result.rhs->shape().rank(),
0);
std::vector<int64_t> rhs_slice_increments(
matched_result.rhs->shape().rank(), 1);
std::vector<int64_t> rhs_slice_max_range(
matched_result.rhs->shape().dimensions().begin(),
matched_result.rhs->shape().dimensions().end());
HloInstruction* output_buffer =
comp->AddInstruction(HloInstruction::CreateBroadcast(
all_to_all->shape(),
comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(all_to_all->shape().element_type()))),
{}));
if (contracting_dim_value % group_size) {
VLOG(5) << absl::StrFormat(
"Contracting dimension %d needs to be divisible by group_size %d",
contracting_dim_value, group_size);
return absl::OkStatus();
}
int64_t size_per_split = contracting_dim_value / group_size;
lhs_slice_max_range[lhs_contracting_dim] = size_per_split;
rhs_slice_max_range[rhs_contracting_dim] = size_per_split;
Shape lhs_slice_shape = matched_result.lhs->shape();
Shape rhs_slice_shape = matched_result.rhs->shape();
lhs_slice_shape.set_dimensions(lhs_contracting_dim, size_per_split);
rhs_slice_shape.set_dimensions(rhs_contracting_dim, size_per_split);
HloInstruction* lhs_slice;
HloInstruction* rhs_slice;
HloInstruction* partial_result = output_buffer;
Shape partial_all_to_all_shape = all_to_all->shape();
TF_ASSIGN_OR_RETURN(
Shape partial_dot_shape,
ShapeInference::InferDotOpShape(
lhs_slice_shape, rhs_slice_shape, original_dot_dnums,
std::nullopt));
int64_t stream_id = hlo_query::NextChannelId(*all_to_all->GetModule());
for (int64_t i = 0; i < group_size; ++i) {
lhs_slice = comp->AddInstruction(HloInstruction::CreateSlice(
lhs_slice_shape, matched_result.lhs, lhs_slice_sizes,
lhs_slice_max_range, lhs_slice_increments));
all_to_all->SetupDerivedInstruction(lhs_slice);
lhs_slice_sizes[lhs_contracting_dim] =
lhs_slice_max_range[lhs_contracting_dim];
lhs_slice_max_range[lhs_contracting_dim] += size_per_split;
rhs_slice = comp->AddInstruction(HloInstruction::CreateSlice(
rhs_slice_shape, matched_result.rhs, rhs_slice_sizes,
rhs_slice_max_range, rhs_slice_increments));
all_to_all->SetupDerivedInstruction(rhs_slice);
rhs_slice_sizes[rhs_contracting_dim] =
rhs_slice_max_range[rhs_contracting_dim];
rhs_slice_max_range[rhs_contracting_dim] += size_per_split;
HloInstruction* partial_dot = comp->AddInstruction(
HloInstruction::CreateDot(partial_dot_shape, lhs_slice, rhs_slice,
original_dot_dnums, original_precision));
HloInstruction* partial_all_to_all =
comp->AddInstruction(HloInstruction::CreateAllToAll(
partial_all_to_all_shape, {partial_dot},
all_to_all->device_list(), false,
hlo_query::NextChannelId(*all_to_all->GetModule()),
all_to_all->split_dimension()));
all_to_all->SetupDerivedInstruction(partial_all_to_all);
partial_result = comp->AddInstruction(HloInstruction::CreateBinary(
partial_all_to_all_shape, HloOpcode::kAdd, partial_all_to_all,
partial_result));
all_to_all->SetupDerivedInstruction(partial_result);
TF_RETURN_IF_ERROR(
UpdateDotAndConsumerConfig(partial_dot, stream_id++));
}
TF_RETURN_IF_ERROR(ReplaceInstruction(all_to_all, partial_result));
}
return absl::OkStatus();
}
private:
std::vector<WindowedEinsumHandler::WindowedEinsumAgLoops>& all_ag_loops_;
};
}
absl::StatusOr<bool> WindowedEinsumHandler::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
5, "WindowedEinsumHandler::Run(), before:\n" + module->ToString());
bool changed = false;
int64_t stream_id = hlo_query::NextChannelId(*module);
std::vector<HloInstruction*> all_windowed_einsum_loops;
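  // First pass: find the windowed einsum (all-gather / reduce-scatter) loop
  // computations produced by SPMD partitioning, shift FP8 dequantization into
  // them where applicable, and remember the loops for later unrolling.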
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) {
continue;
}
if (comp->name().find(kWindowedEinsumRsLoopName) == 0 ||
comp->name().find(kWindowedEinsumAgLoopName) == 0) {
VLOG(5) << "Processing computation: " << comp->name();
TF_ASSIGN_OR_RETURN(changed, ShiftDequantizationF8(comp));
if (comp->name().find(kWindowedEinsumAgLoopName) == 0) {
all_ag_loops_.push_back(
WindowedEinsumAgLoops(comp->WhileCallInstruction()));
}
all_windowed_einsum_loops.push_back(comp->WhileCallInstruction());
}
}
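  // Second pass: rewrite dot/all-to-all patterns and re-wire consumers of the
  // collected all-gather loops.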
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
WindowedEinsumVisitor visitor(all_ag_loops_);
TF_RETURN_IF_ERROR(comp->Accept(&visitor));
changed |= visitor.changed();
}
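  // If any windowed einsum loops were found, run algebraic simplification and
  // constant folding over the module before unrolling them.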
if (!all_windowed_einsum_loops.empty()) {
TF_ASSIGN_OR_RETURN(bool applied_algsimp,
AlgebraicSimplifier(AlgebraicSimplifierOptions())
.Run(module, execution_threads));
changed |= applied_algsimp;
TF_ASSIGN_OR_RETURN(bool applied_cf,
HloConstantFolding().Run(module, execution_threads));
changed |= applied_cf;
}
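  // Fully unroll each windowed einsum loop (unroll factor -1) while keeping it
  // wrapped in a trivial trip-count-one while loop, then post-process the
  // unrolled body with the allocated stream id.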
for (HloInstruction* loop : all_windowed_einsum_loops) {
VLOG(5) << "Processing " << loop->ToString() << " for unrolling.";
std::string original_body_name = std::string(loop->while_body()->name());
std::string original_cond_name =
std::string(loop->while_condition()->name());
TF_ASSIGN_OR_RETURN(
UnrollResult result,
WhileLoopUnroller::UnrollAndReturnReplacement(
loop, -1, true,
false));
if (result.unrolled) {
result.new_while_op->while_body()->SetAndSanitizeName(
absl::StrCat("unrolled_", original_body_name));
result.new_while_op->while_condition()->SetAndSanitizeName(
absl::StrCat("unrolled_", original_cond_name));
xla::FrontendAttributes attributes;
(*attributes.mutable_map())["skip-simplify-while-loops_trip-count-one"] =
"true";
result.new_while_op->add_frontend_attributes(attributes);
TF_RETURN_IF_ERROR(
PostProcessUnrolledLoop(result.new_while_op, stream_id));
}
changed |= result.unrolled;
}
XLA_VLOG_LINES(5,
"WindowedEinsumHandler::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/gpu/transforms/windowed_einsum_handler.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
using WindowedEinsumHandlerTest = HloTestBase;
HloInstruction* FindInstructionByName(HloComputation* comp, std::string name) {
for (auto inst : comp->instructions()) {
if (inst->name() == name) {
return inst;
}
}
return nullptr;
}
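// After the pass, every dot in the (now unrolled) all-gather loop body should
// carry a non-default operation queue id, and the cloned send_first_lhs_shard
// collective-permute should be marked force_earliest_schedule.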
TEST_F(WindowedEinsumHandlerTest, AgLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,512,24576]{2,1,0}, bf16[24576,24576]{1,0})->bf16[2048,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_ag.1 {
param = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element = bf16[512,24576]{1,0} get-tuple-element(param), index=0
collective-permute.send_first_lhs_shard = bf16[512,24576]{1,0} collective-permute(get-tuple-element), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.lhs = bf16[24576,24576]{1,0} get-tuple-element(param), index=1
get-tuple-element.rhs = bf16[2048,24576]{1,0} get-tuple-element(param), index=2
dot.2 = bf16[512,24576]{1,0} dot(get-tuple-element, get-tuple-element.lhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.1 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.4 = u32[] get-tuple-element(param), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.4, partition-id)
constant = u32[] constant(4)
remainder = u32[] remainder(add, constant)
dynamic-slice = s32[1]{0} dynamic-slice(constant.1, remainder), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice)
constant.2 = s32[] constant(0)
dynamic-update-slice = bf16[2048,24576]{1,0} dynamic-update-slice(get-tuple-element.rhs, dot.2, reshape.4, constant.2), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
dot.3 = bf16[512,24576]{1,0} dot(collective-permute.send_first_lhs_shard, get-tuple-element.lhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.3 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.4, constant.3)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.1, remainder.1), dynamic_slice_sizes={1}
reshape.5 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2048,24576]{1,0} dynamic-update-slice(dynamic-update-slice, dot.3, reshape.5, constant.2)
get-tuple-element.3 = bf16[2048,24576]{1,0} get-tuple-element(param), index=3
add.3 = u32[] add(add.1, constant.3)
ROOT tuple = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(collective-permute.send_first_lhs_shard, get-tuple-element.lhs, dynamic-update-slice.1, get-tuple-element.3, add.3)
}
windowed_dot_general_cond_ag {
param.1 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
constant.8 = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element.5, constant.8), direction=LT
}
ENTRY test_main {
param.4 = bf16[1,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
reshape.8 = bf16[512,24576]{1,0} reshape(param.4)
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.18 = bf16[] constant(0)
broadcast = bf16[2048,24576]{1,0} broadcast(constant.18), dimensions={}
constant.20 = u32[] constant(0)
tuple.2 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(reshape.8, param.5, broadcast, broadcast, constant.20)
while = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag.1
ROOT get-tuple-element.13 = bf16[2048,24576]{1,0} get-tuple-element(while), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* ag_loop_body = ag_loop->while_body();
int64_t dot_count = 0;
for (HloInstruction* inst : ag_loop_body->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kDot) {
dot_count++;
EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(),
0);
}
}
EXPECT_EQ(dot_count, 4);
HloInstruction* cp1 = FindInstructionByName(
ag_loop_body, "collective-permute.send_first_lhs_shard.3");
EXPECT_TRUE(
cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule());
}
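// Same check for the reduce-scatter windowed einsum loop: all dots in the
// unrolled body are expected to get non-default operation queue ids.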
TEST_F(WindowedEinsumHandlerTest, RsLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[2048,24576]{1,0})->bf16[512,24576]{1,0}}, num_partitions=4
windowed_dot_general_body_rs_clone.1 {
param.2 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.6 = bf16[2048,24576]{1,0} get-tuple-element(param.2), index=0
get-tuple-element.7 = bf16[24576,24576]{1,0} get-tuple-element(param.2), index=1
get-tuple-element.9 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=2
collective-permute.send_second_lhs_shard = bf16[512,24576]{1,0} collective-permute(get-tuple-element.9), channel_id=4, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
constant.10 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.11 = u32[] get-tuple-element(param.2), index=4
constant.12 = u32[] constant(2)
add.8 = u32[] add(get-tuple-element.11, constant.12)
constant.13 = u32[] constant(1)
add.9 = u32[] add(add.8, constant.13)
partition-id.3 = u32[] partition-id()
add.10 = u32[] add(add.9, partition-id.3)
constant.9 = u32[] constant(4)
remainder.3 = u32[] remainder(add.10, constant.9)
dynamic-slice.4 = s32[1]{0} dynamic-slice(constant.10, remainder.3), dynamic_slice_sizes={1}
reshape.7 = s32[] reshape(dynamic-slice.4)
constant.11 = s32[] constant(0)
dynamic-slice.5 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.7, constant.11), dynamic_slice_sizes={512,24576}
dot.7 = bf16[512,24576]{1,0} dot(dynamic-slice.5, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.11 = bf16[512,24576]{1,0} add(collective-permute.send_second_lhs_shard, dot.7), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
get-tuple-element.10 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=3
add.6 = u32[] add(get-tuple-element.11, partition-id.3)
remainder.2 = u32[] remainder(add.6, constant.9)
dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.10, remainder.2), dynamic_slice_sizes={1}
reshape.6 = s32[] reshape(dynamic-slice.2)
dynamic-slice.3 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.6, constant.11), dynamic_slice_sizes={512,24576}
dot.5 = bf16[512,24576]{1,0} dot(dynamic-slice.3, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
add.7 = bf16[512,24576]{1,0} add(get-tuple-element.10, dot.5), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]}
collective-permute.2 = bf16[512,24576]{1,0} collective-permute(add.7), channel_id=5, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}
ROOT tuple.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(get-tuple-element.6, get-tuple-element.7, add.11, collective-permute.2, add.8)
}
windowed_dot_general_cond_rs {
param.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0)
get-tuple-element.12 = u32[] get-tuple-element(param.3), index=4
constant.17 = u32[] constant(4)
ROOT compare.1 = pred[] compare(get-tuple-element.12, constant.17), direction=LT
}
ENTRY main.9_spmd {
param.6 = bf16[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]}
param.7 = bf16[512,24576]{1,0} parameter(1)
param.8 = bf16[2048,24576]{1,0} parameter(2)
constant.20 = u32[] constant(0)
tuple.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(param.8, param.6, param.7, param.7, constant.20)
while.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs_clone.1
ROOT get-tuple-element.14 = bf16[512,24576]{1,0} get-tuple-element(while.1), index=2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* rs_loop =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloComputation* rs_loop_body = rs_loop->while_body();
int64_t dot_count = 0;
for (HloInstruction* inst : rs_loop_body->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kDot) {
dot_count++;
EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(),
0);
}
}
EXPECT_EQ(dot_count, 4);
}
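// When the all-gathered operand has another consumer (dot.7), the pass should
// feed that consumer from a new while-tuple element (index 5) that accumulates
// the gathered shards inside the loop, and the standalone all-gather should be
// removed.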
TEST_F(WindowedEinsumHandlerTest, AgLoopsMultipleConsumersAreChained) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[24576,24576]{1,0})->bf16[2,2048,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_ag {
param.1 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element.lhs = bf16[2,512,24576]{2,1,0} get-tuple-element(param.1), index=0
collective-permute.send_first_lhs_shard = bf16[2,512,24576]{2,1,0} collective-permute(get-tuple-element.lhs), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
collective-permute.send_second_lhs_shard = bf16[2,512,24576]{2,1,0} collective-permute(collective-permute.send_first_lhs_shard), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
get-tuple-element.rhs = bf16[24576,24576]{1,0} get-tuple-element(param.1), index=1
get-tuple-element.3 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=2
dot = bf16[2,512,24576]{2,1,0} dot(get-tuple-element.lhs, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.2 = s32[] constant(0)
constant.3 = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4
partition-id = u32[] partition-id()
add = u32[] add(get-tuple-element.5, partition-id)
constant.1 = u32[] constant(4)
remainder = u32[] remainder(add, constant.1)
dynamic-slice = s32[1]{0} dynamic-slice(constant.3, remainder), dynamic_slice_sizes={1}
reshape = s32[] reshape(dynamic-slice)
dynamic-update-slice = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(get-tuple-element.3, dot, constant.2, reshape, constant.2)
dot.1 = bf16[2,512,24576]{2,1,0} dot(collective-permute.send_first_lhs_shard, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.5 = u32[] constant(1)
add.1 = u32[] add(get-tuple-element.5, constant.5)
add.2 = u32[] add(add.1, partition-id)
remainder.1 = u32[] remainder(add.2, constant.1)
dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.3, remainder.1), dynamic_slice_sizes={1}
reshape.1 = s32[] reshape(dynamic-slice.1)
dynamic-update-slice.1 = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(dynamic-update-slice, dot.1, constant.2, reshape.1, constant.2)
get-tuple-element.4 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=3
add.3 = u32[] add(add.1, constant.5)
ROOT tuple = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(collective-permute.send_second_lhs_shard, get-tuple-element.rhs, dynamic-update-slice.1, get-tuple-element.4, add.3)
}
windowed_dot_general_cond_ag {
param = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(param), index=4
constant = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT
}
ENTRY main.12_spmd {
param.4 = bf16[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
constant.22 = bf16[] constant(0)
broadcast = bf16[2,2048,24576]{2,1,0} broadcast(constant.22), dimensions={}
constant.24 = u32[] constant(0)
tuple.2 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24)
while = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
get-tuple-element.13 = bf16[2,2048,24576]{2,1,0} get-tuple-element(while), index=2
copy.1 = bf16[2,2048,24576]{2,1,0} copy(get-tuple-element.13)
all-gather = bf16[2,2048,24576]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
param.6 = bf16[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
ROOT dot.7 = bf16[2,2048,24576]{2,1,0} dot(all-gather, param.6), lhs_contracting_dims={2}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* inst =
FindInstructionByName(module->entry_computation(), "dot.7");
EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(inst->operand(0)->tuple_index(), 5);
const HloInstruction* while_loop = inst->operand(0)->operand(0);
EXPECT_EQ(while_loop->opcode(), HloOpcode::kWhile);
HloComputation* while_body = while_loop->while_body();
int64_t dot_count = 0;
for (HloInstruction* ins : while_body->MakeInstructionPostOrder()) {
if (ins->opcode() == HloOpcode::kDot) {
dot_count++;
EXPECT_GT(ins->backend_config<GpuBackendConfig>()->operation_queue_id(),
0);
}
}
EXPECT_EQ(dot_count, 4);
HloInstruction* ag_loop =
FindInstructionByName(module->entry_computation(), "while");
HloInstruction* ag_while_root = ag_loop->while_body()->root_instruction();
EXPECT_THAT(
ag_while_root,
GmockMatch(m::Tuple(
m::Op(), m::Op(), m::Op(), m::Op(), m::Op(),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(
m::GetTupleElement(
m::Tuple(m::Op(), m::Op(), m::Op(), m::Op(), m::Op(),
m::DynamicUpdateSlice(
m::DynamicUpdateSlice(
m::GetTupleElement(m::Parameter())
.WithPredicate(
[](const HloInstruction* instr) {
return instr->tuple_index() ==
5;
}),
m::Op(), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op(), m::Op())))
.WithPredicate([](const HloInstruction* instr) {
return instr->tuple_index() == 5;
}),
m::Op(), m::Op(), m::Op(), m::Op()),
m::Op(), m::Op(), m::Op(), m::Op()))));
EXPECT_EQ(FindInstructionByName(module->entry_computation(), "all-gather"),
nullptr);
}
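// An all-to-all feeding a dot is decomposed into per-group slices: each sliced
// all-to-all/dot pair runs on its own operation queue and the partial products
// are accumulated into the final result.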
TEST_F(WindowedEinsumHandlerTest, A2aGemmHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,8192]{3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=8
ENTRY main.9_spmd {
param0 = bf16[1,8192,32768]{2,1,0} parameter(0)
param1 = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(param1), channel_id=4, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={1}
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(all-to-all, param0), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3},{4,5,6,7}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["5"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false}
CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
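// A dot feeding an all-to-all: the operands are sliced along the contracting
// dimension, each partial dot gets its own operation queue id, and the partial
// all-to-all outputs are accumulated.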
TEST_F(WindowedEinsumHandlerTest, GemmA2aHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,32768]{3,2,1,0})->bf16[1,4,2048,8192]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
dot.12 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.10, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}
ROOT all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(dot.12), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(1)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [24576:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [16384:24576]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [8192:16384]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]])
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]])
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]])
CHECK: ROOT {{.*}} = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
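// Same decomposition as above when a transpose/reshape chain sits between the
// all-to-all and the dot; the chain is kept ahead of the sliced all-to-alls.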
TEST_F(WindowedEinsumHandlerTest, A2aTransposeLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,8192,32768]{2,1,0} parameter(0)
param.10 = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
all-to-all = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} all-to-all(param.10), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={3}
transpose.15 = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(all-to-all), dimensions={0,3,1,2,4,5}
reshape.2170 = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(transpose.15)
reshape.2173 = bf16[4,8192,1,2048]{3,2,1,0} reshape(reshape.2170)
transpose.16 = bf16[1,4,2048,8192]{2,0,3,1} transpose(reshape.2173), dimensions={2,0,3,1}
copy.53 = bf16[1,4,2048,8192]{3,2,1,0} copy(transpose.16)
ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(copy.53, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1)
CHECK-DAG: %[[TRANSPOSE0:.*]] = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} %[[P1:.*]]), dimensions={0,3,1,2,4,5}
CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} %[[TRANSPOSE0:.*]])
CHECK-DAG: %[[RESHAPE1:.*]] = bf16[4,8192,1,2048]{3,2,1,0} reshape(bf16[1,4,8192,1,2048]{4,3,2,1,0} %[[RESHAPE0:.*]])
CHECK-DAG: %[[TRANSPOSE1:.*]] = bf16[1,4,2048,8192]{2,0,3,1} transpose(bf16[4,8192,1,2048]{3,2,1,0} %[[RESHAPE1:.*]]), dimensions={2,0,3,1}
CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{2,0,3,1} %[[TRANSPOSE1:.*]])
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [6144:8192]}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [4096:6144]}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [2048:4096]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [0:2048]}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false}
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["8"],"force_earliest_schedule":false}
CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
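// Same decomposition for dot -> all-to-all with a trailing transpose/reshape
// chain, which is re-applied to the accumulated result.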
TEST_F(WindowedEinsumHandlerTest, GemmA2aTransposeLoopsHaveStreamIds) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,4,2048,32768]{3,2,1,0}, bf16[1,32768,8192]{2,1,0})->bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0}}, num_partitions=4
ENTRY main.9_spmd {
param.9 = bf16[1,4,2048,32768]{3,2,1,0} parameter(0)
param.10 = bf16[1,32768,8192]{2,1,0} parameter(1)
dot.13 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.9, param.10), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}
copy.55 = bf16[1,4,2048,8192]{3,2,1,0} copy(dot.13)
transpose.17 = bf16[4,1,2048,8192]{3,2,0,1} transpose(copy.55), dimensions={1,0,2,3}
copy.56 = bf16[4,1,2048,8192]{3,2,1,0} copy(transpose.17)
reshape.2216 = bf16[1,4,1,2048,8192]{4,3,2,1,0} reshape(copy.56)
reshape.2219 = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} reshape(reshape.2216)
ROOT all-to-all.1 = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} all-to-all(reshape.2219), channel_id=7, replica_groups={{0,1,2,3}}, dimensions={1}
}
)";
const char* kExpected = R"(
CHECK: ENTRY
CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(0)
CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]}
CHECK-DAG: %[[P0:.*]] = bf16[1,32768,8192]{2,1,0} parameter(1)
CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [24576:32768], [0:8192]}
CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"12","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]}
CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [16384:24576], [0:8192]}
CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"11","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]}
CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [8192:16384], [0:8192]}
CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"10","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]}
CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]}
CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false}
CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]),
CHECK: replica_groups={
CHECK: {0,1,2,3}
CHECK: }
CHECK: dimensions={1}
CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0)
CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={}
CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]])
CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]])
CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]])
CHECK-DAG: %[[ADD3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]])
CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{3,2,1,0} %[[ADD3:.*]])
CHECK-DAG: %[[TRANSPOSE0:.*]] = bf16[4,1,2048,8192]{3,2,0,1} transpose(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), dimensions={1,0,2,3}
CHECK-DAG: %[[COPY1:.*]] = bf16[4,1,2048,8192]{3,2,1,0} copy(bf16[4,1,2048,8192]{3,2,0,1} %[[TRANSPOSE0:.*]])
CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,1,2048,8192]{4,3,2,1,0} reshape(bf16[4,1,2048,8192]{3,2,1,0} %[[COPY1:.*]])
CHECK: ROOT {{.*}} = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} reshape(bf16[1,4,1,2048,8192]{4,3,2,1,0} %[[RESHAPE0:.*]])
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched,
RunFileCheck(module->ToString(), kExpected));
EXPECT_TRUE(filecheck_matched);
}
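// FP8 all-gather loop: the convert/scale dequantization of both operands is
// sunk into the unrolled loop body, the scales travel as extra while-tuple
// elements, and the collective-permutes move f8 data.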
TEST_F(WindowedEinsumHandlerTest, AllGatherF8) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[1536,24576]{1,0}, f32[], f32[])->f32[2,2048,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_ag {
input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0)
lhs = f32[2,512,24576]{2,1,0} get-tuple-element(input), index=0
permuted_lhs0 = f32[2,512,24576]{2,1,0} collective-permute(lhs), channel_id=4, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
permuted_lhs1 = f32[2,512,24576]{2,1,0} collective-permute(permuted_lhs0), channel_id=5, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
rhs = f32[24576,24576]{1,0} get-tuple-element(input), index=1
partial_dot_output = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=2
dot0 = f32[2,512,24576]{2,1,0} dot(lhs, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
c0 = s32[] constant(0)
dot_update_slice_offsets = s32[4]{0} constant({0, 512, 1024, 1536})
loop_counter = u32[] get-tuple-element(input), index=4
partition_id = u32[] partition-id()
loop_counter_plus_partition_id = u32[] add(loop_counter, partition_id)
c4 = u32[] constant(4)
dot_update_slice_offsets_index0 = u32[] remainder(loop_counter_plus_partition_id, c4)
dot_update_slice_offset0 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index0), dynamic_slice_sizes={1}
dot_update_slice_offset_scalar0 = s32[] reshape(dot_update_slice_offset0)
updated_dot_output0 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(partial_dot_output, dot0, c0, dot_update_slice_offset_scalar0, c0)
dot1 = f32[2,512,24576]{2,1,0} dot(permuted_lhs0, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
c1 = u32[] constant(1)
loop_counter_plus_one = u32[] add(loop_counter, c1)
  loop_counter_plus_partition_id_plus_one = u32[] add(loop_counter_plus_one, partition_id)
  dot_update_slice_offsets_index1 = u32[] remainder(loop_counter_plus_partition_id_plus_one, c4)
dot_update_slice_offset1 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index1), dynamic_slice_sizes={1}
dot_update_slice_offset1_scalar = s32[] reshape(dot_update_slice_offset1)
updated_dot_output1 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(updated_dot_output0, dot1, c0, dot_update_slice_offset1_scalar, c0)
pass_through = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=3
next_loop_counter = u32[] add(loop_counter_plus_one, c1)
ROOT tuple = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(permuted_lhs1, rhs, updated_dot_output1, pass_through, next_loop_counter)
}
windowed_dot_general_cond_ag {
input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0)
loop_counter = u32[] get-tuple-element(input), index=4
loop_limit = u32[] constant(4)
ROOT compare = pred[] compare(loop_counter, loop_limit), direction=LT
}
ENTRY main {
lhs = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
rhs = f8e4m3fn[1536,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
c0_f32 = f32[] constant(0)
c0_f32_bcast = f32[2,2048,24576]{2,1,0} broadcast(c0_f32), dimensions={}
c0_u32 = u32[] constant(0)
scale_lhs = f32[] parameter(2)
scale_lhs_bcast = f32[2,512,24576]{2,1,0} broadcast(scale_lhs), dimensions={}
lhs_f32 = f32[2,512,24576]{2,1,0} convert(lhs)
lhs_scaled = f32[2,512,24576]{2,1,0} multiply(lhs_f32, scale_lhs_bcast)
scale_rhs = f32[] parameter(3)
scale_rhs_bcast = f32[1536,24576]{1,0} broadcast(scale_rhs), dimensions={}
rhs_f32 = f32[1536,24576]{1,0} convert(rhs)
rhs_scaled = f32[1536,24576]{1,0} multiply(rhs_f32, scale_rhs_bcast)
rhs_bcast = f32[16,1536,24576]{2,1,0} broadcast(rhs_scaled), dimensions={1,2}
rhs_reshaped = f32[24576,24576]{1,0} reshape(rhs_bcast)
while_input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(lhs_scaled, rhs_reshaped, c0_f32_bcast, c0_f32_bcast, c0_u32)
while = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) while(while_input), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
ROOT get-tuple-element.13 = f32[2,2048,24576]{2,1,0} get-tuple-element(while), index=2
}
)";
RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(),
R"(
; CHECK-LABEL: %unrolled_windowed_dot_general_body_ag
; CHECK-NEXT: [[INPUT:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) parameter(0)
; CHECK-NEXT: [[LHS:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} get-tuple-element([[INPUT]]), index=0
; CHECK-NEXT: [[PERMUTED_LHS0:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} collective-permute([[LHS]]), channel_id=6
; CHECK-NEXT: [[PERMUTED_LHS1:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} collective-permute([[PERMUTED_LHS0]]), channel_id=7
; CHECK-NEXT: [[RHS:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[INPUT]]), index=1
; CHECK-NEXT: [[PARTIAL_DOT_OUTPUT:%[^ ]+]] = f32[2,2048,24576]{2,1,0} get-tuple-element([[INPUT]]), index=2
; CHECK-NEXT: [[LHS_F32:%[^ ]+]] = f32[2,512,24576]{2,1,0} convert([[LHS]])
; CHECK-NEXT: [[SCALE_LHS:%[^ ]+]] = f32[] get-tuple-element([[INPUT]]), index=5
; CHECK-NEXT: [[SCALE_LHS_BCAST:%[^ ]+]] = f32[2,512,24576]{2,1,0} broadcast([[SCALE_LHS]]), dimensions={}
; CHECK-NEXT: [[LHS_SCALED:%[^ ]+]] = f32[2,512,24576]{2,1,0} multiply([[LHS_F32]], [[SCALE_LHS_BCAST]])
; CHECK-NEXT: [[RHS_F32:%[^ ]+]] = f32[24576,24576]{1,0} convert([[RHS]])
; CHECK-NEXT: [[SCALE_RHS:%[^ ]+]] = f32[] get-tuple-element([[INPUT]]), index=6
; CHECK-NEXT: [[SCALE_RHS_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS]]), dimensions={}
; CHECK-NEXT: [[RHS_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS_F32]], [[SCALE_RHS_BCAST]])
; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[LHS_SCALED]], [[RHS_SCALED]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0},
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]",
; CHECK-DAG: "wait_on_operation_queues":[],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[C0_S32:%[^ ]+]] = s32[] constant(0)
; CHECK-NEXT: [[C0_U32:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[C5:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[PARTITION_ID:%[^ ]+]] = u32[] partition-id()
; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[C5]], [[PARTITION_ID]])
; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(3)
; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD0]], [[C3]])
; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C0_U32]], [[AND0]], [[C3]])
; CHECK-NEXT: [[CONVERT3:%[^ ]+]] = s32[] convert([[CLAMP0]])
; CHECK-NEXT: [[C512:%[^ ]+]] = s32[] constant(512)
; CHECK-NEXT: [[MUL3:%[^ ]+]] = s32[] multiply([[CONVERT3]], [[C512]])
; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL3]])
; CHECK-NEXT: [[UPDATED_DOT_OUTPUT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dynamic-update-slice([[PARTIAL_DOT_OUTPUT]], [[DOT0]], [[C0_S32]], [[RESHAPE0]], [[C0_S32]]),
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"0",
; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID]]"],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[PERMUTED_LHS0_F32:%[^ ]+]] = f32[2,512,24576]{2,1,0} convert([[PERMUTED_LHS0]])
; CHECK-NEXT: [[PERMUTED_LHS_SCALED:%[^ ]+]] = f32[2,512,24576]{2,1,0} multiply([[PERMUTED_LHS0_F32]], [[SCALE_LHS_BCAST]])
; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[PERMUTED_LHS_SCALED]], [[RHS_SCALED]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0}
; CHECK-NEXT: [[LOOP_COUNTER:%[^ ]+]] = u32[] get-tuple-element([[INPUT]]), index=4
; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(1)
; CHECK-NEXT: [[LOOP_COUNTER_PLUS_ONE:%[^ ]+]] = u32[] add([[LOOP_COUNTER]], [[C1]])
; CHECK-NEXT: [[LOOP_COUNTER_PLUS_ONE_PLUS_PARTITION_ID:%[^ ]+]] = u32[] add([[LOOP_COUNTER_PLUS_ONE]], [[PARTITION_ID]])
; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[LOOP_COUNTER_PLUS_ONE_PLUS_PARTITION_ID]], [[C3]])
; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C0_U32]], [[AND1]], [[C3]])
; CHECK-NEXT: [[CONVERT4:%[^ ]+]] = s32[] convert([[CLAMP1]])
; CHECK-NEXT: [[MUL4:%[^ ]+]] = s32[] multiply([[CONVERT4]], [[C512]])
; CHECK-NEXT: [[RESHAPE1:%[^ ]+]] = s32[] reshape([[MUL4]])
; CHECK-NEXT: [[UPDATED_DOT_OUTPUT1:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dynamic-update-slice([[UPDATED_DOT_OUTPUT0]], [[DOT1]], [[C0_S32]], [[RESHAPE1]], [[C0_S32]])
; CHECK-NEXT: [[PASS_THROUGH:%[^ ]+]] = f32[2,2048,24576]{2,1,0} get-tuple-element([[INPUT]]), index=3
; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(2)
; CHECK-NEXT: [[NEXT_LOOP_COUNTER:%[^ ]+]] = u32[] add([[LOOP_COUNTER]], [[C2]])
; CHECK-NEXT: [[TUPLE:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[PERMUTED_LHS1]], [[RHS]], [[UPDATED_DOT_OUTPUT1]], [[PASS_THROUGH]], [[NEXT_LOOP_COUNTER]], [[SCALE_LHS]], [[SCALE_RHS]])
; CHECK-LABEL: ENTRY %main
; CHECK: [[LHS:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
; CHECK-NEXT: [[RHS:%[^ ]+]] = f8e4m3fn[1536,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
; CHECK-NEXT: [[RHS_BCAST:%[^ ]+]] = f8e4m3fn[16,1536,24576]{2,1,0} broadcast([[RHS]]), dimensions={1,2}
; CHECK-NEXT: [[RHS_RESHAPED:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} reshape([[RHS_BCAST]])
; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0)
; CHECK-NEXT: [[C0_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[C0]]), dimensions={}
; CHECK-NEXT: [[C0_U32:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[SCALE_LHS:%[^ ]+]] = f32[] parameter(2)
; CHECK-NEXT: [[SCALE_RHS:%[^ ]+]] = f32[] parameter(3)
; CHECK-NEXT: [[WHILE_INPUT:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[LHS]], [[RHS_RESHAPED]], [[C0_BCAST]], [[C0_BCAST]], [[C0_U32]], [[SCALE_LHS]], [[SCALE_RHS]])
; CHECK: [[WHILE:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) while([[WHILE_INPUT]]),
; CHECK-DAG: condition=%unrolled_windowed_dot_general_cond_ag,
; CHECK-DAG: body=%unrolled_windowed_dot_general_body_ag
)");
}
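// FP8 reduce-scatter loop: dequantization of the f8 operands is likewise sunk
// into the unrolled loop body.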
TEST_F(WindowedEinsumHandlerTest, ReduceScatterF8) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f8e4m3fn[2,2048,24576]{2,1,0}, f32[], f32[])->f32[2,512,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_rs {
param.3 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element.lhs = f32[2,2048,24576]{2,1,0} get-tuple-element(param.3), index=0
get-tuple-element.rhs = f32[24576,24576]{1,0} get-tuple-element(param.3), index=1
get-tuple-element.output = f32[2,512,24576]{2,1,0} get-tuple-element(param.3), index=2
collective-permute.send_shard = f32[2,512,24576]{2,1,0} collective-permute(get-tuple-element.output), channel_id=9, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}
constant.zero = s32[] constant(0)
constant.loop_index = s32[4]{0} constant({0, 512, 1024, 1536})
get-tuple-element.loop_iter = u32[] get-tuple-element(param.3), index=4
constant.iter_increment = u32[] constant(2)
add.8 = u32[] add(get-tuple-element.loop_iter, constant.iter_increment)
constant.27 = u32[] constant(1)
add.9 = u32[] add(add.8, constant.27)
partition-id.3 = u32[] partition-id()
add.shard_index = u32[] add(add.9, partition-id.3)
constant.22 = u32[] constant(4)
remainder.shard_index = u32[] remainder(add.shard_index, constant.22)
dynamic-slice.shard_start_index = s32[1]{0} dynamic-slice(constant.loop_index, remainder.shard_index), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.shard_start_index)
dynamic-slice.shard_to_compute = f32[2,512,24576]{2,1,0} dynamic-slice(get-tuple-element.lhs, constant.zero, reshape.3, constant.zero), dynamic_slice_sizes={2,512,24576}
dot.first_shard_dot = f32[2,512,24576]{2,1,0} dot(dynamic-slice.shard_to_compute, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
add.shard_partial_result = f32[2,512,24576]{2,1,0} add(collective-permute.send_shard, dot.first_shard_dot)
get-tuple-element.10 = f32[2,512,24576]{2,1,0} get-tuple-element(param.3), index=3
add.6 = u32[] add(get-tuple-element.loop_iter, partition-id.3)
remainder.2 = u32[] remainder(add.6, constant.22)
dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.loop_index, remainder.2), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.2)
dynamic-slice.3 = f32[2,512,24576]{2,1,0} dynamic-slice(get-tuple-element.lhs, constant.zero, reshape.2, constant.zero), dynamic_slice_sizes={2,512,24576}
dot.second_shard_dot = f32[2,512,24576]{2,1,0} dot(dynamic-slice.3, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
add.7 = f32[2,512,24576]{2,1,0} add(get-tuple-element.10, dot.second_shard_dot)
collective-permute.send_second_shard = f32[2,512,24576]{2,1,0} collective-permute(add.7), channel_id=10, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}
ROOT tuple.1 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) tuple(get-tuple-element.lhs, get-tuple-element.rhs, add.shard_partial_result, collective-permute.send_second_shard, add.8)
}
windowed_dot_general_cond_rs {
param.2 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) parameter(0)
get-tuple-element.6 = u32[] get-tuple-element(param.2), index=4
constant.21 = u32[] constant(4)
ROOT compare.1 = pred[] compare(get-tuple-element.6, constant.21), direction=LT
}
ENTRY main.9_spmd {
param.6 = f8e4m3fn[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]}
param.7 = f32[2,512,24576]{2,1,0} parameter(1)
param.8 = f8e4m3fn[2,2048,24576]{2,1,0} parameter(2)
constant.20 = u32[] constant(0)
scale_lhs = f32[] parameter(3)
scale_lhs_bcast = f32[2,2048,24576]{2,1,0} broadcast(scale_lhs), dimensions={}
lhs_bf16 = f32[2,2048,24576]{2,1,0} convert(param.8)
lhs_scaled = f32[2,2048,24576]{2,1,0} multiply(lhs_bf16, scale_lhs_bcast)
scale_rhs = f32[] parameter(4)
scale_rhs_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs), dimensions={}
rhs_bf16 = f32[24576,24576]{1,0} convert(param.6)
rhs_scaled = f32[24576,24576]{1,0} multiply(rhs_bf16, scale_rhs_bcast)
tuple.3 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) tuple(lhs_scaled, rhs_scaled, param.7, param.7, constant.20)
while.1 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs
ROOT get-tuple-element.14 = f32[2,512,24576]{2,1,0} get-tuple-element(while.1), index=2
}
)";
RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(),
R"(
; CHECK-LABEL: unrolled_windowed_dot_general_body_rs
; CHECK-NEXT: [[P0:%[^ ]+]] = (f8e4m3fn[2,2048,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[], f32[], f32[]) parameter(0)
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[P0]]), index=0
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[P0]]), index=1
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[P0]]), index=2
; CHECK-NEXT: [[CP0:%[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[GTE2]]), channel_id=11
; CHECK-NEXT: [[CONVERT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[GTE0]])
; CHECK-NEXT: [[GTE3:%[^ ]+]] = f32[] get-tuple-element([[P0]]), index=5
; CHECK-NEXT: [[BCAST0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[GTE3]]), dimensions={}
; CHECK-NEXT: [[MUL0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[CONVERT0]], [[BCAST0]])
; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0)
; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[GTE4:%[^ ]+]] = u32[] get-tuple-element([[P0]]), index=4
; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(3)
; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[GTE4]], [[C2]])
; CHECK-NEXT: [[PID:%[^ ]+]] = u32[] partition-id()
; CHECK-NEXT: [[ADD2:%[^ ]+]] = u32[] add([[ADD0]], [[PID]])
; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD2]], [[C2]])
; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C1]], [[AND0]], [[C2]])
; CHECK-NEXT: [[CONVERT10:%[^ ]+]] = s32[] convert([[CLAMP0]])
; CHECK-NEXT: [[C10:%[^ ]+]] = s32[] constant(512)
; CHECK-NEXT: [[MUL10:%[^ ]+]] = s32[] multiply([[CONVERT10]], [[C10]])
; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL10]])
; CHECK-NEXT: [[DSLICE1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE0]], [[C0]]), dynamic_slice_sizes={2,512,24576}
; CHECK-NEXT: [[CONVERT1:%[^ ]+]] = f32[24576,24576]{1,0} convert([[GTE1]])
; CHECK-NEXT: [[GTE5:%[^ ]+]] = f32[] get-tuple-element([[P0]]), index=6
; CHECK-NEXT: [[BCAST1:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[GTE5]]), dimensions={}
; CHECK-NEXT: [[MUL1:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[CONVERT1]], [[BCAST1]])
; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE1]], [[MUL1]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0},
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID0:[1-9][0-9]*]]",
; CHECK-DAG: "wait_on_operation_queues":[],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[ADD3:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[CP0]], [[DOT0]]),
; CHECK-DAG: backend_config={"
; CHECK-DAG: operation_queue_id":"0",
; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID0]]"],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[GTE6:[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[P0]]), index=3
; CHECK-NEXT: [[C11:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[ADD6:%[^ ]+]] = u32[] add([[C11]], [[PID]])
; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[ADD6]], [[C2]])
; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C1]], [[AND1]], [[C2]])
; CHECK-NEXT: [[CONVERT11:%[^ ]+]] = s32[] convert([[CLAMP1]])
; CHECK-NEXT: [[MUL11:%[^ ]+]] = s32[] multiply([[CONVERT11]], [[C10]])
; CHECK-NEXT: [[RESHAPE2:%[^ ]+]] = s32[] reshape([[MUL11]])
; CHECK-NEXT: [[DSLICE3:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE2]], [[C0]]), dynamic_slice_sizes={2,512,24576}
; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE3]], [[MUL1]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0}
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]",
; CHECK-DAG: "wait_on_operation_queues":[],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[ADD5:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[GTE6]], [[DOT1]])
; CHECK-NEXT: [[CP1:[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[ADD5]]), channel_id=12
; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(2)
; CHECK-NEXT: [[ADD7:%[^ ]+]] = u32[] add([[GTE4]], [[C3]])
; CHECK-NEXT: [[TUPLE0:[^ ]+]] = (f8e4m3fn[2,2048,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[GTE0]], [[GTE1]], [[ADD3]], [[CP1]], [[ADD7]], [[GTE3]], [[GTE5]])
; CHECK-NEXT: [[GTE0:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=0
; CHECK-NEXT: [[GTE1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[TUPLE0]]), index=1
; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=2
; CHECK-NEXT: [[CP0:%[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[GTE2]]), channel_id=13
; CHECK-NEXT: [[CONVERT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[GTE0]])
; CHECK-NEXT: [[GTE3:%[^ ]+]] = f32[] get-tuple-element([[TUPLE0]]), index=5
; CHECK-NEXT: [[BCAST0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[GTE3]]), dimensions={}
; CHECK-NEXT: [[MUL0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[CONVERT0]], [[BCAST0]])
; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0)
; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(0)
; CHECK-NEXT: [[GTE4:%[^ ]+]] = u32[] get-tuple-element([[TUPLE0]]), index=4
; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(3)
; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[GTE4]], [[C2]])
; CHECK-NEXT: [[PID:%[^ ]+]] = u32[] partition-id()
; CHECK-NEXT: [[ADD2:%[^ ]+]] = u32[] add([[ADD0]], [[PID]])
; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD2]], [[C2]])
; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C1]], [[AND0]], [[C2]])
; CHECK-NEXT: [[CONVERT10:%[^ ]+]] = s32[] convert([[CLAMP0]])
; CHECK-NEXT: [[C10:%[^ ]+]] = s32[] constant(512)
; CHECK-NEXT: [[MUL10:%[^ ]+]] = s32[] multiply([[CONVERT10]], [[C10]])
; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL10]])
; CHECK-NEXT: [[DSLICE1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE0]], [[C0]]), dynamic_slice_sizes={2,512,24576}
; CHECK-NEXT: [[CONVERT1:%[^ ]+]] = f32[24576,24576]{1,0} convert([[GTE1]])
; CHECK-NEXT: [[GTE5:%[^ ]+]] = f32[] get-tuple-element([[TUPLE0]]), index=6
; CHECK-NEXT: [[BCAST1:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[GTE5]]), dimensions={}
; CHECK-NEXT: [[MUL1:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[CONVERT1]], [[BCAST1]])
; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE1]], [[MUL1]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0},
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]",
; CHECK-DAG: "wait_on_operation_queues":[],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[ADD3:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[CP0]], [[DOT0]]),
; CHECK-DAG: backend_config={"
; CHECK-DAG: operation_queue_id":"0",
; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID]]"],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[GTE6:[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=3
; CHECK-NEXT: [[C11:%[^ ]+]] = u32[] constant(1)
; CHECK-NEXT: [[ADD6:%[^ ]+]] = u32[] add([[C11]], [[PID]])
; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[ADD6]], [[C2]])
; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C1]], [[AND1]], [[C2]])
; CHECK-NEXT: [[CONVERT11:%[^ ]+]] = s32[] convert([[CLAMP1]])
; CHECK-NEXT: [[MUL11:%[^ ]+]] = s32[] multiply([[CONVERT11]], [[C10]])
; CHECK-NEXT: [[RESHAPE2:%[^ ]+]] = s32[] reshape([[MUL11]])
; CHECK-NEXT: [[DSLICE3:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE2]], [[C0]]), dynamic_slice_sizes={2,512,24576}
; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE3]], [[MUL1]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0}
; CHECK-DAG: backend_config={
; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]",
; CHECK-DAG: "wait_on_operation_queues":[],
; CHECK-DAG: "force_earliest_schedule":false}
; CHECK-NEXT: [[ADD5:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[GTE6]], [[DOT1]])
; CHECK-NEXT: [[CP1:[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[ADD5]]), channel_id=14
; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(2)
; CHECK-NEXT: [[ADD7:%[^ ]+]] = u32[] add([[GTE4]], [[C3]])
)");
}
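// Verifies that when the all-gathered FP8 operand feeds multiple dots outside
// the loop, both consumers read the aggregated result from the unrolled
// all-gather loop's output tuple (index 7) and dequantize it before their dot.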
TEST_F(WindowedEinsumHandlerTest, AllGatherMultipleConsumersF8) {
constexpr absl::string_view kHloString = R"(
HloModule all_gather_multiple_consumers_f8, entry_computation_layout={(f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f8e4m3fn[24576,24576]{1,0}, f8e4m3fn[24576,24576]{1,0}, f32[], f32[], f32[], f32[])->f32[2,2048,24576]{2,1,0}}, num_partitions=4
windowed_dot_general_body_ag {
input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0)
lhs = f32[2,512,24576]{2,1,0} get-tuple-element(input), index=0
permuted_lhs0 = f32[2,512,24576]{2,1,0} collective-permute(lhs), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
permuted_lhs1 = f32[2,512,24576]{2,1,0} collective-permute(permuted_lhs0), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}
rhs = f32[24576,24576]{1,0} get-tuple-element(input), index=1
partial_dot_output = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=2
dot0 = f32[2,512,24576]{2,1,0} dot(lhs, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
c0 = s32[] constant(0)
dot_update_slice_offsets = s32[4]{0} constant({0, 512, 1024, 1536})
loop_counter = u32[] get-tuple-element(input), index=4
partition_id = u32[] partition-id()
loop_counter_plus_partition_id = u32[] add(loop_counter, partition_id)
c4 = u32[] constant(4)
dot_update_slice_offsets_index0 = u32[] remainder(loop_counter_plus_partition_id, c4)
dot_update_slice_offset0 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index0), dynamic_slice_sizes={1}
dot_update_slice_offset_scalar0 = s32[] reshape(dot_update_slice_offset0)
updated_dot_output0 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(partial_dot_output, dot0, c0, dot_update_slice_offset_scalar0, c0)
dot1 = f32[2,512,24576]{2,1,0} dot(permuted_lhs0, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0}
c1 = u32[] constant(1)
loop_counter_plus_one = u32[] add(loop_counter, c1)
loop_counter_plus_partition_id_plus_one = u32[] add(loop_counter_plus_one, partition_id)
dot_update_slice_offsets_index1 = u32[] remainder(loop_counter_plus_partition_id_plus_one, c4)
dot_update_slice_offset1 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index1), dynamic_slice_sizes={1}
dot_update_slice_offset1_scalar = s32[] reshape(dot_update_slice_offset1)
updated_dot_output1 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(updated_dot_output0, dot1, c0, dot_update_slice_offset1_scalar, c0)
pass_through = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=3
next_loop_counter = u32[] add(loop_counter_plus_one, c1)
ROOT tuple = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(permuted_lhs1, rhs, updated_dot_output1, pass_through, next_loop_counter)
}
windowed_dot_general_cond_ag {
input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0)
loop_counter = u32[] get-tuple-element(input), index=4
loop_limit = u32[] constant(4)
ROOT compare = pred[] compare(loop_counter, loop_limit), direction=LT
}
ENTRY main {
lhs = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]}
rhs0 = f8e4m3fn[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]}
c0_f32 = f32[] constant(0)
c0_f32_bcast = f32[2,2048,24576]{2,1,0} broadcast(c0_f32), dimensions={}
c0_u32 = u32[] constant(0)
scale_lhs = f32[] parameter(4)
scale_lhs_bcast = f32[2,512,24576]{2,1,0} broadcast(scale_lhs), dimensions={}
lhs_f32 = f32[2,512,24576]{2,1,0} convert(lhs)
lhs_scaled = f32[2,512,24576]{2,1,0} multiply(lhs_f32, scale_lhs_bcast)
scale_rhs0 = f32[] parameter(5)
scale_rhs0_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs0), dimensions={}
rhs0_f32 = f32[24576,24576]{1,0} convert(rhs0)
rhs0_scaled = f32[24576,24576]{1,0} multiply(rhs0_f32, scale_rhs0_bcast)
while_input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(lhs_scaled, rhs0_scaled, c0_f32_bcast, c0_f32_bcast, c0_u32)
while = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) while(while_input), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
all-gather1 = f32[2,2048,24576]{2,1,0} all-gather(lhs_scaled), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
rhs1 = f8e4m3fn[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
scale_rhs1 = f32[] parameter(6)
scale_rhs1_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs1), dimensions={}
rhs1_f32 = f32[24576,24576]{1,0} convert(rhs1)
rhs1_scaled = f32[24576,24576]{1,0} multiply(rhs1_f32, scale_rhs1_bcast)
dot1 = f32[2,2048,24576]{2,1,0} dot(all-gather1, rhs1_scaled), lhs_contracting_dims={2}, rhs_contracting_dims={0}
all-gather2 = f32[2,2048,24576]{2,1,0} all-gather(lhs_scaled), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true
rhs2 = f8e4m3fn[24576,24576]{1,0} parameter(3), sharding={devices=[1,4]<=[4]}
scale_rhs2 = f32[] parameter(7)
scale_rhs2_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs2), dimensions={}
rhs2_f32 = f32[24576,24576]{1,0} convert(rhs2)
rhs2_scaled = f32[24576,24576]{1,0} multiply(rhs2_f32, scale_rhs2_bcast)
dot2 = f32[2,2048,24576]{2,1,0} dot(all-gather2, rhs2_scaled), lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT product = f32[2,2048,24576]{2,1,0} multiply(dot1, dot2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(),
R"(
; CHECK-LABEL: %main
; CHECK: [[WHILE0:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[], f8e4m3fn[2,2048,24576]{2,1,0}) while([[TUPLE0:%[^ ]+]]),
; CHECK-DAG: condition=%unrolled_windowed_dot_general_cond_ag,
; CHECK-DAG: body=%unrolled_windowed_dot_general_body_ag
; CHECK: [[LHS1:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[WHILE0]]), index=7
; CHECK-NEXT: [[LHS1_F32:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[LHS1]])
; CHECK-NEXT: [[SCALE_LHS1_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[SCALE_LHS1:%[^ ]+]]), dimensions={}
; CHECK-NEXT: [[LHS1_SCALED:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[LHS1_F32]], [[SCALE_LHS1_BCAST]])
; CHECK-NEXT: [[RHS1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]}
; CHECK-NEXT: [[RHS1_F32:%[^ ]+]] = f32[24576,24576]{1,0} convert([[RHS1]])
; CHECK: [[SCALE_RHS1_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS1:%[^ ]+]]), dimensions={}
; CHECK-NEXT: [[RHS1_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS1_F32]], [[SCALE_RHS1_BCAST]])
; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dot([[LHS1_SCALED]], [[RHS1_SCALED]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0}
; CHECK: [[LHS2:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[WHILE0]]), index=7
; CHECK-NEXT: [[LHS2_F32:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[LHS2]])
; CHECK-NEXT: [[SCALE_LHS2_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[SCALE_LHS2:%[^ ]+]]), dimensions={}
; CHECK-NEXT: [[LHS2_SCALED:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[LHS2_F32]], [[SCALE_LHS2_BCAST]])
; CHECK-NEXT: [[RHS2:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} parameter(3), sharding={devices=[1,4]<=[4]}
; CHECK-NEXT: [[RHS2_F32:%[^ ]+]] = f32[24576,24576]{1,0} convert([[RHS2]])
; CHECK-NEXT: [[SCALE_RHS2:%[^ ]+]] = f32[] parameter(7)
; CHECK-NEXT: [[SCALE_RHS2_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS2]]), dimensions={}
; CHECK-NEXT: [[RHS2_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS2_F32]], [[SCALE_RHS2_BCAST]])
; CHECK-NEXT: [[DOT2:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dot([[LHS2_SCALED]], [[RHS2_SCALED]]),
; CHECK-DAG: lhs_contracting_dims={2},
; CHECK-DAG: rhs_contracting_dims={0}
; CHECK-NEXT: ROOT [[OUT:[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[DOT1]], [[DOT2]])
)");
}
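// Verifies that a dot consuming an all-gather over the sharded contracting
// dimension is rewritten to read the aggregated operand directly from the
// all-gather loop's output tuple instead of issuing a separate all-gather.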
TEST_F(WindowedEinsumHandlerTest,
AgLoopsMultipleConsumersAreChainedWithShardedContractingDim) {
constexpr absl::string_view kHloString = R"(
HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0})->bf16[4096,6288]{1,0}}, num_partitions=8
windowed_dot_general_body_ag {
param.195 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) parameter(0)
get-tuple-element.588 = bf16[16,2048,512]{2,1,0} get-tuple-element(param.195), index=0
collective-permute.194 = bf16[16,2048,512]{2,1,0} collective-permute(get-tuple-element.588), channel_id=446, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}}
collective-permute.195 = bf16[16,2048,512]{2,1,0} collective-permute(collective-permute.194), channel_id=447, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}}
get-tuple-element.589 = bf16[4096,6288]{1,0} get-tuple-element(param.195), index=1
get-tuple-element.590 = bf16[16,2048,6288]{2,1,0} get-tuple-element(param.195), index=2
constant.11432 = s32[8]{0} constant({0, 512, 1024, 1536, 2048, 2560, 3072, 3584})
get-tuple-element.592 = u32[] get-tuple-element(param.195), index=4
partition-id.194 = u32[] partition-id()
add.4309 = u32[] add(get-tuple-element.592, partition-id.194)
constant.11431 = u32[] constant(8)
remainder.194 = u32[] remainder(add.4309, constant.11431)
dynamic-slice.388 = s32[1]{0} dynamic-slice(constant.11432, remainder.194), dynamic_slice_sizes={1}
reshape.12959 = s32[] reshape(dynamic-slice.388)
constant.11433 = s32[] constant(0)
dynamic-slice.389 = bf16[512,6288]{1,0} dynamic-slice(get-tuple-element.589, reshape.12959, constant.11433), dynamic_slice_sizes={512,6288}
dot.244 = bf16[16,2048,6288]{2,1,0} dot(get-tuple-element.588, dynamic-slice.389), lhs_contracting_dims={2}, rhs_contracting_dims={0}
add.4310 = bf16[16,2048,6288]{2,1,0} add(get-tuple-element.590, dot.244)
constant.11434 = u32[] constant(1)
add.4312 = u32[] add(get-tuple-element.592, constant.11434)
add.4313 = u32[] add(add.4312, partition-id.194)
remainder.195 = u32[] remainder(add.4313, constant.11431)
dynamic-slice.390 = s32[1]{0} dynamic-slice(constant.11432, remainder.195), dynamic_slice_sizes={1}
reshape.12960 = s32[] reshape(dynamic-slice.390)
dynamic-slice.391 = bf16[512,6288]{1,0} dynamic-slice(get-tuple-element.589, reshape.12960, constant.11433), dynamic_slice_sizes={512,6288}
dot.245 = bf16[16,2048,6288]{2,1,0} dot(collective-permute.194, dynamic-slice.391), lhs_contracting_dims={2}, rhs_contracting_dims={0}
add.4314 = bf16[16,2048,6288]{2,1,0} add(add.4310, dot.245)
get-tuple-element.591 = bf16[16,2048,6288]{2,1,0} get-tuple-element(param.195), index=3
add.4315 = u32[] add(add.4312, constant.11434)
ROOT tuple.98 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) tuple(collective-permute.195, get-tuple-element.589, add.4314, get-tuple-element.591, add.4315)
}
windowed_dot_general_cond_ag {
param = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) parameter(0)
get-tuple-element = u32[] get-tuple-element(param), index=4
constant = u32[] constant(4)
ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT
}
ENTRY main.12_spmd {
param.4 = bf16[16,2048,512]{2,1,0} parameter(0)
param.5 = bf16[4096,6288]{1,0} parameter(1)
constant.22 = bf16[] constant(0)
broadcast = bf16[16,2048,6288]{2,1,0} broadcast(constant.22), dimensions={}
constant.24 = u32[] constant(0)
tuple.2 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24)
while = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag
get-tuple-element.13 = bf16[16,2048,6288]{2,1,0} get-tuple-element(while), index=2
all-gather = bf16[16,2048,4096]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={2}, use_global_device_ids=true
param.6 = bf16[16,2048,6288]{2,1,0} parameter(2)
ROOT dot.7 = bf16[4096,6288]{1,0} dot(all-gather, param.6), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
WindowedEinsumHandler gpu_handler;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* ag_loop =
FindInstructionByName(module->entry_computation(), "while");
HloInstruction* inst =
FindInstructionByName(module->entry_computation(), "dot.7");
EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(inst->operand(0)->tuple_index(), 5);
EXPECT_EQ(inst->operand(0)->operand(0), ag_loop);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dbee8c02-9029-4404-9614-1b7062d0d7a5 | cpp | tensorflow/tensorflow | coordination_service | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc | third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_test.cc | #include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/bind_front.h"
#include "absl/hash/hash.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "xla/tsl/util/device_name_utils.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
namespace tsl {
namespace {
using tensorflow::CoordinatedTask;
using tensorflow::CoordinatedTaskState;
using tensorflow::CoordinatedTaskStateInfo;
using tensorflow::CoordinationServiceConfig;
using tensorflow::CoordinationServiceError;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
constexpr absl::Duration kDevicePropagationTimeout = absl::Hours(1);
constexpr int kDefaultHeartbeatTimeoutMs = 10 * 1000;
constexpr int kServiceToClientTimeoutMs = 10 * 1000;
constexpr size_t kOngoingBarriersSoftLimit = 20;
constexpr char kHealthCheckThread[] = "CoordinationServiceHealthCheck";
constexpr int kPendingTaskLogLimit = 20;
constexpr int kPendingStragglerLogLimit = 3;
std::string GetTaskName(std::string_view job_name, int task_id) {
return absl::StrCat("/job:", job_name, "/replica:", 0, "/task:", task_id);
}
std::string GetTaskName(const CoordinatedTask& task) {
return GetTaskName(task.job_name(), task.task_id());
}
CoordinatedTask GetTaskFromName(std::string_view task_name) {
DeviceNameUtils::ParsedName parsed;
DeviceNameUtils::ParseFullName(task_name, &parsed);
CoordinatedTask task;
task.set_job_name(parsed.job);
task.set_task_id(parsed.task);
return task;
}
struct CoordinatedTaskHash {
uint64_t operator()(const CoordinatedTask& task) const {
return absl::HashOf(task.job_name(), task.task_id());
}
};
struct CoordinatedTaskEqual {
bool operator()(const CoordinatedTask& lhs,
const CoordinatedTask& rhs) const {
return lhs.job_name() == rhs.job_name() && lhs.task_id() == rhs.task_id();
}
};
class CoordinationServiceStandaloneImpl : public CoordinationServiceInterface {
public:
CoordinationServiceStandaloneImpl(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> client_cache);
~CoordinationServiceStandaloneImpl() override { Stop(); }
void SetDeviceAggregationFunction(
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn) override;
void LogConnectStatusLocked() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
absl::Status RegisterTask(const CoordinatedTask& task,
uint64_t incarnation) override;
void WaitForAllTasks(const CoordinatedTask& task, const DeviceInfo& devices,
StatusCallback done) override;
void ShutdownTaskAsync(const CoordinatedTask& task,
StatusCallback done) override;
absl::Status ResetTask(const CoordinatedTask& task) override;
absl::Status RecordHeartbeat(const CoordinatedTask& task,
uint64_t incarnation) override;
absl::Status ReportTaskError(const CoordinatedTask& task,
absl::Status error) override;
std::vector<CoordinatedTaskStateInfo> GetTaskState(
const std::vector<CoordinatedTask>& task) override;
absl::Status InsertKeyValue(std::string_view key,
std::string_view value) override;
absl::Status InsertKeyValue(std::string_view key, std::string_view value,
bool allow_overwrite) override;
void GetKeyValueAsync(std::string_view key,
StatusOrValueCallback done) override;
absl::StatusOr<std::string> TryGetKeyValue(std::string_view key) override;
std::vector<KeyValueEntry> GetKeyValueDir(
std::string_view directory_key) override;
absl::Status DeleteKeyValue(std::string_view key) override;
void BarrierAsync(std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) override;
absl::Status CancelBarrier(std::string_view barrier_id,
const CoordinatedTask& task) override;
void PollForErrorAsync(const CoordinatedTask& task,
StatusCallback done) override;
private:
const DeviceInfo& ListClusterDevices() override
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
uint64_t GetServiceIncarnation() override;
void CheckHeartbeatTimeout();
void CheckBarrierTimeout();
void CheckStaleness();
void StartCheckStaleness();
void Stop(bool shut_staleness_thread = true);
bool ServiceHasStopped() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void ReportServiceErrorToTaskAsync(const CoordinatedTask& destination_task,
absl::Status error);
void PropagateError(const CoordinatedTask& source_task,
bool is_reported_by_task = false)
ABSL_LOCKS_EXCLUDED(state_mu_);
void SetTaskError(std::string_view task_name, absl::Status error)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
absl::Status DisconnectTask(const CoordinatedTask& task)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
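// Tracks the state of a single barrier: whether it has passed, its result,
// its deadline, which participating tasks have arrived, and the callbacks to
// invoke once the barrier passes or fails.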
struct BarrierState {
bool passed = false;
absl::Status result = absl::UnknownError(
"Invalid barrier result.");
uint64_t deadline_in_micros = 0;
int num_pending_tasks = 0;
absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>
tasks_at_barrier;
std::vector<StatusCallback> done_callbacks;
CoordinatedTask initiating_task;
};
bool ValidateBarrierArgs(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done);
bool InitializeBarrier(
BarrierState* barrier, std::string_view barrier_id,
absl::Duration timeout, const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void PassBarrier(std::string_view barrier_id, absl::Status result,
BarrierState* barrier)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void AggregateClusterDevices() ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
void CompleteShutdownAfterBarrier(absl::Status result, BarrierState* barrier)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mu_);
bool ValidateTaskArgs(
const std::vector<CoordinatedTask>& tasks_args,
const absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>& tasks_at_barrier,
int64_t cluster_size);
bool isRecoverableJob(std::string_view task_name) const;
void SendErrorPollingResponse(const absl::Status& error);
bool SendErrorPollingResponseOrStopService(const absl::Status& error);
bool IsClientPollingForError() const;
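// Bookkeeping for client-side error polling: remembers which tasks are
// currently polling, buffers their callbacks, and replays the first reported
// error to all of them exactly once.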
class ErrorPollingState {
public:
bool Responded() const { return responded_; }
void SetError(const absl::Status& error);
const absl::Status& GetError() const { return error_; }
bool IsTaskPolling(absl::string_view task_name) const {
return polling_task_names_.contains(task_name);
}
void AddTask(const CoordinatedTask& task, StatusCallback&& done);
private:
bool responded_ = false;
absl::Status error_ = absl::OkStatus();
std::vector<StatusCallback> done_callbacks_;
absl::flat_hash_set<std::string> polling_task_names_;
};
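// Per-task bookkeeping: connection state, incarnation, last heartbeat time,
// collected device info, and the barriers the task is currently blocked on.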
class TaskState {
public:
CoordinatedTaskState GetState() { return state_; }
absl::Status GetStatus() { return status_; }
uint64_t GetTaskIncarnation() { return task_incarnation_; }
void SetConnected(uint64_t task_incarnation);
void Disconnect(uint64_t grace_period_duration_us);
absl::Status RecordHeartbeat(uint64_t task_incarnation);
int64_t TimeSinceLastHeartbeatMs();
void SetError(absl::Status status);
DeviceInfo GetDeviceInfo() { return devices_; }
void CollectDeviceInfo(const DeviceInfo& devices) { devices_ = devices; }
bool DeviceInfoIsCollected() { return devices_.device_size() != 0; }
absl::flat_hash_set<std::string> GetOngoingBarriers();
void JoinBarrier(std::string_view barrier_id);
void ExitBarrier(std::string_view barrier_id);
bool IsDisconnectedBeyondGracePeriod();
private:
uint64_t task_incarnation_ = 0;
CoordinatedTaskState state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
absl::Status status_;
absl::Mutex last_heartbeat_mu_;
uint64_t last_heartbeat_us_ ABSL_GUARDED_BY(last_heartbeat_mu_);
uint64_t disconnect_grace_period_us_ = 0;
DeviceInfo devices_;
absl::flat_hash_set<std::string> ongoing_barriers_for_task_;
};
std::unique_ptr<CoordinationClientCache> client_cache_;
Env& env_;
const uint64_t service_incarnation_ = random::New64();
const uint64_t heartbeat_timeout_ms_;
const absl::Duration shutdown_barrier_timeout_;
bool allow_new_incarnation_to_reconnect_ = false;
bool client_polling_for_error_ = false;
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn_;
const std::string device_propagation_barrier_id_ =
absl::StrCat("WaitForAllTasks::", std::to_string(service_incarnation_));
const std::string shutdown_barrier_id_ =
absl::StrCat("Shutdown::", std::to_string(service_incarnation_));
absl::Mutex state_mu_;
absl::flat_hash_map<std::string, std::unique_ptr<TaskState>> cluster_state_
ABSL_GUARDED_BY(state_mu_);
DeviceInfo cluster_devices_ ABSL_GUARDED_BY(state_mu_);
absl::Mutex kv_mu_;
std::map<std::string, std::string> kv_store_ ABSL_GUARDED_BY(kv_mu_);
absl::flat_hash_map<std::string, std::vector<StatusOrValueCallback>> get_cb_
ABSL_GUARDED_BY(kv_mu_);
absl::CondVar check_staleness_thread_cv_;
bool shutting_down_ ABSL_GUARDED_BY(state_mu_) = false;
std::unique_ptr<Thread> check_staleness_thread_;
absl::flat_hash_map<std::string, BarrierState> barriers_
ABSL_GUARDED_BY(state_mu_);
absl::flat_hash_set<std::string> ongoing_barriers_ ABSL_GUARDED_BY(state_mu_);
absl::flat_hash_set<std::string> recoverable_jobs_;
ErrorPollingState error_polling_state_ ABSL_GUARDED_BY(state_mu_);
CoordinationServiceStandaloneImpl(const CoordinationServiceStandaloneImpl&) =
delete;
void operator=(const CoordinationServiceStandaloneImpl&) = delete;
};
void CoordinationServiceStandaloneImpl::ErrorPollingState::SetError(
const absl::Status& error) {
if (responded_) return;
responded_ = true;
error_ = error;
for (auto& done_cb : done_callbacks_) {
done_cb(error_);
}
done_callbacks_.clear();
}
void CoordinationServiceStandaloneImpl::ErrorPollingState::AddTask(
const CoordinatedTask& task, StatusCallback&& done) {
if (Responded()) return;
polling_task_names_.insert(GetTaskName(task));
done_callbacks_.emplace_back(done);
}
void CoordinationServiceStandaloneImpl::TaskState::SetConnected(
uint64_t task_incarnation) {
state_ = CoordinatedTaskState::TASKSTATE_CONNECTED;
status_ = absl::OkStatus();
task_incarnation_ = task_incarnation;
absl::MutexLock l(&last_heartbeat_mu_);
last_heartbeat_us_ = Env::Default()->NowMicros();
}
void CoordinationServiceStandaloneImpl::TaskState::Disconnect(
uint64_t grace_period_duration_us) {
disconnect_grace_period_us_ =
Env::Default()->NowMicros() + grace_period_duration_us;
state_ = CoordinatedTaskState::TASKSTATE_DISCONNECTED;
status_ = absl::OkStatus();
}
void CoordinationServiceStandaloneImpl::TaskState::SetError(
const absl::Status status) {
if (state_ == CoordinatedTaskState::TASKSTATE_ERROR) return;
state_ = CoordinatedTaskState::TASKSTATE_ERROR;
status_ = status;
}
absl::Status CoordinationServiceStandaloneImpl::TaskState::RecordHeartbeat(
uint64_t task_incarnation) {
if (!status_.ok()) return status_;
if (task_incarnation != task_incarnation_) {
return MakeCoordinationError(absl::AbortedError(absl::StrCat(
"Incarnation ID mismatch: expecting ", task_incarnation_, " but got ",
task_incarnation, ". This means the remote task has restarted.")));
}
absl::MutexLock l(&last_heartbeat_mu_);
last_heartbeat_us_ = Env::Default()->NowMicros();
return absl::OkStatus();
}
int64_t
CoordinationServiceStandaloneImpl::TaskState::TimeSinceLastHeartbeatMs() {
absl::MutexLock l(&last_heartbeat_mu_);
return (Env::Default()->NowMicros() - last_heartbeat_us_) / 1000;
}
absl::flat_hash_set<std::string>
CoordinationServiceStandaloneImpl::TaskState::GetOngoingBarriers() {
return ongoing_barriers_for_task_;
}
void CoordinationServiceStandaloneImpl::TaskState::JoinBarrier(
std::string_view barrier_id) {
ongoing_barriers_for_task_.emplace(barrier_id);
}
void CoordinationServiceStandaloneImpl::TaskState::ExitBarrier(
std::string_view barrier_id) {
ongoing_barriers_for_task_.erase(barrier_id);
}
bool CoordinationServiceStandaloneImpl::TaskState::
IsDisconnectedBeyondGracePeriod() {
return GetState() == CoordinatedTaskState::TASKSTATE_DISCONNECTED &&
Env::Default()->NowMicros() > disconnect_grace_period_us_;
}
void CoordinationServiceStandaloneImpl::SetDeviceAggregationFunction(
std::function<DeviceInfo(const DeviceInfo& devices)>
post_aggregate_device_fn) {
post_aggregate_device_fn_ = std::move(post_aggregate_device_fn);
}
CoordinationServiceStandaloneImpl::CoordinationServiceStandaloneImpl(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> client_cache)
: client_cache_(std::move(client_cache)),
env_(*env),
heartbeat_timeout_ms_([&config]() -> uint64_t {
return config.heartbeat_timeout_in_ms() > 0
? config.heartbeat_timeout_in_ms()
: kDefaultHeartbeatTimeoutMs;
}()),
shutdown_barrier_timeout_(
absl::Milliseconds(config.shutdown_barrier_timeout_in_ms())),
allow_new_incarnation_to_reconnect_(
config.allow_new_incarnation_to_reconnect()) {
LOG(INFO) << "Initializing CoordinationService";
recoverable_jobs_ = absl::flat_hash_set<std::string>(
config.recoverable_jobs().cbegin(), config.recoverable_jobs().cend());
for (const auto& job : config.coordinated_job_list()) {
for (int i = 0; i < job.num_tasks(); ++i) {
const std::string task_name = GetTaskName(job.name(), i);
cluster_state_.emplace(task_name, std::make_unique<TaskState>());
}
}
StartCheckStaleness();
}
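// Marks connected tasks whose last heartbeat is older than the configured
// timeout as errored, then either propagates the error to the other tasks or,
// when there is no service-to-client connection, reports it through the error
// polling channel (or stops the service).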
void CoordinationServiceStandaloneImpl::CheckHeartbeatTimeout() {
absl::Status status = absl::OkStatus();
std::vector<std::string_view> stale_task_names;
const bool has_service_to_client_connection = client_cache_ != nullptr;
{
absl::MutexLock l(&state_mu_);
for (const auto& [task_name, task_state] : cluster_state_) {
if (task_state->GetState() != CoordinatedTaskState::TASKSTATE_CONNECTED) {
continue;
}
const bool is_stale =
task_state->TimeSinceLastHeartbeatMs() > heartbeat_timeout_ms_;
VLOG(10) << "Checking staleness for " << task_name
<< " stale?=" << is_stale;
if (is_stale) {
stale_task_names.push_back(task_name);
status = MakeCoordinationError(absl::UnavailableError(
absl::StrCat("Task ", task_name,
" heartbeat timeout. This indicates that the "
"remote task has failed, got preempted, or "
"crashed unexpectedly. Check the task logs "
"for an earlier error to debug further.")));
SetTaskError(task_name, status);
}
}
}
if (!stale_task_names.empty()) {
if (!has_service_to_client_connection) {
absl::Status heartbeat_timeout_error =
MakeCoordinationError(absl::UnavailableError(absl::StrCat(
"The following tasks are unhealthy (stopped sending "
"heartbeats):\n",
absl::StrJoin(stale_task_names, "\n"),
"\nCheck the task logs for an earlier error to debug "
"further.")));
if (SendErrorPollingResponseOrStopService(heartbeat_timeout_error)) {
return;
}
} else {
for (const auto& stale_task_name : stale_task_names) {
PropagateError(GetTaskFromName(stale_task_name));
}
}
}
}
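// Fails every ongoing barrier whose deadline has passed with a
// DeadlineExceeded error that lists the tasks that never arrived. A timed-out
// shutdown barrier is additionally surfaced through error polling when there
// is no service-to-client connection.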
void CoordinationServiceStandaloneImpl::CheckBarrierTimeout() {
absl::flat_hash_map<std::string, BarrierState*> expired_barriers;
uint64_t current_time_micros = Env::Default()->NowMicros();
std::optional<std::string> shutdown_error;
{
absl::MutexLock l(&state_mu_);
for (std::string_view barrier_id : ongoing_barriers_) {
auto* barrier = &barriers_[barrier_id];
if (current_time_micros > barrier->deadline_in_micros) {
expired_barriers[barrier_id] = barrier;
}
}
for (const auto& [barrier_id, barrier] : expired_barriers) {
std::string pending_tasks;
int pending_task_count = 0;
for (const auto& [task, at_barrier] : barrier->tasks_at_barrier) {
if (at_barrier) {
continue;
}
++pending_task_count;
if (pending_task_count < kPendingTaskLogLimit) {
absl::StrAppend(&pending_tasks, GetTaskName(task), "\n");
}
}
const int64_t tasks_at_barrier =
barrier->tasks_at_barrier.size() - pending_task_count;
std::string error_message = absl::StrFormat(
"Barrier timed out. Id: %s. This usually happens because a task "
"triggered the barrier too early or too slowly. Please look at the "
"task logs (both timed out and first task) to debug further.\n"
"# of tasks that reached the barrier: %d/%d.\nThe first "
"task at the barrier: %s. Some timed out task names:\n%s",
barrier_id, tasks_at_barrier, barrier->tasks_at_barrier.size(),
GetTaskName(barrier->initiating_task), pending_tasks);
if (barrier_id == shutdown_barrier_id_) {
shutdown_error = error_message;
}
const absl::Status error =
MakeCoordinationError(absl::DeadlineExceededError(error_message));
PassBarrier(barrier_id, error, barrier);
}
}
const bool has_service_to_client_connection = client_cache_ != nullptr;
if (!has_service_to_client_connection && shutdown_error) {
SendErrorPollingResponseOrStopService(
MakeCoordinationError(absl::DeadlineExceededError(absl::StrCat(
"Shutdown barrier timed out. Error: ", *shutdown_error))));
}
}
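// Main loop of the staleness-check thread: wakes up roughly once per second
// to check heartbeat and barrier timeouts until the service shuts down.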
void CoordinationServiceStandaloneImpl::CheckStaleness() {
while (true) {
{
absl::MutexLock l(&state_mu_);
check_staleness_thread_cv_.WaitWithTimeout(&state_mu_, absl::Seconds(1));
if (shutting_down_) {
return;
}
}
CheckHeartbeatTimeout();
CheckBarrierTimeout();
}
}
void CoordinationServiceStandaloneImpl::StartCheckStaleness() {
check_staleness_thread_.reset(env_.StartThread(
{}, kHealthCheckThread,
absl::bind_front(&CoordinationServiceStandaloneImpl::CheckStaleness,
this)));
}
void CoordinationServiceStandaloneImpl::Stop(bool shut_staleness_thread) {
{
absl::MutexLock l(&kv_mu_);
for (const auto& [key, get_kv_callbacks] : get_cb_) {
for (const auto& get_kv_callback : get_kv_callbacks) {
get_kv_callback(absl::CancelledError(
absl::StrCat("Coordination service is shutting down. Cancelling "
"GetKeyValue() for key: ",
key)));
}
}
get_cb_.clear();
}
{
absl::MutexLock l(&state_mu_);
shutting_down_ = true;
check_staleness_thread_cv_.SignalAll();
for (auto& [barrier_id, barrier] : barriers_) {
if (!barrier.passed) {
absl::Status error =
MakeCoordinationError(absl::AbortedError(absl::StrCat(
"Barrier failed because service is shutting down. Barrier_id: ",
barrier_id)));
PassBarrier(barrier_id, error, &barrier);
}
}
barriers_.clear();
cluster_state_.clear();
}
if (IsClientPollingForError()) {
SendErrorPollingResponse(
absl::CancelledError("Coordination service is shutting down. "
"Cancelling PollForErrorAsync()"));
}
if (shut_staleness_thread) {
check_staleness_thread_.reset();
}
}
bool CoordinationServiceStandaloneImpl::ServiceHasStopped() const {
return shutting_down_;
}
void CoordinationServiceStandaloneImpl::LogConnectStatusLocked() const {
const int num_tasks = cluster_state_.size();
int pending_tasks = 0;
std::vector<std::string> task_names;
for (const auto& [task_name, task_state] : cluster_state_) {
if (task_state->GetState() != CoordinatedTaskState::TASKSTATE_CONNECTED) {
pending_tasks++;
if (task_names.size() < kPendingStragglerLogLimit) {
task_names.push_back(task_name);
}
}
}
LOG(INFO) << "Waiting for " << pending_tasks << "/" << num_tasks
<< " tasks to connect.";
if (!task_names.empty()) {
LOG(INFO) << "Example stragglers:\n" << absl::StrJoin(task_names, "\n");
}
}
absl::Status CoordinationServiceStandaloneImpl::RegisterTask(
const CoordinatedTask& task, uint64_t incarnation) {
const std::string task_name = GetTaskName(task);
absl::Status error;
std::string error_message;
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(absl::StrCat(
"Coordination service has stopped. RegisterTask() from task: ",
task_name,
" failed. This usually implies an earlier error that caused "
"coordination service to shut down before the workers disconnect "
"gracefully. Check the task leader's logs for an earlier error to "
"debug the root cause.")));
}
if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Unexpected task registered with task_name=", task_name)));
}
auto* task_cluster_state = cluster_state_[task_name].get();
const auto task_state = task_cluster_state->GetState();
const auto task_status = task_cluster_state->GetStatus();
if (task_state == CoordinatedTaskState::TASKSTATE_DISCONNECTED ||
(allow_new_incarnation_to_reconnect_ &&
(absl::IsUnavailable(task_status) &&
task_status.GetPayload(CoordinationErrorPayloadKey())))) {
task_cluster_state->SetConnected(incarnation);
LOG(INFO) << task_name
<< " has connected to coordination service. Incarnation: "
<< incarnation;
LogConnectStatusLocked();
return absl::OkStatus();
} else if (task_state == CoordinatedTaskState::TASKSTATE_CONNECTED) {
if (task_cluster_state->GetTaskIncarnation() == incarnation) {
task_cluster_state->SetConnected(incarnation);
LOG(INFO) << task_name
<< " has connected to coordination service with the same "
<< "incarnation again: " << incarnation;
LogConnectStatusLocked();
return absl::OkStatus();
} else {
error_message =
absl::StrCat(task_name,
" unexpectedly tried to connect with a different "
"incarnation. It has likely restarted.");
}
} else {
error_message =
absl::StrCat(task_name,
" unexpectedly tried to connect while it is already in "
"error. ResetTask() should be called before a "
"subsequent connect attempt.");
}
LOG(ERROR) << error_message;
error = MakeCoordinationError(absl::AbortedError(error_message), task);
SetTaskError(task_name, error);
}
assert(!error.ok());
PropagateError(task);
return error;
}
void CoordinationServiceStandaloneImpl::WaitForAllTasks(
const CoordinatedTask& task, const DeviceInfo& devices,
StatusCallback done) {
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. WaitForAllTasks() failed.")));
return;
}
const auto& task_state = cluster_state_.find(GetTaskName(task));
if (task_state != cluster_state_.end() &&
!task_state->second->DeviceInfoIsCollected()) {
task_state->second->CollectDeviceInfo(devices);
}
}
BarrierAsync(device_propagation_barrier_id_, kDevicePropagationTimeout, task,
{}, std::move(done));
}
void CoordinationServiceStandaloneImpl::ShutdownTaskAsync(
const CoordinatedTask& task, StatusCallback done) {
VLOG(3) << "Task " << GetTaskName(task) << " invoked ShutdownTaskAsync()";
if (shutdown_barrier_timeout_ > absl::ZeroDuration()) {
BarrierAsync(shutdown_barrier_id_, shutdown_barrier_timeout_, task, {},
done);
} else {
absl::Status status;
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
status = MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. ShutdownTaskAsync() failed."));
} else {
status = DisconnectTask(task);
}
}
done(status);
}
}
absl::Status CoordinationServiceStandaloneImpl::ResetTask(
const CoordinatedTask& task) {
absl::MutexLock l(&state_mu_);
return DisconnectTask(task);
}
absl::Status CoordinationServiceStandaloneImpl::DisconnectTask(
const CoordinatedTask& task) {
const std::string task_name = GetTaskName(task);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
absl::StrCat("Coordination service has stopped. DisconnectTask() "
"failed for task_name=",
task_name)));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Unexpected disconnect request with task_name=", task_name)));
} else if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_DISCONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
absl::StrCat("The task is already disconnected: ", task_name)));
}
cluster_state_[task_name]->Disconnect(
heartbeat_timeout_ms_ * 1000);
for (const auto& barrier_id :
cluster_state_[task_name]->GetOngoingBarriers()) {
absl::Status error = MakeCoordinationError(absl::InternalError(absl::StrCat(
"Barrier failed because a task has disconnected. Barrier Id: ",
barrier_id, ", Task: ", task_name)));
PassBarrier(barrier_id, error, &barriers_[barrier_id]);
}
LOG(INFO) << task_name << " has disconnected from coordination service.";
return absl::OkStatus();
}
const DeviceInfo& CoordinationServiceStandaloneImpl::ListClusterDevices() {
return cluster_devices_;
}
uint64_t CoordinationServiceStandaloneImpl::GetServiceIncarnation() {
return service_incarnation_;
}
absl::Status CoordinationServiceStandaloneImpl::ReportTaskError(
const CoordinatedTask& task, absl::Status error) {
const std::string task_name = GetTaskName(task);
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. ReportTaskError() failed."));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected request from task ", task_name)));
} else if (cluster_state_[task_name]->GetState() !=
CoordinatedTaskState::TASKSTATE_CONNECTED) {
return MakeCoordinationError(absl::FailedPreconditionError(
"The task is not connected or already has an error."));
} else {
SetTaskError(task_name, error);
}
}
PropagateError(task, true);
return absl::OkStatus();
}
std::vector<CoordinatedTaskStateInfo>
CoordinationServiceStandaloneImpl::GetTaskState(
const std::vector<CoordinatedTask>& tasks) {
std::vector<CoordinatedTaskStateInfo> states_info;
for (const auto& task : tasks) {
const std::string task_name = GetTaskName(task);
auto& state_info = states_info.emplace_back();
absl::Status error;
{
absl::MutexLock l(&state_mu_);
state_info.set_state(cluster_state_[task_name]->GetState());
error = cluster_state_[task_name]->GetStatus();
}
*state_info.mutable_task() = task;
state_info.set_error_code(error.raw_code());
state_info.set_error_message(std::string(error.message()));
if (!error.ok()) {
*state_info.mutable_error_payload()->mutable_source_task() = task;
state_info.mutable_error_payload()->set_is_reported_error(false);
}
}
return states_info;
}
absl::Status CoordinationServiceStandaloneImpl::RecordHeartbeat(
const CoordinatedTask& task, uint64_t incarnation) {
const std::string task_name = GetTaskName(task);
absl::Status s = absl::OkStatus();
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(absl::StrCat(
"Coordination service has stopped. RecordHeartbeat() from task: ",
task_name,
" failed. This usually implies an earlier error that caused "
"coordination service to shut down before the workers disconnect "
"gracefully. Check the task leader's logs for an earlier error to "
"debug the root cause.")));
} else if (!cluster_state_.contains(task_name)) {
return MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected heartbeat request from task: ", task_name,
". This usually implies a configuration error.")));
}
if (!cluster_state_[task_name]->GetStatus().ok()) {
return cluster_state_[task_name]->GetStatus();
} else if (cluster_state_[task_name]->IsDisconnectedBeyondGracePeriod()) {
return MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Task with task_name=", task_name,
" must be registered before sending heartbeat messages")));
}
VLOG(10) << "Record heartbeat from task: " << task_name
<< "at incarnation: " << incarnation << "at " << absl::Now();
s = cluster_state_[task_name]->RecordHeartbeat(incarnation);
}
if (!s.ok()) {
{
absl::MutexLock l(&state_mu_);
SetTaskError(task_name, s);
}
PropagateError(task);
}
return s;
}
void CoordinationServiceStandaloneImpl::ReportServiceErrorToTaskAsync(
const CoordinatedTask& destination_task, absl::Status error) {
assert(!error.ok());
if (client_cache_ == nullptr) {
LOG(ERROR) << error;
return;
}
auto request = std::make_shared<ReportErrorToTaskRequest>();
auto response = std::make_shared<ReportErrorToTaskResponse>();
request->set_error_code(error.raw_code());
request->set_error_message(std::string(error.message()));
CoordinatedTask* error_source =
request->mutable_error_payload()->mutable_source_task();
error_source->set_job_name("coordination_service");
auto call_opts = std::make_shared<CallOptions>();
call_opts->SetTimeout(kServiceToClientTimeoutMs);
const std::string task_name = GetTaskName(destination_task);
CoordinationClient* client = client_cache_->GetClient(task_name);
client->ReportErrorToTaskAsync(
call_opts.get(), request.get(), response.get(),
[request, response, task_name, call_opts](absl::Status s) {
if (!s.ok()) {
LOG(ERROR) << "Encountered another error while reporting to "
<< task_name << ": " << s;
}
});
}
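// Broadcasts the error recorded for `source_task` to every connected task.
// Errors from recoverable jobs are not propagated. Without a
// service-to-client connection the error is delivered via error polling (or
// the service is stopped) instead of per-task RPCs.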
void CoordinationServiceStandaloneImpl::PropagateError(
const CoordinatedTask& source_task, bool is_reported_by_task) {
VLOG(3) << "PropagateError() from " << GetTaskName(source_task);
if (isRecoverableJob(source_task.job_name())) return;
absl::Status error;
{
absl::MutexLock l(&state_mu_);
error = cluster_state_[GetTaskName(source_task)]->GetStatus();
}
assert(!error.ok());
ReportErrorToTaskRequest request;
request.set_error_code(error.raw_code());
request.set_error_message(std::string(error.message()));
CoordinationServiceError* payload = request.mutable_error_payload();
*payload->mutable_source_task() = source_task;
payload->set_is_reported_error(is_reported_by_task);
CallOptions call_opts;
call_opts.SetTimeout(kServiceToClientTimeoutMs);
std::vector<std::shared_ptr<absl::Notification>> notifications;
std::vector<std::string_view> task_names;
{
absl::ReaderMutexLock l(&state_mu_);
task_names.reserve(cluster_state_.size());
for (const auto& pair : cluster_state_) {
task_names.emplace_back(pair.first);
}
}
for (std::string_view task : task_names) {
{
absl::MutexLock l(&state_mu_);
if (cluster_state_[task]->GetState() !=
CoordinatedTaskState::TASKSTATE_CONNECTED)
continue;
}
if (client_cache_ == nullptr) {
SendErrorPollingResponseOrStopService(error);
return;
}
CoordinationClient* client = client_cache_->GetClient(std::string(task));
auto response = std::make_shared<ReportErrorToTaskResponse>();
auto n = std::make_shared<absl::Notification>();
client->ReportErrorToTaskAsync(
&call_opts, &request, response.get(),
[response, n, task](absl::Status s) {
if (!s.ok()) {
LOG(ERROR) << "Encountered another error while reporting to "
<< task << ": " << s;
}
n->Notify();
});
notifications.push_back(n);
}
for (auto& n : notifications) {
n->WaitForNotification();
}
}
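// Normalizes a key-value store key by removing leading and trailing '/' and
// collapsing consecutive '/' into one, e.g. "/a//b/" becomes "a/b".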
std::string NormalizeKey(std::string_view orig_key) {
std::string norm_key = std::string(orig_key);
const char* src = norm_key.c_str();
std::string::iterator dst = norm_key.begin();
while (*src) {
while (*src == '/') src++;
while (*src && *src != '/') {
*dst++ = *src++;
}
if (*src) {
*dst++ = *src++;
}
}
if (dst > norm_key.begin() && *(dst - 1) == '/') dst--;
norm_key.resize(dst - norm_key.begin());
return norm_key;
}
absl::Status CoordinationServiceStandaloneImpl::InsertKeyValue(
std::string_view key, std::string_view value) {
return InsertKeyValue(key, value, false);
}
absl::Status CoordinationServiceStandaloneImpl::InsertKeyValue(
std::string_view key, std::string_view value, bool allow_overwrite) {
VLOG(3) << "InsertKeyValue(): " << key << ": " << value
<< " allow_overwrite: " << allow_overwrite;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
if (!allow_overwrite && kv_store_.find(norm_key) != kv_store_.end()) {
return MakeCoordinationError(absl::AlreadyExistsError(
absl::StrCat("Config key ", key, " already exists.")));
}
kv_store_.insert_or_assign(norm_key, value);
auto iter = get_cb_.find(norm_key);
if (iter != get_cb_.end()) {
for (const auto& cb : iter->second) {
cb(value);
}
get_cb_.erase(iter);
}
return absl::OkStatus();
}
void CoordinationServiceStandaloneImpl::GetKeyValueAsync(
std::string_view key, StatusOrValueCallback done) {
VLOG(3) << "GetKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const auto& iter = kv_store_.find(norm_key);
if (iter != kv_store_.end()) {
done(iter->second);
return;
}
auto cb_iter = get_cb_.find(norm_key);
if (cb_iter == get_cb_.end()) {
cb_iter =
get_cb_.emplace(norm_key, std::vector<StatusOrValueCallback>()).first;
}
cb_iter->second.emplace_back(std::move(done));
}
absl::StatusOr<std::string> CoordinationServiceStandaloneImpl::TryGetKeyValue(
std::string_view key) {
VLOG(3) << "TryGetKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const auto& iter = kv_store_.find(norm_key);
if (iter == kv_store_.end()) {
return absl::NotFoundError(absl::StrCat("Config key ", key, " not found."));
}
return iter->second;
}
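// Returns all key-value pairs whose keys live under "<directory_key>/",
// relying on the ordered kv_store_ map to iterate the contiguous range that
// shares the directory prefix.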
std::vector<KeyValueEntry> CoordinationServiceStandaloneImpl::GetKeyValueDir(
std::string_view directory_key) {
VLOG(3) << "TryGetKeyValueDir(): " << directory_key;
std::vector<KeyValueEntry> kvs_in_directory;
const std::string norm_key = NormalizeKey(directory_key);
const std::string dir = absl::StrCat(norm_key, "/");
absl::MutexLock l(&kv_mu_);
auto begin = kv_store_.lower_bound(dir);
std::map<std::string, std::string>::iterator it;
for (it = begin; it != kv_store_.end(); ++it) {
if (std::mismatch(dir.begin(), dir.end(), it->first.begin()).first !=
dir.end()) {
break;
}
KeyValueEntry kv;
kv.set_key(it->first);
kv.set_value(it->second);
kvs_in_directory.push_back(kv);
}
return kvs_in_directory;
}
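// Deletes the key itself as well as all entries under "key/".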
absl::Status CoordinationServiceStandaloneImpl::DeleteKeyValue(
std::string_view key) {
VLOG(3) << "DeleteKeyValue(): " << key;
const std::string norm_key = NormalizeKey(key);
absl::MutexLock l(&kv_mu_);
const std::string dir = absl::StrCat(norm_key, "/");
auto begin = kv_store_.lower_bound(dir);
std::map<std::string, std::string>::iterator end;
for (end = begin; end != kv_store_.end(); end++) {
if (std::mismatch(dir.begin(), dir.end(), end->first.begin()).first !=
dir.end())
break;
}
kv_store_.erase(begin, end);
auto iter = kv_store_.find(norm_key);
if (iter != kv_store_.end()) {
kv_store_.erase(iter);
}
return absl::OkStatus();
}
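// Marks the task as errored and fails every ongoing barrier it has joined.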
void CoordinationServiceStandaloneImpl::SetTaskError(std::string_view task_name,
absl::Status error) {
cluster_state_[task_name]->SetError(error);
for (const auto& barrier_id :
cluster_state_[task_name]->GetOngoingBarriers()) {
absl::Status barrier_error =
MakeCoordinationError(absl::InternalError(absl::StrCat(
"Barrier failed beacuse a task is in error. Barrier Id: ",
barrier_id, ", Task: ", task_name, "Error: ", error.message())));
PassBarrier(barrier_id, barrier_error, &barriers_[barrier_id]);
}
LOG(ERROR) << task_name
<< " has been set to ERROR in coordination service: " << error;
}
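// Registers `done` to be invoked once the service observes an error. Rejects
// the poll if the service has stopped, a service-to-client connection exists,
// or the task is unknown, disconnected, or already in error.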
void CoordinationServiceStandaloneImpl::PollForErrorAsync(
const CoordinatedTask& task, StatusCallback done) {
const std::string task_name = GetTaskName(task);
VLOG(3) << "Task " << task_name << " invoked PollForErrorAsync().";
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"PollForError requested after coordination service has shut down.")));
return;
}
if (client_cache_ != nullptr) {
done(MakeCoordinationError(
absl::InternalError("Should not use error polling from service when "
"there is service to client connection.")));
return;
}
client_polling_for_error_ = true;
if (!cluster_state_.contains(task_name)) {
done(MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected task (", task_name,
") that is not in the cluster polling for errors."))));
return;
}
if (cluster_state_[task_name]->IsDisconnectedBeyondGracePeriod()) {
done(MakeCoordinationError(absl::FailedPreconditionError(
absl::StrCat("Task (", task_name,
") that has not been registered or has disconnected "
"polling for errors."))));
return;
}
if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_ERROR) {
done(MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Task (", task_name,
") that is already in error state polling for errors. Current error: ",
cluster_state_[task_name]->GetStatus().ToString()))));
return;
}
if (error_polling_state_.Responded()) {
done(error_polling_state_.GetError());
return;
}
error_polling_state_.AddTask(task, std::move(done));
}
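// Returns false and fails the barrier when a task outside the participating
// set calls it (or the service has already stopped).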
bool CoordinationServiceStandaloneImpl::ValidateBarrierArgs(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
const std::string source_task_name = GetTaskName(task);
bool among_participating_tasks =
std::find_if(participating_tasks.begin(), participating_tasks.end(),
[&](const CoordinatedTask& task) {
return GetTaskName(task) == source_task_name;
}) != participating_tasks.end();
if (!participating_tasks.empty() && !among_participating_tasks) {
const std::string task_name = GetTaskName(task);
absl::Status error = MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("A non-participating task (", GetTaskName(task),
") called the barrier: ", barrier_id)));
{
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Barrier requested after coordination service has shut down.")));
return false;
}
auto pair = barriers_.try_emplace(barrier_id);
auto it = pair.first;
auto* barrier = &it->second;
PassBarrier(barrier_id, error, barrier);
}
done(error);
return false;
}
return true;
}
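// Sets up the barrier's participant map, deadline, and per-task bookkeeping.
// Returns false if any participant is unknown or already in error.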
bool CoordinationServiceStandaloneImpl::InitializeBarrier(
BarrierState* barrier, std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
barrier->passed = false;
barrier->initiating_task = task;
if (participating_tasks.empty()) {
for (const auto& task_state : cluster_state_) {
std::string_view task_name = task_state.first;
barrier->tasks_at_barrier[GetTaskFromName(task_name)] = false;
}
} else {
for (const auto& task : participating_tasks) {
const std::string task_name = GetTaskName(task);
if (!cluster_state_.contains(task_name)) {
absl::Status error = MakeCoordinationError(absl::InvalidArgumentError(
absl::StrCat("Unexpected task (", task_name,
") that is not in the cluster called the barrier. "
"Barrier Id: ",
barrier_id)));
PassBarrier(barrier_id, error, barrier);
done(error);
return false;
}
barrier->tasks_at_barrier[task] = false;
}
}
barrier->num_pending_tasks = barrier->tasks_at_barrier.size();
for (const auto& pending_task : barrier->tasks_at_barrier) {
const std::string task_name = GetTaskName(pending_task.first);
if (cluster_state_[task_name]->GetState() ==
CoordinatedTaskState::TASKSTATE_ERROR) {
absl::Status error = MakeCoordinationError(absl::InternalError(
absl::StrCat("Task (", task_name,
") is already in error before the barrier "
"was called. Barrier Id: ",
barrier_id)));
PassBarrier(barrier_id, error, barrier);
done(error);
return false;
}
}
barrier->deadline_in_micros =
Env::Default()->NowMicros() + (timeout / absl::Microseconds(1));
ongoing_barriers_.emplace(barrier_id);
const size_t num_ongoing_barriers = ongoing_barriers_.size();
if (num_ongoing_barriers > kOngoingBarriersSoftLimit) {
LOG(WARNING) << "There is a high number of ongoing barriers in "
"coordination service: "
<< num_ongoing_barriers;
}
for (const auto& pending_task : barrier->tasks_at_barrier) {
const CoordinatedTask& task = pending_task.first;
cluster_state_[GetTaskName(task)]->JoinBarrier(barrier_id);
}
return true;
}
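// Registers the calling task at the barrier; `done` is invoked once every
// participating task has arrived, the barrier fails, or it has already passed.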
void CoordinationServiceStandaloneImpl::BarrierAsync(
std::string_view barrier_id, absl::Duration timeout,
const CoordinatedTask& task,
const std::vector<CoordinatedTask>& participating_tasks,
StatusCallback done) {
VLOG(3) << "Task " << GetTaskName(task) << " invoked BarrierAsync("
<< barrier_id << ").";
if (!ValidateBarrierArgs(barrier_id, timeout, task, participating_tasks,
done)) {
return;
}
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
done(MakeCoordinationError(absl::InternalError(
"Barrier requested after coordination service has shut down.")));
return;
}
auto pair = barriers_.try_emplace(barrier_id);
auto it = pair.first;
bool inserted = pair.second;
auto* barrier = &it->second;
if (inserted) {
if (!InitializeBarrier(barrier, barrier_id, timeout, task,
participating_tasks, done)) {
return;
}
}
if (barrier->passed) {
if (barrier_id == shutdown_barrier_id_) {
absl::Status s = DisconnectTask(task);
if (!s.ok()) {
done(s);
return;
}
}
done(barrier->result);
return;
}
barrier->done_callbacks.push_back(done);
if (!ValidateTaskArgs(participating_tasks, barrier->tasks_at_barrier,
cluster_state_.size())) {
absl::Status error =
MakeCoordinationError(absl::InvalidArgumentError(absl::StrCat(
"Conflicting tasks specified for the same barrier: ", barrier_id)));
PassBarrier(barrier_id, error, barrier);
return;
}
if (!barrier->tasks_at_barrier[task]) {
barrier->tasks_at_barrier[task] = true;
--barrier->num_pending_tasks;
if (barrier->num_pending_tasks == 0) {
PassBarrier(barrier_id, absl::OkStatus(), barrier);
return;
}
}
}
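// Cancels an ongoing barrier; fails if the barrier has already passed.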
absl::Status CoordinationServiceStandaloneImpl::CancelBarrier(
std::string_view barrier_id, const CoordinatedTask& task) {
absl::MutexLock l(&state_mu_);
if (ServiceHasStopped()) {
return MakeCoordinationError(absl::InternalError(
"Coordination service has stopped. CancelBarrier() failed."));
}
auto [it, inserted] = barriers_.try_emplace(barrier_id);
auto* barrier = &it->second;
if (inserted) {
LOG(WARNING) << "Barrier (" << barrier_id
<< ") is cancelled before being created by task: "
<< GetTaskName(task);
}
if (barrier->passed) {
return MakeCoordinationError(absl::FailedPreconditionError(absl::StrCat(
"Barrier (", barrier_id, ") has already been passed with status code: ",
barrier->result.code())));
}
absl::Status cancelled = MakeCoordinationError(absl::CancelledError(
absl::StrCat("Barrier (", barrier_id,
") is cancelled by task: ", GetTaskName(task))));
PassBarrier(barrier_id, cancelled, barrier);
VLOG(3) << "Barrier (" << barrier_id << ") is cancelled.";
return absl::OkStatus();
}
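// Completes the barrier with `result`, notifies all waiting callbacks, and
// performs barrier-specific follow-up (device aggregation, shutdown handling).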
void CoordinationServiceStandaloneImpl::PassBarrier(std::string_view barrier_id,
absl::Status result,
BarrierState* barrier) {
barrier->passed = true;
barrier->result = result;
VLOG(3) << "Barrier(" << barrier_id << ") has passed with status: " << result;
if (barrier_id == device_propagation_barrier_id_) {
AggregateClusterDevices();
}
for (const auto& task_at_barrier : barrier->tasks_at_barrier) {
const CoordinatedTask& task = task_at_barrier.first;
cluster_state_[GetTaskName(task)]->ExitBarrier(barrier_id);
}
if (barrier_id == shutdown_barrier_id_) {
CompleteShutdownAfterBarrier(result, barrier);
}
barrier->tasks_at_barrier.clear();
ongoing_barriers_.erase(barrier_id);
for (const auto& callback : barrier->done_callbacks) {
callback(result);
}
barrier->done_callbacks.clear();
}
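// Propagates `error` to every task with an outstanding error-polling request,
// logging the tasks that never started polling.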
void CoordinationServiceStandaloneImpl::SendErrorPollingResponse(
const absl::Status& error) {
CHECK(IsClientPollingForError())
<< "`SendErrorPollingResponse` should only be called after agents poll "
"errors from the service.";
{
absl::MutexLock l(&state_mu_);
if (error_polling_state_.Responded()) {
return;
}
}
if (!absl::IsCancelled(error)) {
VLOG(2) << "An error is encountered. Sending the error as a response to "
"all error polling requests: "
<< error;
}
std::vector<std::string> missing_tasks;
{
absl::MutexLock l(&state_mu_);
missing_tasks.reserve(cluster_state_.size());
for (const auto& [task_name, task_state] : cluster_state_) {
if (!error_polling_state_.IsTaskPolling(task_name)) {
missing_tasks.push_back(task_name);
}
}
error_polling_state_.SetError(error);
}
if (!missing_tasks.empty()) {
LOG(ERROR) << absl::StrFormat(
"The following %d tasks in the cluster has not sent request to poll "
"for error. Error will not be propagated to these tasks: %s",
missing_tasks.size(), absl::StrJoin(missing_tasks, ","));
}
}
bool CoordinationServiceStandaloneImpl::ValidateTaskArgs(
const std::vector<CoordinatedTask>& tasks_args,
const absl::flat_hash_map<CoordinatedTask, bool, CoordinatedTaskHash,
CoordinatedTaskEqual>& tasks_at_barrier,
int64_t cluster_size) {
if (tasks_args.empty()) {
return tasks_at_barrier.size() == cluster_size;
} else if (tasks_at_barrier.size() != tasks_args.size()) {
return false;
} else {
for (const auto& task : tasks_args) {
if (!tasks_at_barrier.contains(task)) {
return false;
}
}
}
return true;
}
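// Merges per-task device info in (job name, task id) order, optionally
// post-processing the result with the registered aggregation function.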
void CoordinationServiceStandaloneImpl::AggregateClusterDevices() {
assert(cluster_devices_.device_size() == 0);
std::vector<CoordinatedTask> ordered_tasks;
ordered_tasks.reserve(cluster_state_.size());
for (const auto& task : cluster_state_) {
ordered_tasks.push_back(GetTaskFromName(task.first));
}
std::sort(ordered_tasks.begin(), ordered_tasks.end(),
[](const CoordinatedTask& task1, const CoordinatedTask& task2) {
if (task1.job_name() != task2.job_name()) {
return task1.job_name() < task2.job_name();
}
return task1.task_id() < task2.task_id();
});
for (const auto& task : ordered_tasks) {
cluster_devices_.MergeFrom(
cluster_state_[GetTaskName(task)]->GetDeviceInfo());
}
if (post_aggregate_device_fn_ != nullptr) {
cluster_devices_ = post_aggregate_device_fn_(cluster_devices_);
}
}
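// After the shutdown barrier resolves, disconnects the tasks that reached the
// barrier and reports the failure to those that did not.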
void CoordinationServiceStandaloneImpl::CompleteShutdownAfterBarrier(
absl::Status result, BarrierState* barrier) {
if (result.ok()) {
LOG(INFO) << "Shutdown barrier in coordination service has passed.";
} else {
LOG(ERROR) << "Shutdown barrier in coordination service has failed:\n"
<< result
<< "\nThis suggests that the workers are out of sync. Either "
"at least one worker is too fast in its execution / "
"crashed early or too slow / hanging. Check the logs for "
"an earlier error to identify the root cause.";
}
absl::Status shutdown_error = MakeCoordinationError(absl::InternalError(
absl::StrCat("Shutdown barrier has failed, but this task is not at the "
"barrier yet.\nBarrier result: '",
                   barrier->result.message(), "'")));
for (const auto& [task, at_barrier] : barrier->tasks_at_barrier) {
if (at_barrier) {
absl::Status disconnect_status = DisconnectTask(task);
if (!disconnect_status.ok()) {
LOG(ERROR) << disconnect_status;
}
} else {
ReportServiceErrorToTaskAsync(task, shutdown_error);
}
}
}
}
std::unique_ptr<CoordinationServiceInterface> EnableCoordinationService(
Env* env, const CoordinationServiceConfig& config,
std::unique_ptr<CoordinationClientCache> cache) {
return std::make_unique<CoordinationServiceStandaloneImpl>(env, config,
std::move(cache));
}
bool CoordinationServiceStandaloneImpl::isRecoverableJob(
const std::string_view task_name) const {
return recoverable_jobs_.find(task_name) != recoverable_jobs_.end();
}
bool CoordinationServiceStandaloneImpl::SendErrorPollingResponseOrStopService(
const absl::Status& error) {
CHECK(!error.ok()) << "SendErrorPollingResponseOrStopService called with OK "
"status. Should always return an error.";
assert(client_cache_ == nullptr);
if (IsClientPollingForError()) {
LOG(ERROR)
<< "Use error polling to propagate the following error to all tasks: "
<< error;
SendErrorPollingResponse(error);
return false;
}
LOG(ERROR) << "Stopping coordination service as there is no "
"service-to-client connection, but we encountered an error: "
<< error;
Stop(false);
return true;
}
bool CoordinationServiceStandaloneImpl::IsClientPollingForError() const {
return client_polling_for_error_;
}
REGISTER_COORDINATION_SERVICE("standalone", EnableCoordinationService);
} | #include "xla/tsl/distributed_runtime/coordination/coordination_service.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/synchronization/notification.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_error_util.h"
#include "xla/tsl/distributed_runtime/coordination/test_device.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
using ::testing::Each;
using ::testing::EqualsProto;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
using ::testing::status::StatusIs;
using tensorflow::CoordinatedJob;
using tensorflow::CoordinatedTask;
using tensorflow::CoordinationServiceConfig;
using tensorflow::DeviceInfo;
using tensorflow::KeyValueEntry;
using tensorflow::TestDevice;
using tensorflow::TestDeviceList;
constexpr absl::Duration kHeartbeatTimeout = absl::Seconds(2);
constexpr absl::Duration kShutdownBarrierTimeout = absl::Milliseconds(500);
constexpr char kCoordinationServiceType[] = "standalone";
KeyValueEntry CreateKv(const std::string& key, const std::string& value) {
KeyValueEntry kv;
kv.set_key(key);
kv.set_value(value);
return kv;
}
CoordinationServiceConfig GetCoordinationServiceConfig(int num_tasks) {
CoordinationServiceConfig config;
config.set_service_type(kCoordinationServiceType);
CoordinatedJob* job = config.mutable_coordinated_job_list()->Add();
job->set_name("worker");
job->set_num_tasks(num_tasks);
return config;
}
class TestCoordinationClient : public CoordinationClient {
public:
TestCoordinationClient() = default;
absl::Status GetStatus() {
absl::MutexLock l(&mu_);
return status_;
}
void RegisterTaskAsync(CallOptions* opts, const RegisterTaskRequest* request,
RegisterTaskResponse* response,
StatusCallback done) override {
done(absl::OkStatus());
}
void ReportErrorToTaskAsync(CallOptions* call_opts,
const ReportErrorToTaskRequest* request,
ReportErrorToTaskResponse* response,
StatusCallback done) override {
absl::MutexLock l(&mu_);
status_ = absl::Status(static_cast<absl::StatusCode>(request->error_code()),
request->error_message());
done(absl::OkStatus());
}
#define UNIMPLEMENTED(method) \
void method##Async(const method##Request* request, \
method##Response* response, StatusCallback done) \
override{done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED(WaitForAllTasks);
UNIMPLEMENTED(ResetTask);
UNIMPLEMENTED(ReportErrorToService);
UNIMPLEMENTED(GetTaskState);
UNIMPLEMENTED(InsertKeyValue);
UNIMPLEMENTED(TryGetKeyValue);
UNIMPLEMENTED(GetKeyValueDir);
UNIMPLEMENTED(DeleteKeyValue);
UNIMPLEMENTED(Barrier);
UNIMPLEMENTED(CancelBarrier);
#undef UNIMPLEMENTED
#define UNIMPLEMENTED_WITH_CALL_OPTS(method) \
void method##Async(CallOptions* call_opts, const method##Request* request, \
method##Response* response, StatusCallback done) \
override{done(absl::UnimplementedError(#method "Async")); \
}
UNIMPLEMENTED_WITH_CALL_OPTS(GetKeyValue);
UNIMPLEMENTED_WITH_CALL_OPTS(Heartbeat);
UNIMPLEMENTED_WITH_CALL_OPTS(ShutdownTask);
UNIMPLEMENTED_WITH_CALL_OPTS(PollForError);
#undef UNIMPLEMENTED_WITH_CALL_OPTS
private:
absl::Mutex mu_;
absl::Status status_ ABSL_GUARDED_BY(mu_);
};
class TestCoordinationClientCache : public CoordinationClientCache {
public:
void AddTask(const std::string& target, CoordinationClient* client) {
clients_.emplace(target, client);
}
CoordinationClient* GetClient(const string& target) override {
auto it = clients_.find(target);
if (it == clients_.end()) return nullptr;
return it->second;
}
std::unique_ptr<CoordinationClient> GetOwnedClient(
const string& target) override {
LOG(ERROR) << "GetOwnedClient is not supported.";
return nullptr;
}
private:
std::unordered_map<std::string, CoordinationClient*> clients_;
};
class CoordinationBarrierTest : public ::testing::Test {
protected:
CoordinationBarrierTest() {
const int num_tasks = 3;
auto client_cache = std::make_unique<TestCoordinationClientCache>();
for (int i = 0; i < num_tasks; ++i) {
CoordinatedTask task;
task.set_job_name("worker");
task.set_task_id(i);
auto client = std::make_unique<TestCoordinationClient>();
client_cache->AddTask(absl::StrCat("/job:worker/replica:0/task:", i),
client.get());
tasks_.push_back(task);
clients_.push_back(std::move(client));
}
CoordinationServiceConfig config = GetCoordinationServiceConfig(num_tasks);
coord_service_ = CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
for (int i = 0; i < num_tasks; ++i) {
absl::Status s =
coord_service_->RegisterTask(tasks_[i], 0);
if (!s.ok()) {
LOG(FATAL) << "RegisterTask() failed in CoordinationBarrierTest(): "
<< s;
}
}
}
CoordinationServiceInterface* GetCoordinationService() {
return coord_service_.get();
}
CoordinatedTask GetTask(int i) { return tasks_[i]; }
std::string GetTaskName(const CoordinatedTask& task) {
return absl::StrCat("/job:", task.job_name(), "/replica:", 0,
"/task:", task.task_id());
}
std::vector<TestCoordinationClient*> GetClients() {
std::vector<TestCoordinationClient*> clients;
for (const auto& client : clients_) {
clients.push_back(client.get());
}
return clients;
}
private:
std::unique_ptr<CoordinationServiceInterface> coord_service_;
std::vector<CoordinatedTask> tasks_;
std::vector<std::unique_ptr<TestCoordinationClient>> clients_;
};
class CoordinateTwoTasksTest : public ::testing::Test {
protected:
CoordinateTwoTasksTest() {
task_0_.set_job_name("worker");
task_0_.set_task_id(0);
task_1_.set_job_name("worker");
task_1_.set_task_id(1);
}
void EnableCoordinationService(
bool has_service_to_client_connection = true,
bool enable_shutdown_barrier = false,
bool set_worker_job_recoverable = false,
bool allow_new_incarnation_to_reconnect = false) {
CoordinationServiceConfig config =
GetCoordinationServiceConfig(2);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
if (has_service_to_client_connection) {
client_cache->AddTask("/job:worker/replica:0/task:0", &client_0_);
client_cache->AddTask("/job:worker/replica:0/task:1", &client_1_);
} else {
client_cache = nullptr;
}
config.set_heartbeat_timeout_in_ms(kHeartbeatTimeout /
absl::Milliseconds(1));
if (set_worker_job_recoverable) {
config.mutable_recoverable_jobs()->Add("worker");
}
if (enable_shutdown_barrier) {
config.set_shutdown_barrier_timeout_in_ms(kShutdownBarrierTimeout /
absl::Milliseconds(1));
}
if (allow_new_incarnation_to_reconnect) {
config.set_allow_new_incarnation_to_reconnect(true);
}
coord_service_ = CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
}
CoordinatedTask task_0_;
const uint64_t incarnation_0_ = random::New64();
const uint64_t incarnation_0_new_ = random::New64();
TestCoordinationClient client_0_;
CoordinatedTask task_1_;
const uint64_t incarnation_1_ = random::New64();
const uint64_t incarnation_1_new_ = random::New64();
TestCoordinationClient client_1_;
std::unique_ptr<CoordinationServiceInterface> coord_service_;
};
TestDevice CreateTestDevice(absl::string_view name, int local_id = 0) {
TestDevice device;
device.set_name(name);
device.set_local_id(local_id);
return device;
}
TEST_F(CoordinateTwoTasksTest, TestStandaloneService) {
EnableCoordinationService();
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Notification wait_for_all;
coord_service_->WaitForAllTasks(task_0_, {}, [&](absl::Status s) {
ASSERT_OK(s);
wait_for_all.Notify();
});
ASSERT_FALSE(wait_for_all.HasBeenNotified());
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->WaitForAllTasks(task_1_, {},
[&](absl::Status s) { ASSERT_OK(s); });
wait_for_all.WaitForNotification();
ASSERT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RecordHeartbeat(task_1_, incarnation_1_));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_2, 0),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, 0),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, 0),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(client_0_.GetStatus(), StatusIs(absl::StatusCode::kAborted));
}
TEST(CoordinationServiceTest, TestCoordinatedJobs) {
CoordinatedTask chief;
chief.set_job_name("chief");
chief.set_task_id(0);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask evaluator;
evaluator.set_job_name("evaluator");
evaluator.set_task_id(0);
CoordinationServiceConfig config;
config.set_service_type(kCoordinationServiceType);
CoordinatedJob* chief_job = config.mutable_coordinated_job_list()->Add();
chief_job->set_name("chief");
chief_job->set_num_tasks(1);
CoordinatedJob* worker_job = config.mutable_coordinated_job_list()->Add();
worker_job->set_name("worker");
worker_job->set_num_tasks(2);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
TestCoordinationClient ci;
client_cache->AddTask("/job:chief/replica:0/task:0", &ci);
TestCoordinationClient wi0;
client_cache->AddTask("/job:worker/replica:0/task:0", &wi0);
TestCoordinationClient wi1;
client_cache->AddTask("/job:worker/replica:0/task:1", &wi1);
TestCoordinationClient ei;
client_cache->AddTask("/job:evaluator/replica:0/task:0", &ei);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification register_chief;
ASSERT_OK(coord_service->RegisterTask(chief, 0));
coord_service->WaitForAllTasks(chief, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_chief.Notify();
});
absl::Notification register_task0;
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
coord_service->WaitForAllTasks(task_0, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_task0.Notify();
});
absl::Notification register_task1;
ASSERT_OK(coord_service->RegisterTask(task_1, 0));
coord_service->WaitForAllTasks(task_1, {}, [&](absl::Status s) {
ASSERT_OK(s);
register_task1.Notify();
});
register_chief.WaitForNotification();
register_task0.WaitForNotification();
register_task1.WaitForNotification();
absl::Status status =
coord_service->RegisterTask(evaluator, 0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(CoordinationServiceTest, RegisterTask_AlreadyConnected_Succeeds) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
const absl::Status status =
coord_service->RegisterTask(task_0, 0);
TF_EXPECT_OK(status) << status;
}
TEST(CoordinationServiceTest,
RegisterTask_AlreadyConnectedDifferentIncarnation_Fails) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
const absl::Status status =
coord_service->RegisterTask(task_0, 1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAborted));
}
TEST(CoordinationServiceTest, RegisterTask_AlreadyInError_Fails) {
CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config,
nullptr);
ASSERT_OK(coord_service->RegisterTask(task_0, 0));
ASSERT_OK(coord_service->ReportTaskError(task_0,
absl::InternalError("test_error")));
const absl::Status status =
coord_service->RegisterTask(task_0, 0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, TestTaskHeartbeatTimeout) {
EnableCoordinationService();
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, incarnation_1_),
StatusIs(absl::StatusCode::kUnavailable));
}
TEST_F(CoordinateTwoTasksTest,
ErrorPollingRequestsGotCancelledErrorUponServiceShutdown) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
EXPECT_EQ(statuses.size(), 0);
coord_service_.reset();
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kCancelled)));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutWithoutServerToClientConnection) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_1_, incarnation_1_),
StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutErrorCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0, n1;
absl::Status s0, s1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(s1, StatusIs(absl::StatusCode::kUnavailable));
}
TEST_F(CoordinateTwoTasksTest,
HeartbeatTimeoutErrorFromOneTaskCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s0, s1;
absl::Notification n0, n1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
const int64_t sleeping_time =
absl::ToInt64Microseconds(0.9 * kHeartbeatTimeout);
Env::Default()->SleepForMicroseconds(sleeping_time);
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(sleeping_time);
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(sleeping_time);
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0,
StatusIs(absl::StatusCode::kUnavailable, HasSubstr("task:1")));
EXPECT_THAT(s1,
StatusIs(absl::StatusCode::kUnavailable, HasSubstr("task:1")));
}
TEST_F(CoordinateTwoTasksTest, ReportedErrorCanPropagateThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
ASSERT_OK(coord_service_->ReportTaskError(task_1_,
absl::InternalError("test_error")));
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kInternal)));
}
TEST_F(CoordinateTwoTasksTest, TestTaskRestart) {
EnableCoordinationService();
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s =
coord_service_->RegisterTask(task_1_, random::New64());
EXPECT_THAT(s, StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(client_0_.GetStatus(), StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, InsertKeyValue_Duplicate_Fail) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "original_value"));
EXPECT_THAT(coord_service_->InsertKeyValue("key0", "never_added"),
StatusIs(absl::StatusCode::kAlreadyExists));
auto result = coord_service_->TryGetKeyValue("key0");
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value(), "original_value");
}
TEST_F(CoordinateTwoTasksTest, InsertKeyValue_Duplicate_Overwrite) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "original_value"));
TF_EXPECT_OK(coord_service_->InsertKeyValue("key0", "overwritten_value",
true));
auto result = coord_service_->TryGetKeyValue("key0");
TF_EXPECT_OK(result.status());
EXPECT_EQ(result.value(), "overwritten_value");
}
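// Exercises key normalization (redundant slashes), gets that block until a
// later insert supplies the key, and deletion of individual keys and
// directories.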
TEST_F(CoordinateTwoTasksTest, TestSetGetValues) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("key0", "value0"));
ASSERT_OK(coord_service_->InsertKeyValue("/path", "value"));
ASSERT_OK(coord_service_->InsertKeyValue("/path/to/key1", "value1"));
ASSERT_OK(coord_service_->InsertKeyValue("path/to
absl::Notification n1;
absl::StatusOr<std::string_view> ret;
coord_service_->GetKeyValueAsync(
"key0", [&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n1.Notify();
});
n1.WaitForNotification();
ASSERT_OK(ret.status());
EXPECT_EQ(ret.value(), "value0");
absl::Notification n2;
coord_service_->GetKeyValueAsync(
"path
[&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n2.Notify();
});
n2.WaitForNotification();
EXPECT_EQ(ret.value(), "value1");
ASSERT_OK(coord_service_->DeleteKeyValue("key0"));
absl::Notification n3;
coord_service_->GetKeyValueAsync(
"key0", [&](const absl::StatusOr<std::string_view>& status_or_value) {
ret = status_or_value;
n3.Notify();
});
EXPECT_FALSE(n3.HasBeenNotified());
ASSERT_OK(coord_service_->InsertKeyValue("key0", "value0_new"));
n3.WaitForNotification();
EXPECT_EQ(ret.value(), "value0_new");
ASSERT_OK(coord_service_->DeleteKeyValue("/path"));
auto n4 = std::make_shared<absl::Notification>();
coord_service_->GetKeyValueAsync(
"/path/to/key1",
[n4](const absl::StatusOr<std::string_view>& status_or_value) {
n4->Notify();
});
EXPECT_FALSE(n4->HasBeenNotified());
}
TEST(CoordinationServiceTest, TryGetKeyValue) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(1);
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::StatusOr<std::string> result =
coord_service->TryGetKeyValue("test_key");
EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kNotFound));
ASSERT_OK(coord_service->InsertKeyValue("test_key", "test_value"));
result = coord_service->TryGetKeyValue("test_key");
EXPECT_EQ(result.value(), "test_value");
ASSERT_OK(coord_service->DeleteKeyValue("test_key"));
result = coord_service->TryGetKeyValue("test_key");
EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_SingleValueInDirectory) {
EnableCoordinationService();
KeyValueEntry kv = CreateKv("dir/path", "value0");
ASSERT_OK(coord_service_->InsertKeyValue(kv.key(), kv.value()));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, UnorderedElementsAre(EqualsProto(kv)));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_MultipleValuesInDirectory) {
EnableCoordinationService();
KeyValueEntry kv = CreateKv("dir/path", "value0");
KeyValueEntry kv2 = CreateKv("dir/path2", "value1");
KeyValueEntry kv_sub = CreateKv("dir/sub_dir/path", "value_sub");
ASSERT_OK(coord_service_->InsertKeyValue(kv.key(), kv.value()));
ASSERT_OK(coord_service_->InsertKeyValue(kv2.key(), kv2.value()));
ASSERT_OK(coord_service_->InsertKeyValue(kv_sub.key(), kv_sub.value()));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, UnorderedElementsAre(EqualsProto(kv), EqualsProto(kv2),
EqualsProto(kv_sub)));
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_Empty_ReturnsEmptyList) {
EnableCoordinationService();
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_WrongDir_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir0/path", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest, GetKeyValueDir_WrongDirPrefix_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("wrong_dir/dir/path", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest,
GetKeyValueDir_NonDirectoryPrefix_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir_key", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
TEST_F(CoordinateTwoTasksTest,
GetKeyValueDir_NonDirectoryKey_ReturnsEmptyList) {
EnableCoordinationService();
ASSERT_OK(coord_service_->InsertKeyValue("dir", "value0"));
std::vector<KeyValueEntry> result = coord_service_->GetKeyValueDir("dir");
EXPECT_THAT(result, IsEmpty());
}
}
TEST(CoordinationServiceTest, ListClusterDevices_TfDevice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(3);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
DeviceInfo local_devices_2;
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device0"));
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device1"));
local_devices_1.mutable_device()->Add()->PackFrom(
CreateTestDevice("task1_device0"));
local_devices_2.mutable_device()->Add()->PackFrom(
CreateTestDevice("task2_device0"));
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_0, local_devices_0,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_1, local_devices_1,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_2, local_devices_2, [&](absl::Status s) {
ASSERT_OK(s);
cluster_devices = coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
auto expected_devices = expected_cluster_devices.mutable_device();
expected_devices->Add(local_devices_0.device().begin(),
local_devices_0.device().end());
expected_devices->Add(local_devices_1.device().begin(),
local_devices_1.device().end());
expected_devices->Add(local_devices_2.device().begin(),
local_devices_2.device().end());
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
TEST(CoordinationServiceTest, ListClusterDevices_XlaDevice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(3);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
CoordinatedTask task_2;
task_2.set_job_name("worker");
task_2.set_task_id(2);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
coord_service->SetDeviceAggregationFunction(
[](const DeviceInfo& raw_global_devices) {
TestDeviceList global_device_list;
int global_id = 0;
for (const auto& device : raw_global_devices.device()) {
TestDevice local_device;
device.UnpackTo(&local_device);
local_device.set_global_id(global_id++);
*global_device_list.mutable_device()->Add() = local_device;
}
DeviceInfo global_devices;
global_devices.mutable_device()->Add()->PackFrom(global_device_list);
return global_devices;
});
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
DeviceInfo local_devices_2;
TestDevice local_0 = CreateTestDevice("task0_device0", 0);
TestDevice local_0_1 = CreateTestDevice("task0_device1", 1);
TestDevice local_1 = CreateTestDevice("task1_device0", 0);
TestDevice local_2 = CreateTestDevice("task2_device0", 0);
local_devices_0.mutable_device()->Add()->PackFrom(local_0);
local_devices_0.mutable_device()->Add()->PackFrom(local_0_1);
local_devices_1.mutable_device()->Add()->PackFrom(local_1);
local_devices_2.mutable_device()->Add()->PackFrom(local_2);
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_1, local_devices_1,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_0, local_devices_0,
[&](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_2, local_devices_2, [&](absl::Status s) {
ASSERT_OK(s);
cluster_devices = coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
TestDeviceList global_device_list;
local_0.set_global_id(0);
local_0_1.set_global_id(1);
local_1.set_global_id(2);
local_2.set_global_id(3);
*global_device_list.add_device() = local_0;
*global_device_list.add_device() = local_0_1;
*global_device_list.add_device() = local_1;
*global_device_list.add_device() = local_2;
expected_cluster_devices.mutable_device()->Add()->PackFrom(
global_device_list);
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
TEST(CoordinationServiceTest, ListClusterDevices_DevicesAreNotAddedTwice) {
const CoordinationServiceConfig config =
GetCoordinationServiceConfig(2);
CoordinatedTask task_0;
task_0.set_job_name("worker");
task_0.set_task_id(0);
CoordinatedTask task_1;
task_1.set_job_name("worker");
task_1.set_task_id(1);
absl::Status status = absl::OkStatus();
auto client_cache = std::make_unique<TestCoordinationClientCache>();
std::unique_ptr<CoordinationServiceInterface> coord_service =
CoordinationServiceInterface::EnableCoordinationService(
Env::Default(), config, std::move(client_cache));
absl::Notification n;
DeviceInfo local_devices_0;
DeviceInfo local_devices_1;
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device0"));
local_devices_0.mutable_device()->Add()->PackFrom(
CreateTestDevice("task0_device1"));
local_devices_1.mutable_device()->Add()->PackFrom(
CreateTestDevice("task1_device0"));
DeviceInfo cluster_devices;
coord_service->WaitForAllTasks(task_0, local_devices_0,
[](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_0, local_devices_0,
[](absl::Status s) { ASSERT_OK(s); });
coord_service->WaitForAllTasks(task_1, local_devices_1,
[coord_service = coord_service.get(),
&cluster_devices, &n](absl::Status s) {
ASSERT_OK(s);
cluster_devices =
coord_service->ListClusterDevices();
n.Notify();
});
n.WaitForNotification();
DeviceInfo expected_cluster_devices;
auto expected_devices = expected_cluster_devices.mutable_device();
expected_devices->Add(local_devices_0.device().begin(),
local_devices_0.device().end());
expected_devices->Add(local_devices_1.device().begin(),
local_devices_1.device().end());
EXPECT_THAT(cluster_devices, EqualsProto(expected_cluster_devices));
}
TEST_F(CoordinationBarrierTest, Barrier) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
absl::Notification n_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_FALSE(n_0.HasBeenNotified());
EXPECT_FALSE(n_1.HasBeenNotified());
EXPECT_FALSE(n_2.HasBeenNotified());
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{}, [&barrier_status_2, &n_2](absl::Status s) {
barrier_status_2 = s;
n_2.Notify();
});
EXPECT_TRUE(n_0.HasBeenNotified());
EXPECT_TRUE(n_1.HasBeenNotified());
EXPECT_TRUE(n_2.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
TEST_F(CoordinationBarrierTest, BarrierWithSubsetOfTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_TRUE(n_0.HasBeenNotified());
EXPECT_TRUE(n_1.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
}
TEST_F(CoordinationBarrierTest, BarrierWithMismatchedTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(1), GetTask(2)},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonParticipatingTask) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{GetTask(0), GetTask(1)},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonParticipatingTaskThreeTasks) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
n_0.WaitForNotification();
n_1.WaitForNotification();
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{GetTask(0), GetTask(1)},
[&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
EXPECT_THAT(barrier_status_2, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierByNonClusterTask) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Notification n_0;
CoordinatedTask unspecified_task;
unspecified_task.set_job_name("task_from_another_cluster");
unspecified_task.set_task_id(2);
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), unspecified_task},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
n_0.WaitForNotification();
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinationBarrierTest, BarrierTimeout) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status_0, barrier_status_1;
absl::Notification n_0, n_1;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
n_0.WaitForNotification();
n_1.WaitForNotification();
EXPECT_EQ(barrier_status_0, barrier_status_1);
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kDeadlineExceeded));
EXPECT_FALSE(
absl::StrContains(barrier_status_0.message(), GetTaskName(GetTask(0))));
EXPECT_TRUE(
absl::StrContains(barrier_status_0.message(),
GetTaskName(GetTask(1))));
EXPECT_TRUE(absl::StrContains(barrier_status_0.message(),
GetTaskName(GetTask(2))));
EXPECT_TRUE(absl::StrContains(
barrier_status_0.message(),
"2/3"));
}
TEST_F(CoordinationBarrierTest, BarrierReturnsPreviousError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Notification n_0;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
n_0.WaitForNotification();
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
EXPECT_THAT(barrier_status_0, StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(barrier_status_1, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest, BarrierCancelled) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
absl::Status cancelled_status =
GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0));
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kCancelled));
TF_EXPECT_OK(cancelled_status);
}
TEST_F(CoordinationBarrierTest, CancelNonExistentBarrier_FutureBarrierFails) {
const std::string barrier_id = "cancelled_barrier_id";
absl::Duration timeout = absl::Seconds(1);
absl::Status barrier_status;
ASSERT_OK(GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0)));
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kCancelled));
}
TEST_F(CoordinationBarrierTest, CancelAfterBarrierHasPassed) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{},
[&barrier_status_0](absl::Status s) { barrier_status_0 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_1](absl::Status s) { barrier_status_1 = s; });
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{},
[&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
absl::Status cancelled_status =
GetCoordinationService()->CancelBarrier(barrier_id, GetTask(0));
EXPECT_THAT(cancelled_status,
StatusIs(absl::StatusCode::kFailedPrecondition));
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
TEST_F(CoordinationBarrierTest, PassedBarrierReturnsImmediately) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Status barrier_status_repeat;
absl::Notification n0;
absl::Notification n1;
absl::Notification n2;
absl::Notification n_repeat;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status_0, &n0](absl::Status s) {
barrier_status_0 = s;
n0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{}, [&barrier_status_1, &n1](absl::Status s) {
barrier_status_1 = s;
n1.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(2),
{}, [&barrier_status_2, &n2](absl::Status s) {
barrier_status_2 = s;
n2.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status_repeat, &n_repeat](absl::Status s) {
barrier_status_repeat = s;
n_repeat.Notify();
});
EXPECT_TRUE(n0.HasBeenNotified());
EXPECT_TRUE(n1.HasBeenNotified());
EXPECT_TRUE(n2.HasBeenNotified());
EXPECT_TRUE(n_repeat.HasBeenNotified());
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
TF_EXPECT_OK(barrier_status_repeat);
}
TEST_F(CoordinationBarrierTest, BarrierFailsIfTaskIsAlreadyInError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{},
[&barrier_status](absl::Status s) { barrier_status = s; });
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest, BarrierFailsUponTaskError) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Notification n0;
absl::Status barrier_status;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{}, [&barrier_status, &n0](absl::Status s) {
barrier_status = s;
n0.Notify();
});
ASSERT_OK(GetCoordinationService()->ReportTaskError(
GetTask(0), absl::InternalError("test_error")));
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinationBarrierTest,
BarrierStillBlocksIfSameTaskCallsOngoingBarrierRepeatedly) {
const std::string barrier_id = "barrier_id";
absl::Duration timeout = absl::Seconds(5);
absl::Status barrier_status_0;
absl::Status barrier_status_1;
absl::Status barrier_status_2;
absl::Notification n_0;
absl::Notification n_1;
absl::Notification n_2;
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_0, &n_0](absl::Status s) {
barrier_status_0 = s;
n_0.Notify();
});
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(0),
{GetTask(0), GetTask(1)},
[&barrier_status_1, &n_1](absl::Status s) {
barrier_status_1 = s;
n_1.Notify();
});
EXPECT_FALSE(n_0.HasBeenNotified());
EXPECT_FALSE(n_1.HasBeenNotified());
GetCoordinationService()->BarrierAsync(
barrier_id, timeout, GetTask(1),
{GetTask(0), GetTask(1)},
[&barrier_status_2, &n_2](absl::Status s) {
barrier_status_2 = s;
n_2.Notify();
});
TF_EXPECT_OK(barrier_status_0);
TF_EXPECT_OK(barrier_status_1);
TF_EXPECT_OK(barrier_status_2);
}
TEST_F(CoordinateTwoTasksTest, ResetAndRegisterAgain) {
EnableCoordinationService();
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
}
TEST_F(CoordinateTwoTasksTest, Reset_HeartbeatsAreAcceptedForAGracePeriod) {
EnableCoordinationService();
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(3 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinateTwoTasksTest, Reset_FailsOngoingBarrier) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status barrier_status;
absl::Notification barrier_n;
coord_service_->BarrierAsync("ongoing_barrier", absl::InfiniteDuration(),
task_0_,
                               /*participating_tasks=*/{},
[&barrier_status, &barrier_n](absl::Status s) {
barrier_status = s;
barrier_n.Notify();
});
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
EXPECT_TRUE(barrier_n.HasBeenNotified());
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, Shutdown_HeartbeatsAreAcceptedForAGracePeriod) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_, [&n](absl::Status s) {
TF_EXPECT_OK(s);
n.Notify();
});
n.WaitForNotification();
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(3 * kHeartbeatTimeout));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_F(CoordinateTwoTasksTest, Shutdown_FailsOngoingBarrier) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status barrier_status;
absl::Notification barrier_n;
coord_service_->BarrierAsync("ongoing_barrier", absl::InfiniteDuration(),
task_0_,
                               /*participating_tasks=*/{},
[&barrier_status, &barrier_n](absl::Status s) {
barrier_status = s;
barrier_n.Notify();
});
absl::Notification shutdown_n;
coord_service_->ShutdownTaskAsync(task_0_, [&shutdown_n](absl::Status s) {
TF_EXPECT_OK(s);
shutdown_n.Notify();
});
shutdown_n.WaitForNotification();
EXPECT_TRUE(barrier_n.HasBeenNotified());
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, ShutdownWithBarrier_BarrierSucceeds) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Status barrier_status_2;
coord_service_->ShutdownTaskAsync(
task_0_, [&barrier_status](absl::Status s) { barrier_status = s; });
coord_service_->ShutdownTaskAsync(
task_1_, [&barrier_status_2](absl::Status s) { barrier_status_2 = s; });
TF_EXPECT_OK(barrier_status);
TF_EXPECT_OK(barrier_status_2);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
}
TEST_F(CoordinateTwoTasksTest,
ShutdownWithBarrier_BarrierFails_TaskDisconnectsOtherTaskIsAlerted) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_,
[&n, &barrier_status](absl::Status s) {
barrier_status = s;
n.Notify();
});
n.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kDeadlineExceeded));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
absl::Status other_task_status = client_1_.GetStatus();
EXPECT_THAT(other_task_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
ShutdownWithBarrier_BarrierFailsWithoutClientConnection_ServiceStops) {
  EnableCoordinationService(/*has_service_to_client_connection=*/false,
                            /*enable_shutdown_barrier=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status barrier_status;
absl::Notification n;
coord_service_->ShutdownTaskAsync(task_0_,
[&n, &barrier_status](absl::Status s) {
barrier_status = s;
n.Notify();
});
n.WaitForNotification();
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(absl::Seconds(1)));
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kDeadlineExceeded));
absl::Status s = coord_service_->RecordHeartbeat(task_1_, incarnation_1_);
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, BarrierFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{}, [&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, BarrierFailsAfterErrorPollingResponse) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0, n1;
absl::Status s0, s1;
coord_service_->PollForErrorAsync(task_0_, [&](const absl::Status& status) {
s0 = status;
n0.Notify();
});
coord_service_->PollForErrorAsync(task_1_, [&](const absl::Status& status) {
s1 = status;
n1.Notify();
});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
n0.WaitForNotification();
n1.WaitForNotification();
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kUnavailable));
EXPECT_THAT(s1, StatusIs(absl::StatusCode::kUnavailable));
absl::Notification n_barrier;
absl::Status barrier_status;
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
{}, [&](absl::Status s) {
barrier_status = s;
n_barrier.Notify();
});
n_barrier.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, BarrierWithSubsetFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
                               /*participating_tasks=*/{task_0_},
[&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest,
BarrierWithNonParticipatingTaskFailsIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Notification n0;
absl::Status barrier_status;
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->BarrierAsync("barrier_id", absl::Seconds(5), task_0_,
                               /*participating_tasks=*/{task_1_},
[&](absl::Status s) {
barrier_status = s;
n0.Notify();
});
n0.WaitForNotification();
EXPECT_THAT(barrier_status, StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, UnrecoverableTaskPropagatesError) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/false);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(client_1_.GetStatus(), StatusIs(absl::StatusCode::kInternal));
}
TEST_F(CoordinateTwoTasksTest, RecoverableTaskWillNotPropagateError) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
TF_EXPECT_OK(client_1_.GetStatus());
}
TEST_F(CoordinateTwoTasksTest,
RecoverableTaskReportErrorResetAndRegisterAgain) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
EXPECT_THAT(coord_service_->RecordHeartbeat(task_0_, incarnation_0_),
StatusIs(absl::StatusCode::kInternal));
TF_EXPECT_OK(client_1_.GetStatus());
TF_EXPECT_OK(coord_service_->ResetTask(task_0_));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_new_));
TF_EXPECT_OK(coord_service_->RecordHeartbeat(task_0_, incarnation_0_new_));
TF_EXPECT_OK(client_1_.GetStatus());
}
TEST_F(CoordinateTwoTasksTest, UnavailableTaskCanReconnect) {
  EnableCoordinationService(/*has_service_to_client_connection=*/true,
                            /*enable_shutdown_barrier=*/false,
                            /*set_worker_job_recoverable=*/false,
                            /*allow_new_incarnation_to_reconnect=*/true);
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ReportTaskError(
task_0_, MakeCoordinationError(absl::UnavailableError("test_error"))));
TF_EXPECT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_new_));
}
TEST_F(CoordinateTwoTasksTest,
DoNotAllowPollForErrorIfHasServiceToClientConnection) {
EnableCoordinationService(true);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
for (const CoordinatedTask& task : {task_0_, task_1_}) {
coord_service_->PollForErrorAsync(
task, [&](const absl::Status& status) { statuses.push_back(status); });
}
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kInternal)));
}
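// Editorial note (not part of the original test file): PollForErrorAsync is
// the fallback error-propagation channel for deployments that have no
// service-to-client connection. The test above shows the service rejects
// polling (kInternal) whenever such a connection exists; the tests below cover
// the cases where polling is the only channel available.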
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfNotInCluster) {
EnableCoordinationService(false);
CoordinatedTask task_not_in_cluster;
absl::Status s;
coord_service_->PollForErrorAsync(
task_not_in_cluster, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not in the cluster")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfTaskNotRegistered) {
EnableCoordinationService(false);
absl::Status s;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has not been registered")));
}
TEST_F(CoordinateTwoTasksTest,
AllowPollForErrorWithinGracePeriodIfTaskHasShutDown) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->ShutdownTaskAsync(task_0_,
[&](const absl::Status& status) {});
coord_service_->ShutdownTaskAsync(task_1_,
[&](const absl::Status& status) {});
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
coord_service_.reset();
EXPECT_THAT(s, StatusIs(absl::StatusCode::kCancelled));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfTaskHasShutDown) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
coord_service_->ShutdownTaskAsync(task_0_,
[&](const absl::Status& status) {});
coord_service_->ShutdownTaskAsync(task_1_,
[&](const absl::Status& status) {});
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has disconnected")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorAfterReset) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ResetTask(task_0_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("has disconnected")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorWhenInErrorState) {
EnableCoordinationService(false);
absl::Status s;
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->ReportTaskError(task_0_,
absl::InternalError("test_error")));
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("test_error")));
}
TEST_F(CoordinateTwoTasksTest, DoNotAllowPollForErrorIfServiceHasStopped) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
Env::Default()->SleepForMicroseconds(
absl::ToInt64Microseconds(2 * kHeartbeatTimeout));
absl::Status s;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s = status; });
EXPECT_THAT(s, StatusIs(absl::StatusCode::kInternal,
HasSubstr("service has shut down")));
}
TEST_F(CoordinateTwoTasksTest,
CanPropagateTaskRegistrationErrorThroughErrorPolling) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
absl::Status s0;
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { s0 = status; });
ASSERT_THAT(coord_service_->RegisterTask(task_1_, incarnation_0_),
StatusIs(absl::StatusCode::kAborted));
EXPECT_THAT(s0, StatusIs(absl::StatusCode::kAborted));
}
TEST_F(CoordinateTwoTasksTest, LatePollingTaskCanGetError) {
EnableCoordinationService(false);
ASSERT_OK(coord_service_->RegisterTask(task_0_, incarnation_0_));
ASSERT_OK(coord_service_->RegisterTask(task_1_, incarnation_1_));
std::vector<absl::Status> statuses;
statuses.reserve(2);
coord_service_->PollForErrorAsync(
task_0_, [&](const absl::Status& status) { statuses.push_back(status); });
ASSERT_OK(coord_service_->ReportTaskError(
task_0_, absl::FailedPreconditionError("test_error_from_task_0")));
coord_service_->PollForErrorAsync(
task_1_, [&](const absl::Status& status) { statuses.push_back(status); });
EXPECT_EQ(statuses.size(), 2);
EXPECT_THAT(statuses, Each(StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("test_error_from_task_0"))));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/distributed_runtime/coordination/coordination_service_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f3814bc-04c6-4fb0-bea8-19533f866030 | cpp | abseil/abseil-cpp | parser | absl/strings/internal/str_format/parser.cc | absl/strings/internal/str_format/parser_test.cc | #include "absl/strings/internal/str_format/parser.h"
#include <assert.h>
#include <string.h>
#include <wchar.h>
#include <cctype>
#include <cstdint>
#include <algorithm>
#include <initializer_list>
#include <limits>
#include <ostream>
#include <string>
#include <unordered_set>
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
constexpr ConvTag ConvTagHolder::value[256];
ABSL_ATTRIBUTE_NOINLINE const char* ConsumeUnboundConversionNoInline(
const char* p, const char* end, UnboundConversion* conv, int* next_arg) {
return ConsumeUnboundConversion(p, end, conv, next_arg);
}
std::string LengthModToString(LengthMod v) {
switch (v) {
case LengthMod::h:
return "h";
case LengthMod::hh:
return "hh";
case LengthMod::l:
return "l";
case LengthMod::ll:
return "ll";
case LengthMod::L:
return "L";
case LengthMod::j:
return "j";
case LengthMod::z:
return "z";
case LengthMod::t:
return "t";
case LengthMod::q:
return "q";
case LengthMod::none:
return "";
}
return "";
}
struct ParsedFormatBase::ParsedFormatConsumer {
explicit ParsedFormatConsumer(ParsedFormatBase *parsedformat)
: parsed(parsedformat), data_pos(parsedformat->data_.get()) {}
bool Append(string_view s) {
if (s.empty()) return true;
size_t text_end = AppendText(s);
if (!parsed->items_.empty() && !parsed->items_.back().is_conversion) {
parsed->items_.back().text_end = text_end;
} else {
parsed->items_.push_back({false, text_end, {}});
}
return true;
}
bool ConvertOne(const UnboundConversion &conv, string_view s) {
size_t text_end = AppendText(s);
parsed->items_.push_back({true, text_end, conv});
return true;
}
size_t AppendText(string_view s) {
memcpy(data_pos, s.data(), s.size());
data_pos += s.size();
return static_cast<size_t>(data_pos - parsed->data_.get());
}
ParsedFormatBase *parsed;
char* data_pos;
};
ParsedFormatBase::ParsedFormatBase(
string_view format, bool allow_ignored,
std::initializer_list<FormatConversionCharSet> convs)
: data_(format.empty() ? nullptr : new char[format.size()]) {
has_error_ = !ParseFormatString(format, ParsedFormatConsumer(this)) ||
!MatchesConversions(allow_ignored, convs);
}
bool ParsedFormatBase::MatchesConversions(
bool allow_ignored,
std::initializer_list<FormatConversionCharSet> convs) const {
std::unordered_set<int> used;
auto add_if_valid_conv = [&](int pos, char c) {
if (static_cast<size_t>(pos) > convs.size() ||
!Contains(convs.begin()[pos - 1], c))
return false;
used.insert(pos);
return true;
};
for (const ConversionItem &item : items_) {
if (!item.is_conversion) continue;
auto &conv = item.conv;
if (conv.precision.is_from_arg() &&
!add_if_valid_conv(conv.precision.get_from_arg(), '*'))
return false;
if (conv.width.is_from_arg() &&
!add_if_valid_conv(conv.width.get_from_arg(), '*'))
return false;
if (!add_if_valid_conv(conv.arg_position,
FormatConversionCharToChar(conv.conv)))
return false;
}
return used.size() == convs.size() || allow_ignored;
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/str_format/parser.h"
#include <string.h>
#include <algorithm>
#include <initializer_list>
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/strings/internal/str_format/constexpr_parser.h"
#include "absl/strings/internal/str_format/extension.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace str_format_internal {
namespace {
using testing::Pair;
TEST(LengthModTest, Names) {
struct Expectation {
int line;
LengthMod mod;
const char *name;
};
const Expectation kExpect[] = {
{__LINE__, LengthMod::none, "" },
{__LINE__, LengthMod::h, "h" },
{__LINE__, LengthMod::hh, "hh"},
{__LINE__, LengthMod::l, "l" },
{__LINE__, LengthMod::ll, "ll"},
{__LINE__, LengthMod::L, "L" },
{__LINE__, LengthMod::j, "j" },
{__LINE__, LengthMod::z, "z" },
{__LINE__, LengthMod::t, "t" },
{__LINE__, LengthMod::q, "q" },
};
EXPECT_EQ(ABSL_ARRAYSIZE(kExpect), 10);
for (auto e : kExpect) {
SCOPED_TRACE(e.line);
EXPECT_EQ(e.name, LengthModToString(e.mod));
}
}
TEST(ConversionCharTest, Names) {
struct Expectation {
FormatConversionChar id;
char name;
};
const Expectation kExpect[] = {
#define X(c) {FormatConversionCharInternal::c, #c[0]}
X(c), X(s),
X(d), X(i), X(o), X(u), X(x), X(X),
X(f), X(F), X(e), X(E), X(g), X(G), X(a), X(A),
X(n), X(p),
#undef X
{FormatConversionCharInternal::kNone, '\0'},
};
for (auto e : kExpect) {
SCOPED_TRACE(e.name);
FormatConversionChar v = e.id;
EXPECT_EQ(e.name, FormatConversionCharToChar(v));
}
}
class ConsumeUnboundConversionTest : public ::testing::Test {
public:
std::pair<string_view, string_view> Consume(string_view src) {
int next = 0;
o = UnboundConversion();
const char* p = ConsumeUnboundConversion(
src.data(), src.data() + src.size(), &o, &next);
if (!p) return {{}, src};
return {string_view(src.data(), p - src.data()),
string_view(p, src.data() + src.size() - p)};
}
bool Run(const char *fmt, bool force_positional = false) {
int next = force_positional ? -1 : 0;
o = UnboundConversion();
return ConsumeUnboundConversion(fmt, fmt + strlen(fmt), &o, &next) ==
fmt + strlen(fmt);
}
UnboundConversion o;
};
TEST_F(ConsumeUnboundConversionTest, ConsumeSpecification) {
struct Expectation {
int line;
string_view src;
string_view out;
string_view src_post;
};
const Expectation kExpect[] = {
{__LINE__, "", "", "" },
{__LINE__, "b", "", "b" },
{__LINE__, "ba", "", "ba"},
{__LINE__, "l", "", "l" },
{__LINE__, "d", "d", "" },
{__LINE__, "v", "v", "" },
{__LINE__, "d ", "d", " " },
{__LINE__, "dd", "d", "d" },
{__LINE__, "d9", "d", "9" },
{__LINE__, "dzz", "d", "zz"},
{__LINE__, "3v", "", "3v"},
{__LINE__, "hv", "", "hv"},
{__LINE__, "1$v", "1$v", ""},
{__LINE__, "1$*2$d", "1$*2$d", "" },
{__LINE__, "0-14.3hhd", "0-14.3hhd", ""},
{__LINE__, " 0-+#14.3hhd", " 0-+#14.3hhd", ""},
};
for (const auto& e : kExpect) {
SCOPED_TRACE(e.line);
EXPECT_THAT(Consume(e.src), Pair(e.out, e.src_post));
}
}
TEST_F(ConsumeUnboundConversionTest, BasicConversion) {
EXPECT_FALSE(Run(""));
EXPECT_FALSE(Run("z"));
EXPECT_FALSE(Run("dd"));
EXPECT_TRUE(Run("d"));
EXPECT_EQ('d', FormatConversionCharToChar(o.conv));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_LT(o.width.value(), 0);
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_LT(o.precision.value(), 0);
EXPECT_EQ(1, o.arg_position);
}
TEST_F(ConsumeUnboundConversionTest, ArgPosition) {
EXPECT_TRUE(Run("d"));
EXPECT_EQ(1, o.arg_position);
EXPECT_TRUE(Run("3$d"));
EXPECT_EQ(3, o.arg_position);
EXPECT_TRUE(Run("1$d"));
EXPECT_EQ(1, o.arg_position);
EXPECT_TRUE(Run("1$d", true));
EXPECT_EQ(1, o.arg_position);
EXPECT_TRUE(Run("123$d"));
EXPECT_EQ(123, o.arg_position);
EXPECT_TRUE(Run("123$d", true));
EXPECT_EQ(123, o.arg_position);
EXPECT_TRUE(Run("10$d"));
EXPECT_EQ(10, o.arg_position);
EXPECT_TRUE(Run("10$d", true));
EXPECT_EQ(10, o.arg_position);
EXPECT_FALSE(Run("0$d"));
EXPECT_FALSE(Run("0$d", true));
EXPECT_FALSE(Run("1$*0$d"));
EXPECT_FALSE(Run("1$.*0$d"));
EXPECT_FALSE(Run("01$p"));
EXPECT_FALSE(Run("01$p", true));
EXPECT_FALSE(Run("1$*01$p"));
EXPECT_FALSE(Run("1$.*01$p"));
}
TEST_F(ConsumeUnboundConversionTest, WidthAndPrecision) {
EXPECT_TRUE(Run("14d"));
EXPECT_EQ('d', FormatConversionCharToChar(o.conv));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_EQ(14, o.width.value());
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_LT(o.precision.value(), 0);
EXPECT_TRUE(Run("14.d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(14, o.width.value());
EXPECT_EQ(0, o.precision.value());
EXPECT_TRUE(Run(".d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_LT(o.width.value(), 0);
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(0, o.precision.value());
EXPECT_TRUE(Run(".5d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_LT(o.width.value(), 0);
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(5, o.precision.value());
EXPECT_TRUE(Run(".0d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_LT(o.width.value(), 0);
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(0, o.precision.value());
EXPECT_TRUE(Run("14.5d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(14, o.width.value());
EXPECT_EQ(5, o.precision.value());
EXPECT_TRUE(Run("*.*d"));
EXPECT_TRUE(o.width.is_from_arg());
EXPECT_EQ(1, o.width.get_from_arg());
EXPECT_TRUE(o.precision.is_from_arg());
EXPECT_EQ(2, o.precision.get_from_arg());
EXPECT_EQ(3, o.arg_position);
EXPECT_TRUE(Run("*d"));
EXPECT_TRUE(o.width.is_from_arg());
EXPECT_EQ(1, o.width.get_from_arg());
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_LT(o.precision.value(), 0);
EXPECT_EQ(2, o.arg_position);
EXPECT_TRUE(Run(".*d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_LT(o.width.value(), 0);
EXPECT_TRUE(o.precision.is_from_arg());
EXPECT_EQ(1, o.precision.get_from_arg());
EXPECT_EQ(2, o.arg_position);
EXPECT_FALSE(Run("*23$.*34$d"));
EXPECT_TRUE(Run("12$*23$.*34$d"));
EXPECT_EQ(12, o.arg_position);
EXPECT_TRUE(o.width.is_from_arg());
EXPECT_EQ(23, o.width.get_from_arg());
EXPECT_TRUE(o.precision.is_from_arg());
EXPECT_EQ(34, o.precision.get_from_arg());
EXPECT_TRUE(Run("2$*5$.*9$d"));
EXPECT_EQ(2, o.arg_position);
EXPECT_TRUE(o.width.is_from_arg());
EXPECT_EQ(5, o.width.get_from_arg());
EXPECT_TRUE(o.precision.is_from_arg());
EXPECT_EQ(9, o.precision.get_from_arg());
EXPECT_FALSE(Run(".*0$d")) << "no arg 0";
EXPECT_TRUE(Run("999999999.999999999d"));
EXPECT_FALSE(o.width.is_from_arg());
EXPECT_EQ(999999999, o.width.value());
EXPECT_FALSE(o.precision.is_from_arg());
EXPECT_EQ(999999999, o.precision.value());
EXPECT_FALSE(Run("1000000000.999999999d"));
EXPECT_FALSE(Run("999999999.1000000000d"));
EXPECT_FALSE(Run("9999999999d"));
EXPECT_FALSE(Run(".9999999999d"));
}
TEST_F(ConsumeUnboundConversionTest, Flags) {
static const char kAllFlags[] = "-+ #0";
static const int kNumFlags = ABSL_ARRAYSIZE(kAllFlags) - 1;
for (int rev = 0; rev < 2; ++rev) {
for (int i = 0; i < 1 << kNumFlags; ++i) {
std::string fmt;
for (int k = 0; k < kNumFlags; ++k)
if ((i >> k) & 1) fmt += kAllFlags[k];
if (rev == 1) {
std::reverse(fmt.begin(), fmt.end());
}
fmt += 'd';
SCOPED_TRACE(fmt);
EXPECT_TRUE(Run(fmt.c_str()));
EXPECT_EQ(fmt.find('-') == std::string::npos,
!FlagsContains(o.flags, Flags::kLeft));
EXPECT_EQ(fmt.find('+') == std::string::npos,
!FlagsContains(o.flags, Flags::kShowPos));
EXPECT_EQ(fmt.find(' ') == std::string::npos,
!FlagsContains(o.flags, Flags::kSignCol));
EXPECT_EQ(fmt.find('#') == std::string::npos,
!FlagsContains(o.flags, Flags::kAlt));
EXPECT_EQ(fmt.find('0') == std::string::npos,
!FlagsContains(o.flags, Flags::kZero));
}
}
}
TEST_F(ConsumeUnboundConversionTest, BasicFlag) {
for (const char* fmt : {"d", "llx", "G", "1$X"}) {
SCOPED_TRACE(fmt);
EXPECT_TRUE(Run(fmt));
EXPECT_EQ(o.flags, Flags::kBasic);
}
for (const char* fmt : {"3d", ".llx", "-G", "1$#X", "lc"}) {
SCOPED_TRACE(fmt);
EXPECT_TRUE(Run(fmt));
EXPECT_NE(o.flags, Flags::kBasic);
}
}
TEST_F(ConsumeUnboundConversionTest, LengthMod) {
EXPECT_TRUE(Run("d"));
EXPECT_EQ(LengthMod::none, o.length_mod);
EXPECT_TRUE(Run("hd"));
EXPECT_EQ(LengthMod::h, o.length_mod);
EXPECT_TRUE(Run("hhd"));
EXPECT_EQ(LengthMod::hh, o.length_mod);
EXPECT_TRUE(Run("ld"));
EXPECT_EQ(LengthMod::l, o.length_mod);
EXPECT_TRUE(Run("lld"));
EXPECT_EQ(LengthMod::ll, o.length_mod);
EXPECT_TRUE(Run("Lf"));
EXPECT_EQ(LengthMod::L, o.length_mod);
EXPECT_TRUE(Run("qf"));
EXPECT_EQ(LengthMod::q, o.length_mod);
EXPECT_TRUE(Run("jd"));
EXPECT_EQ(LengthMod::j, o.length_mod);
EXPECT_TRUE(Run("zd"));
EXPECT_EQ(LengthMod::z, o.length_mod);
EXPECT_TRUE(Run("td"));
EXPECT_EQ(LengthMod::t, o.length_mod);
}
struct SummarizeConsumer {
std::string* out;
explicit SummarizeConsumer(std::string* out) : out(out) {}
bool Append(string_view s) {
*out += "[" + std::string(s) + "]";
return true;
}
bool ConvertOne(const UnboundConversion& conv, string_view s) {
*out += "{";
*out += std::string(s);
*out += ":";
*out += std::to_string(conv.arg_position) + "$";
if (conv.width.is_from_arg()) {
*out += std::to_string(conv.width.get_from_arg()) + "$*";
}
if (conv.precision.is_from_arg()) {
*out += "." + std::to_string(conv.precision.get_from_arg()) + "$*";
}
*out += FormatConversionCharToChar(conv.conv);
*out += "}";
return true;
}
};
std::string SummarizeParsedFormat(const ParsedFormatBase& pc) {
std::string out;
if (!pc.ProcessFormat(SummarizeConsumer(&out))) out += "!";
return out;
}
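// Key to the summary notation used in the expectations below (an editorial
// note, not original test text): literal text appears as "[text]" and each
// conversion as "{<spec>:<arg position>$<conv char>}", with "N$*" spliced in
// when the width or precision is taken from argument N; a spec that fails to
// parse appends "!". For example, "hello%s" summarizes to "[hello]{s:1$s}".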
class ParsedFormatTest : public testing::Test {};
TEST_F(ParsedFormatTest, ValueSemantics) {
ParsedFormatBase p1({}, true, {});
EXPECT_EQ("", SummarizeParsedFormat(p1));
ParsedFormatBase p2 = p1;
EXPECT_EQ(SummarizeParsedFormat(p1), SummarizeParsedFormat(p2));
p1 = ParsedFormatBase("hello%s", true,
{FormatConversionCharSetInternal::s});
EXPECT_EQ("[hello]{s:1$s}", SummarizeParsedFormat(p1));
ParsedFormatBase p3 = p1;
EXPECT_EQ(SummarizeParsedFormat(p1), SummarizeParsedFormat(p3));
using std::swap;
swap(p1, p2);
EXPECT_EQ("", SummarizeParsedFormat(p1));
EXPECT_EQ("[hello]{s:1$s}", SummarizeParsedFormat(p2));
swap(p1, p2);
p2 = p1;
EXPECT_EQ(SummarizeParsedFormat(p1), SummarizeParsedFormat(p2));
}
struct ExpectParse {
const char* in;
std::initializer_list<FormatConversionCharSet> conv_set;
const char* out;
};
TEST_F(ParsedFormatTest, Parsing) {
const ExpectParse kExpect[] = {
{"", {}, ""},
{"ab", {}, "[ab]"},
{"a%d", {FormatConversionCharSetInternal::d}, "[a]{d:1$d}"},
{"a%+d", {FormatConversionCharSetInternal::d}, "[a]{+d:1$d}"},
{"a% d", {FormatConversionCharSetInternal::d}, "[a]{ d:1$d}"},
{"a%b %d", {}, "[a]!"},
};
for (const auto& e : kExpect) {
SCOPED_TRACE(e.in);
EXPECT_EQ(e.out,
SummarizeParsedFormat(ParsedFormatBase(e.in, false, e.conv_set)));
}
}
TEST_F(ParsedFormatTest, ParsingFlagOrder) {
const ExpectParse kExpect[] = {
{"a%+ 0d", {FormatConversionCharSetInternal::d}, "[a]{+ 0d:1$d}"},
{"a%+0 d", {FormatConversionCharSetInternal::d}, "[a]{+0 d:1$d}"},
{"a%0+ d", {FormatConversionCharSetInternal::d}, "[a]{0+ d:1$d}"},
{"a% +0d", {FormatConversionCharSetInternal::d}, "[a]{ +0d:1$d}"},
{"a%0 +d", {FormatConversionCharSetInternal::d}, "[a]{0 +d:1$d}"},
{"a% 0+d", {FormatConversionCharSetInternal::d}, "[a]{ 0+d:1$d}"},
{"a%+ 0+d", {FormatConversionCharSetInternal::d}, "[a]{+ 0+d:1$d}"},
};
for (const auto& e : kExpect) {
SCOPED_TRACE(e.in);
EXPECT_EQ(e.out,
SummarizeParsedFormat(ParsedFormatBase(e.in, false, e.conv_set)));
}
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/parser.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/str_format/parser_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8605120a-7d6a-41a7-8cb7-70a7ce52e7d8 | cpp | google/cel-cpp | type_pool | common/types/type_pool.cc | common/types/type_pool_test.cc | #include "common/types/type_pool.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "common/type.h"
namespace cel::common_internal {
StructType TypePool::MakeStructType(absl::string_view name) {
ABSL_DCHECK(!IsWellKnownMessageType(name)) << name;
if (ABSL_PREDICT_FALSE(name.empty())) {
return StructType();
}
if (const auto* descriptor = descriptors_->FindMessageTypeByName(name);
descriptor != nullptr) {
return MessageType(descriptor);
}
return MakeBasicStructType(InternString(name));
}
FunctionType TypePool::MakeFunctionType(const Type& result,
absl::Span<const Type> args) {
absl::MutexLock lock(&functions_mutex_);
return functions_.InternFunctionType(result, args);
}
ListType TypePool::MakeListType(const Type& element) {
if (element.IsDyn()) {
return ListType();
}
absl::MutexLock lock(&lists_mutex_);
return lists_.InternListType(element);
}
MapType TypePool::MakeMapType(const Type& key, const Type& value) {
if (key.IsDyn() && value.IsDyn()) {
return MapType();
}
if (key.IsString() && value.IsDyn()) {
return JsonMapType();
}
absl::MutexLock lock(&maps_mutex_);
return maps_.InternMapType(key, value);
}
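// Illustrative sketch (not part of the original file): the early returns above
// keep the fully-dynamic and JSON-shaped maps out of the interner, so with a
// TypePool `pool` set up as in the unit test one would expect
//   pool.MakeMapType(DynType(), DynType());        // MapType()
//   pool.MakeMapType(StringType(), DynType());     // JsonMapType()
//   pool.MakeMapType(StringType(), StringType());  // interned, arena-backed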
OpaqueType TypePool::MakeOpaqueType(absl::string_view name,
absl::Span<const Type> parameters) {
if (name == OptionalType::kName) {
if (parameters.size() == 1 && parameters.front().IsDyn()) {
return OptionalType();
}
    // Rebind to the static kName storage so the retained view cannot dangle
    // once the caller's backing string goes away.
    name = OptionalType::kName;
} else {
name = InternString(name);
}
absl::MutexLock lock(&opaques_mutex_);
return opaques_.InternOpaqueType(name, parameters);
}
OptionalType TypePool::MakeOptionalType(const Type& parameter) {
return MakeOpaqueType(OptionalType::kName, absl::MakeConstSpan(¶meter, 1))
.GetOptional();
}
TypeParamType TypePool::MakeTypeParamType(absl::string_view name) {
return TypeParamType(InternString(name));
}
TypeType TypePool::MakeTypeType(const Type& type) {
absl::MutexLock lock(&types_mutex_);
return types_.InternTypeType(type);
}
absl::string_view TypePool::InternString(absl::string_view string) {
absl::MutexLock lock(&strings_mutex_);
return strings_.InternString(string);
}
} | #include "common/types/type_pool.h"
#include "common/type.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "google/protobuf/arena.h"
namespace cel::common_internal {
namespace {
using ::cel::internal::GetTestingDescriptorPool;
using ::testing::_;
TEST(TypePool, MakeStructType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeStructType("foo.Bar"),
MakeBasicStructType("foo.Bar"));
EXPECT_TRUE(
type_pool.MakeStructType("google.api.expr.test.v1.proto3.TestAllTypes")
.IsMessage());
EXPECT_DEBUG_DEATH(
static_cast<void>(type_pool.MakeStructType("google.protobuf.BoolValue")),
_);
}
TEST(TypePool, MakeFunctionType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeFunctionType(BoolType(), {IntType(), IntType()}),
FunctionType(&arena, BoolType(), {IntType(), IntType()}));
}
TEST(TypePool, MakeListType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeListType(DynType()), ListType());
EXPECT_EQ(type_pool.MakeListType(DynType()), JsonListType());
EXPECT_EQ(type_pool.MakeListType(StringType()),
ListType(&arena, StringType()));
}
TEST(TypePool, MakeMapType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeMapType(DynType(), DynType()), MapType());
EXPECT_EQ(type_pool.MakeMapType(StringType(), DynType()), JsonMapType());
EXPECT_EQ(type_pool.MakeMapType(StringType(), StringType()),
MapType(&arena, StringType(), StringType()));
}
TEST(TypePool, MakeOpaqueType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeOpaqueType("custom_type", {DynType(), DynType()}),
OpaqueType(&arena, "custom_type", {DynType(), DynType()}));
}
TEST(TypePool, MakeOptionalType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeOptionalType(DynType()), OptionalType());
EXPECT_EQ(type_pool.MakeOptionalType(StringType()),
OptionalType(&arena, StringType()));
}
TEST(TypePool, MakeTypeParamType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeTypeParamType("T"), TypeParamType("T"));
}
TEST(TypePool, MakeTypeType) {
google::protobuf::Arena arena;
TypePool type_pool(GetTestingDescriptorPool(), &arena);
EXPECT_EQ(type_pool.MakeTypeType(BoolType()), TypeType(&arena, BoolType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_pool.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_pool_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
0e36476b-428c-4721-92c0-7e256182bea3 | cpp | tensorflow/tensorflow | kvcache | tensorflow/lite/experimental/genai/kvcache.cc | tensorflow/lite/experimental/genai/kvcache_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/resource/cache_buffer.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace llm {
static const int kPositionTensor = 0;
static const int kKeyTensor = 1;
static const int kValueTensor = 2;
static const int kFullKeyTensor = 0;
static const int kFullValueTensor = 1;
static const int kRequiredNumDimensions = 4;
static const int kDefaultMaxNumCacheEntries = 2048;
static const int kDefaultNumTransformerLayers = 32;
static const int kDefaultTransformerLayerId = 0;
static const int KVCACHE_KEY_RESOURCE = 42;
static const int KVCACHE_VALUE_RESOURCE = 43;
struct OpData {
int num_layers;
int layer_index;
int max_num_entries;
int first_slot_index;
resource::CacheBuffer* key_cache_buffer;
resource::CacheBuffer* value_cache_buffer;
bool is_initialized;
uint8_t* key_cache_ptr;
uint8_t* value_cache_ptr;
};
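// Layout sketch (editorial note, not in the original source): KVCachePrepare
// below sizes each shared cache resource as a single float buffer of shape
// [batch, num_layers, max_num_entries, dims[2], dims[3]]. key_cache_ptr and
// value_cache_ptr remember the start of this layer's block of
// max_num_entries * dims[2] * dims[3] elements inside those buffers, and the
// op's outputs alias that block directly via kTfLiteCustom allocations.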
void* KVCacheInit(TfLiteContext* context, const char* buffer, size_t length) {
OpData* op_data = new OpData();
op_data->max_num_entries = -1;
op_data->num_layers = -1;
op_data->layer_index = -1;
op_data->first_slot_index = -1;
op_data->key_cache_buffer = nullptr;
op_data->value_cache_buffer = nullptr;
op_data->is_initialized = false;
op_data->key_cache_ptr = nullptr;
op_data->value_cache_ptr = nullptr;
return op_data;
}
TfLiteStatus KVCachePrepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
if (!op_data->is_initialized) {
const uint8_t* buffer =
reinterpret_cast<const uint8_t*>(node->custom_initial_data);
const size_t length = node->custom_initial_data_size;
auto flexbuffer_map = flexbuffers::GetRoot(buffer, length).AsMap();
int32_t max_num_entries = flexbuffer_map["kv_cache_max"].AsInt32();
int32_t num_layers = flexbuffer_map["num_layers"].AsInt32();
int32_t layer_index = flexbuffer_map["layer_index"].AsInt32();
op_data->max_num_entries =
max_num_entries > 0 ? max_num_entries : kDefaultMaxNumCacheEntries;
op_data->num_layers =
num_layers > 0 ? num_layers : kDefaultNumTransformerLayers;
op_data->layer_index =
layer_index > 0 ? layer_index : kDefaultTransformerLayerId;
op_data->first_slot_index = 0;
op_data->is_initialized = true;
}
const TfLiteTensor* position;
const TfLiteTensor* key;
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &key));
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TF_LITE_ENSURE_EQ(context, position->type, kTfLiteInt64);
TF_LITE_ENSURE_EQ(context, key->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, value->type, kTfLiteFloat32);
TF_LITE_ENSURE(context, NumDimensions(position) == 1);
TF_LITE_ENSURE(
context, GetTensorShape(position).Dims(0) == GetTensorShape(key).Dims(1));
TF_LITE_ENSURE(context, NumDimensions(key) == kRequiredNumDimensions);
TF_LITE_ENSURE(context, GetTensorShape(key).Dims(0) == 1);
TF_LITE_ENSURE(context, HaveSameShapes(key, value));
TfLiteTensor* kfull;
TfLiteTensor* vfull;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullKeyTensor, &kfull));
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullValueTensor, &vfull));
kfull->allocation_type = kTfLiteCustom;
vfull->allocation_type = kTfLiteCustom;
kfull->type = kTfLiteFloat32;
vfull->type = kTfLiteFloat32;
TfLiteIntArray* input_dims = key->dims;
TfLiteIntArray* kcache_dims = TfLiteIntArrayCopy(input_dims);
TfLiteIntArray* vcache_dims = TfLiteIntArrayCopy(input_dims);
kcache_dims->data[1] = op_data->max_num_entries;
vcache_dims->data[1] = op_data->max_num_entries;
TfLiteIntArray* kcache_buffer_dims = TfLiteIntArrayCreate(5);
kcache_buffer_dims->data[0] = input_dims->data[0];
kcache_buffer_dims->data[1] = op_data->num_layers;
kcache_buffer_dims->data[2] = op_data->max_num_entries;
kcache_buffer_dims->data[3] = input_dims->data[2];
kcache_buffer_dims->data[4] = input_dims->data[3];
TfLiteIntArray* vcache_buffer_dims = TfLiteIntArrayCopy(kcache_buffer_dims);
Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
auto& resources = subgraph->resources();
if (resources.count(KVCACHE_KEY_RESOURCE) == 0) {
auto* cbuffer = new resource::CacheBuffer();
cbuffer->Initialize(*kcache_buffer_dims);
resources.emplace(KVCACHE_KEY_RESOURCE, cbuffer);
op_data->key_cache_buffer = cbuffer;
} else {
resource::ResourceBase* resourcePtr =
resources.at(KVCACHE_KEY_RESOURCE).get();
resource::CacheBuffer* cbuffer = (resource::CacheBuffer*)(resourcePtr);
op_data->key_cache_buffer = cbuffer;
}
if (resources.count(KVCACHE_VALUE_RESOURCE) == 0) {
auto* cbuffer = new resource::CacheBuffer();
cbuffer->Initialize(*vcache_buffer_dims);
resources.emplace(KVCACHE_VALUE_RESOURCE, cbuffer);
op_data->value_cache_buffer = cbuffer;
} else {
resource::ResourceBase* resourcePtr =
resources.at(KVCACHE_VALUE_RESOURCE).get();
resource::CacheBuffer* cbuffer = (resource::CacheBuffer*)(resourcePtr);
op_data->value_cache_buffer = cbuffer;
}
RuntimeShape shape(GetTensorShape(key));
const int elements_in_one_entry = shape.Dims(2) * shape.Dims(3);
const int elements_in_one_block =
op_data->max_num_entries * elements_in_one_entry;
uint8_t* k_ptr =
reinterpret_cast<uint8_t*>(op_data->key_cache_buffer->GetBuffer());
uint8_t* v_ptr =
reinterpret_cast<uint8_t*>(op_data->value_cache_buffer->GetBuffer());
k_ptr = k_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
v_ptr = v_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
size_t kcache_dims_flatsize = kcache_dims->data[0] * kcache_dims->data[1] *
kcache_dims->data[2] * kcache_dims->data[3];
size_t vcache_dims_flatsize = vcache_dims->data[0] * vcache_dims->data[1] *
vcache_dims->data[2] * vcache_dims->data[3];
RuntimeShape kfull_shape(GetTensorShape(kfull));
RuntimeShape vfull_shape(GetTensorShape(vfull));
if (kfull_shape.FlatSize() > 1 && vfull_shape.FlatSize() > 1) {
TF_LITE_ENSURE_EQ(context, kfull_shape.FlatSize(), kcache_dims_flatsize);
TF_LITE_ENSURE_EQ(context, vfull_shape.FlatSize(), vcache_dims_flatsize);
}
TF_LITE_ENSURE_EQ(context, elements_in_one_block, kcache_dims_flatsize);
TF_LITE_ENSURE_EQ(context, elements_in_one_block, vcache_dims_flatsize);
kfull->data.data = k_ptr;
vfull->data.data = v_ptr;
op_data->key_cache_ptr = k_ptr;
op_data->value_cache_ptr = v_ptr;
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, kfull, kcache_dims));
TF_LITE_ENSURE_OK(context,
context->ResizeTensor(context, vfull, vcache_dims));
TfLiteIntArrayFree(kcache_buffer_dims);
TfLiteIntArrayFree(vcache_buffer_dims);
return kTfLiteOk;
}
void KVCacheFree(TfLiteContext* context, void* buffer) {
delete static_cast<OpData*>(buffer);
}
TfLiteStatus KVCacheEval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* position;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kPositionTensor, &position));
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kKeyTensor, &key));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value));
TfLiteTensor* kfull;
TfLiteTensor* vfull;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullKeyTensor, &kfull));
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kFullValueTensor, &vfull));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
float* key_cache_ptr = op_data->key_cache_buffer->GetBuffer();
float* value_cache_ptr = op_data->value_cache_buffer->GetBuffer();
const int layer_index = op_data->layer_index;
const int64_t max_num_entries = op_data->max_num_entries;
int current_num_entries =
op_data->key_cache_buffer->GetNumEntries(layer_index);
RuntimeShape shape(GetTensorShape(key));
const int64_t num_slots_needed = shape.Dims(1);
const int elements_in_one_entry = shape.Dims(2) * shape.Dims(3);
const int elements_in_one_block =
op_data->max_num_entries * elements_in_one_entry;
const int64_t num_bytes_per_tensor = sizeof(float) * elements_in_one_entry;
uint8_t* k_ptr = reinterpret_cast<uint8_t*>(key_cache_ptr);
uint8_t* v_ptr = reinterpret_cast<uint8_t*>(value_cache_ptr);
k_ptr = k_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
v_ptr = v_ptr + sizeof(float) * op_data->layer_index * elements_in_one_block;
TF_LITE_ENSURE_EQ(context, k_ptr, op_data->key_cache_ptr);
TF_LITE_ENSURE_EQ(context, v_ptr, op_data->value_cache_ptr);
TF_LITE_ENSURE_EQ(context, k_ptr, kfull->data.data);
TF_LITE_ENSURE_EQ(context, v_ptr, vfull->data.data);
const int64_t input_first_idx = position->data.i64[0];
const int64_t input_last_idx = input_first_idx + num_slots_needed - 1;
const int64_t cache_first_slot_idx = op_data->first_slot_index;
const int64_t cache_last_slot_idx =
cache_first_slot_idx + op_data->max_num_entries - 1;
const int slots_to_shift = std::min(
std::max(static_cast<int64_t>(0), input_last_idx - cache_last_slot_idx),
max_num_entries);
int64_t first_slot = input_first_idx - op_data->first_slot_index;
if (first_slot < 0) {
TF_LITE_KERNEL_LOG(
context,
"Can not specify a position before this cache's first slot index of %d",
op_data->first_slot_index);
return kTfLiteError;
}
int64_t byte_offset_for_output = first_slot * num_bytes_per_tensor;
int64_t num_slots_for_output = num_slots_needed;
if (slots_to_shift > 0 && slots_to_shift < max_num_entries) {
byte_offset_for_output = 0;
num_slots_for_output = max_num_entries;
const int bytes_offset =
sizeof(float) * elements_in_one_entry * slots_to_shift;
const int size_bytes_to_shift = sizeof(float) * elements_in_one_entry *
(max_num_entries - slots_to_shift);
memmove(k_ptr, k_ptr + bytes_offset, size_bytes_to_shift);
memmove(v_ptr, v_ptr + bytes_offset, size_bytes_to_shift);
}
op_data->first_slot_index = op_data->first_slot_index + slots_to_shift;
first_slot = input_first_idx - op_data->first_slot_index;
const int64_t bytes_offset_for_cache = first_slot * num_bytes_per_tensor;
memcpy(k_ptr + bytes_offset_for_cache, key->data.data, key->bytes);
memcpy(v_ptr + bytes_offset_for_cache, value->data.data, value->bytes);
current_num_entries =
std::min(first_slot + num_slots_needed, max_num_entries);
op_data->key_cache_buffer->SetNumEntries(layer_index, current_num_entries);
op_data->value_cache_buffer->SetNumEntries(layer_index, current_num_entries);
return kTfLiteOk;
}
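// Worked example of the eviction arithmetic above, using the defaults from the
// unit test (an illustration, not original source): with max_num_entries = 2048
// and entries of shape [2, 3], elements_in_one_entry = 6. Writing two slots at
// position 2048 into a full cache (first_slot_index = 0, last valid position
// 2047) gives slots_to_shift = 2, so the memmove discards the two oldest
// entries and the new key/value data lands in the final two slots;
// first_slot_index then becomes 2, which is why a later request for a position
// before it returns an error.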
}
TfLiteRegistration* Register_KV_CACHE() {
static TfLiteRegistration r = {llm::KVCacheInit, llm::KVCacheFree,
llm::KVCachePrepare, llm::KVCacheEval};
return &r;
}
}
}
} | #include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/experimental/genai/genai_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
static const int kDefaultMaxNumCacheEntries = 2048;
class SimpleCacheOpModel : public SingleOpModel {
public:
SimpleCacheOpModel(const TensorData& pos_tensor, const TensorData& k_tensor,
const TensorData& v_tensor) {
pos_ = AddInput(pos_tensor);
k_ = AddInput(k_tensor);
v_ = AddInput(v_tensor);
kfull_ = AddOutput(k_tensor.type);
vfull_ = AddOutput(v_tensor.type);
SetCustomOp("KV_Cache", {}, ops::custom::Register_KV_CACHE);
BuildInterpreter({GetShape(pos_), GetShape(k_), GetShape(v_)});
}
void SetPosition(const std::vector<int64_t>& data) {
PopulateTensor(pos_, data);
}
void SetKey(const std::vector<float>& data) { PopulateTensor(k_, data); }
void SetValue(const std::vector<float>& data) { PopulateTensor(v_, data); }
void ResizePosition(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(pos_, dims);
}
void ResizeKey(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(k_, dims);
}
void ResizeValue(const std::vector<int>& dims) {
interpreter_->ResizeInputTensor(v_, dims);
}
std::vector<float> GetFullK() {
const auto output = ExtractVector<float>(kfull_);
return output;
}
std::vector<float> GetFullV() {
const auto output = ExtractVector<float>(vfull_);
return output;
}
TfLiteStatus ReAllocate() { return interpreter_->AllocateTensors(); }
protected:
int pos_;
int k_;
int v_;
int kfull_;
int vfull_;
};
TEST(SimpleCacheOp1Test, BasicTest) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
m.SetKey({{1, 0, -6, 2, 4, 3, 1, 0, -6, 2, 4, 3}});
m.SetValue({{4, 2, -4, 2, 4, 2, 4, 2, -4, 2, 4, 2}});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
}
TEST(SimpleCacheOp2Test, AddToCache) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
std::vector<float> key = {1, 5, -6, 2, 4, 3, 8, 9, -8, 7, 2, 11};
m.SetKey(key);
std::vector<float> value = {2, 3, -4, 5, 6, 7, 1, 8, -12, 11, 14, 21};
m.SetValue(value);
const int key_size = 2 * 3;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
for (int i = 0; i < key.size(); ++i) {
ASSERT_EQ(fullk[i], key[i]);
ASSERT_EQ(fullv[i], value[i]);
}
for (int i = key.size(); i < fullk.size(); ++i) {
ASSERT_EQ(fullk[i], 0.);
ASSERT_EQ(fullv[i], 0.);
}
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
for (int i = 0; i < 510; i++) {
int offset = 2 * i + 2;
m.SetPosition({offset, offset + 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
}
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int i = 0; i < 1022 * key_size; ++i) {
ASSERT_NE(fullv[i], 0);
}
for (int i = 1022 * key_size; i < fullk.size(); ++i) {
ASSERT_EQ(fullv[i], 0);
}
}
TEST(SimpleCacheOp2Test, ShiftSlotsInCache) {
SimpleCacheOpModel m({TensorType_INT64, {2}},
{TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 2, 2, 3}});
m.SetPosition({0, 1});
std::vector<float> key = {1, 5, -6, 2, 4, 3, 2, 6, -7, 3, 5, 4};
m.SetKey(key);
std::vector<float> value = {4, 2, -4, 2, 4, 2, 9, 8, -9, 8, 9, 1};
m.SetValue(value);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
std::vector<float> fullk = m.GetFullK();
std::vector<float> fullv = m.GetFullV();
for (int i = 0; i < key.size(); ++i) {
ASSERT_EQ(fullk[i], key[i]);
ASSERT_EQ(fullv[i], value[i]);
}
for (int i = key.size(); i < fullk.size(); ++i) {
ASSERT_EQ(fullk[i], 0.);
ASSERT_EQ(fullv[i], 0.);
}
ASSERT_EQ(fullk.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
ASSERT_EQ(fullv.size(), 2 * 3 * kDefaultMaxNumCacheEntries);
for (int i = 0; i < 1023; i++) {
ASSERT_EQ(m.Invoke(), kTfLiteOk);
int offset = 2 * i + 2;
m.SetPosition({offset, offset + 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
}
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int i = 0; i < fullk.size(); ++i) {
ASSERT_NE(fullk[i], 0);
ASSERT_NE(fullv[i], 0);
}
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 6 + j;
int idx = 6 + j;
ASSERT_EQ(fullk[idxfull], key[idx]);
ASSERT_EQ(fullv[idxfull], value[idx]);
}
std::vector<float> key2 = {1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7};
m.SetKey(key2);
std::vector<float> value2 = {8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9};
m.SetValue(value2);
m.SetPosition({2048, 2049});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int j = 0; j < 12; ++j) {
int idxfull = fullk.size() - 12 + j;
ASSERT_EQ(fullk[idxfull], key2[j]);
ASSERT_EQ(fullv[idxfull], value2[j]);
}
m.ResizeKey({1, 1, 2, 3});
m.ResizeValue({1, 1, 2, 3});
m.ResizePosition({1});
m.ReAllocate();
std::vector<float> key3 = {4, 4, 4, 4, 4, 4};
m.SetKey(key3);
std::vector<float> value3 = {2, 2, 2, 2, 2, 2};
m.SetValue(value3);
m.SetPosition({2050});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
fullk = m.GetFullK();
fullv = m.GetFullV();
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 6 + j;
ASSERT_EQ(fullk[idxfull], key3[j]);
ASSERT_EQ(fullv[idxfull], value3[j]);
}
for (int j = 0; j < 6; ++j) {
int idxfull = fullk.size() - 12 + j;
ASSERT_EQ(fullk[idxfull], key2[6 + j]);
ASSERT_EQ(fullv[idxfull], value2[6 + j]);
}
  std::vector<float> key4 = {5, 5, 5, 5, 5, 5};
  m.SetKey(key4);
  std::vector<float> value4 = {3, 3, 3, 3, 3, 3};
  m.SetValue(value4);
  // Positions before the cache's current first slot index must be rejected.
  m.SetPosition({0});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/kvcache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/genai/kvcache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6f0401f6-2870-4250-b739-0255b47e45e4 | cpp | google/quiche | quic_stream | quiche/quic/core/quic_stream.cc | quiche/quic/core/quic_stream_test.cc | #include "quiche/quic/core/quic_stream.h"
#include <algorithm>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_flow_controller.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
using spdy::SpdyPriority;
namespace quic {
#define ENDPOINT \
(perspective_ == Perspective::IS_SERVER ? "Server: " : "Client: ")
namespace {
QuicByteCount DefaultFlowControlWindow(ParsedQuicVersion version) {
if (!version.AllowsLowFlowControlLimits()) {
return kDefaultFlowControlSendWindow;
}
return 0;
}
QuicByteCount GetInitialStreamFlowControlWindowToSend(QuicSession* session,
QuicStreamId stream_id) {
ParsedQuicVersion version = session->connection()->version();
if (version.handshake_protocol != PROTOCOL_TLS1_3) {
return session->config()->GetInitialStreamFlowControlWindowToSend();
}
if (VersionHasIetfQuicFrames(version.transport_version) &&
!QuicUtils::IsBidirectionalStreamId(stream_id, version)) {
return session->config()
->GetInitialMaxStreamDataBytesUnidirectionalToSend();
}
if (QuicUtils::IsOutgoingStreamId(version, stream_id,
session->perspective())) {
return session->config()
->GetInitialMaxStreamDataBytesOutgoingBidirectionalToSend();
}
return session->config()
->GetInitialMaxStreamDataBytesIncomingBidirectionalToSend();
}
QuicByteCount GetReceivedFlowControlWindow(QuicSession* session,
QuicStreamId stream_id) {
ParsedQuicVersion version = session->connection()->version();
if (version.handshake_protocol != PROTOCOL_TLS1_3) {
if (session->config()->HasReceivedInitialStreamFlowControlWindowBytes()) {
return session->config()->ReceivedInitialStreamFlowControlWindowBytes();
}
return DefaultFlowControlWindow(version);
}
if (VersionHasIetfQuicFrames(version.transport_version) &&
!QuicUtils::IsBidirectionalStreamId(stream_id, version)) {
if (session->config()
->HasReceivedInitialMaxStreamDataBytesUnidirectional()) {
return session->config()
->ReceivedInitialMaxStreamDataBytesUnidirectional();
}
return DefaultFlowControlWindow(version);
}
if (QuicUtils::IsOutgoingStreamId(version, stream_id,
session->perspective())) {
if (session->config()
->HasReceivedInitialMaxStreamDataBytesOutgoingBidirectional()) {
return session->config()
->ReceivedInitialMaxStreamDataBytesOutgoingBidirectional();
}
return DefaultFlowControlWindow(version);
}
if (session->config()
->HasReceivedInitialMaxStreamDataBytesIncomingBidirectional()) {
return session->config()
->ReceivedInitialMaxStreamDataBytesIncomingBidirectional();
}
return DefaultFlowControlWindow(version);
}
}
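// Editorial summary of the helpers above (not part of the original file):
// versions that do not use the TLS 1.3 handshake fall back to the single
// per-stream window in the config, while TLS 1.3 versions pick the window per
// stream kind (unidirectional, outgoing bidirectional, incoming bidirectional),
// using DefaultFlowControlWindow() when the peer's transport parameters did not
// advertise a value.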
PendingStream::PendingStream(QuicStreamId id, QuicSession* session)
: id_(id),
version_(session->version()),
stream_delegate_(session),
stream_bytes_read_(0),
fin_received_(false),
is_bidirectional_(QuicUtils::GetStreamType(id, session->perspective(),
                                                 /*peer_initiated=*/true,
session->version()) ==
BIDIRECTIONAL),
connection_flow_controller_(session->flow_controller()),
flow_controller_(session, id,
                       /*is_connection_flow_controller=*/false,
GetReceivedFlowControlWindow(session, id),
GetInitialStreamFlowControlWindowToSend(session, id),
kStreamReceiveWindowLimit,
session->flow_controller()->auto_tune_receive_window(),
session->flow_controller()),
sequencer_(this),
creation_time_(session->GetClock()->ApproximateNow()) {
if (is_bidirectional_) {
QUIC_CODE_COUNT_N(quic_pending_stream, 3, 3);
}
}
void PendingStream::OnDataAvailable() {
}
void PendingStream::OnFinRead() { QUICHE_DCHECK(sequencer_.IsClosed()); }
void PendingStream::AddBytesConsumed(QuicByteCount bytes) {
flow_controller_.AddBytesConsumed(bytes);
connection_flow_controller_->AddBytesConsumed(bytes);
}
void PendingStream::ResetWithError(QuicResetStreamError /*error*/) {
QUICHE_NOTREACHED();
}
void PendingStream::OnUnrecoverableError(QuicErrorCode error,
const std::string& details) {
stream_delegate_->OnStreamError(error, details);
}
void PendingStream::OnUnrecoverableError(QuicErrorCode error,
QuicIetfTransportErrorCodes ietf_error,
const std::string& details) {
stream_delegate_->OnStreamError(error, ietf_error, details);
}
QuicStreamId PendingStream::id() const { return id_; }
ParsedQuicVersion PendingStream::version() const { return version_; }
void PendingStream::OnStreamFrame(const QuicStreamFrame& frame) {
QUICHE_DCHECK_EQ(frame.stream_id, id_);
bool is_stream_too_long =
(frame.offset > kMaxStreamLength) ||
(kMaxStreamLength - frame.offset < frame.data_length);
if (is_stream_too_long) {
QUIC_PEER_BUG(quic_peer_bug_12570_1)
<< "Receive stream frame reaches max stream length. frame offset "
<< frame.offset << " length " << frame.data_length;
OnUnrecoverableError(QUIC_STREAM_LENGTH_OVERFLOW,
"Peer sends more data than allowed on this stream.");
return;
}
if (frame.offset + frame.data_length > sequencer_.close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_DATA_BEYOND_CLOSE_OFFSET,
absl::StrCat(
"Stream ", id_,
" received data with offset: ", frame.offset + frame.data_length,
", which is beyond close offset: ", sequencer()->close_offset()));
return;
}
if (frame.fin) {
fin_received_ = true;
}
QuicByteCount frame_payload_size = frame.data_length;
stream_bytes_read_ += frame_payload_size;
if (frame_payload_size > 0 &&
MaybeIncreaseHighestReceivedOffset(frame.offset + frame_payload_size)) {
if (flow_controller_.FlowControlViolation() ||
connection_flow_controller_->FlowControlViolation()) {
OnUnrecoverableError(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
"Flow control violation after increasing offset");
return;
}
}
sequencer_.OnStreamFrame(frame);
}
void PendingStream::OnRstStreamFrame(const QuicRstStreamFrame& frame) {
QUICHE_DCHECK_EQ(frame.stream_id, id_);
if (frame.byte_offset > kMaxStreamLength) {
OnUnrecoverableError(QUIC_STREAM_LENGTH_OVERFLOW,
"Reset frame stream offset overflow.");
return;
}
const QuicStreamOffset kMaxOffset =
std::numeric_limits<QuicStreamOffset>::max();
if (sequencer()->close_offset() != kMaxOffset &&
frame.byte_offset != sequencer()->close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_MULTIPLE_OFFSET,
absl::StrCat("Stream ", id_,
" received new final offset: ", frame.byte_offset,
", which is different from close offset: ",
sequencer()->close_offset()));
return;
}
MaybeIncreaseHighestReceivedOffset(frame.byte_offset);
if (flow_controller_.FlowControlViolation() ||
connection_flow_controller_->FlowControlViolation()) {
OnUnrecoverableError(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
"Flow control violation after increasing offset");
return;
}
}
void PendingStream::OnResetStreamAtFrame(const QuicResetStreamAtFrame& frame) {
if (frame.reliable_offset > sequencer()->close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_MULTIPLE_OFFSET,
absl::StrCat(
"Stream ", id_,
" received reliable reset with offset: ", frame.reliable_offset,
" greater than the FIN offset: ", sequencer()->close_offset()));
return;
}
if (buffered_reset_stream_at_.has_value() &&
(frame.reliable_offset > buffered_reset_stream_at_->reliable_offset)) {
return;
}
buffered_reset_stream_at_ = frame;
sequencer_.OnReliableReset(frame.reliable_offset);
}
void PendingStream::OnWindowUpdateFrame(const QuicWindowUpdateFrame& frame) {
QUICHE_DCHECK(is_bidirectional_);
flow_controller_.UpdateSendWindowOffset(frame.max_data);
}
bool PendingStream::MaybeIncreaseHighestReceivedOffset(
QuicStreamOffset new_offset) {
uint64_t increment =
new_offset - flow_controller_.highest_received_byte_offset();
if (!flow_controller_.UpdateHighestReceivedOffset(new_offset)) {
return false;
}
connection_flow_controller_->UpdateHighestReceivedOffset(
connection_flow_controller_->highest_received_byte_offset() + increment);
return true;
}
void PendingStream::OnStopSending(
QuicResetStreamError stop_sending_error_code) {
if (!stop_sending_error_code_) {
stop_sending_error_code_ = stop_sending_error_code;
}
}
void PendingStream::MarkConsumed(QuicByteCount num_bytes) {
sequencer_.MarkConsumed(num_bytes);
}
void PendingStream::StopReading() {
QUIC_DVLOG(1) << "Stop reading from pending stream " << id();
sequencer_.StopReading();
}
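// Takes over the sequencer, flow controllers and bookkeeping of a
// PendingStream once the stream's concrete type is known.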
QuicStream::QuicStream(PendingStream* pending, QuicSession* session,
bool is_static)
: QuicStream(
pending->id_, session, std::move(pending->sequencer_), is_static,
QuicUtils::GetStreamType(pending->id_, session->perspective(),
true,
session->version()),
pending->stream_bytes_read_, pending->fin_received_,
std::move(pending->flow_controller_),
pending->connection_flow_controller_,
(session->GetClock()->ApproximateNow() - pending->creation_time())) {
QUICHE_DCHECK(session->version().HasIetfQuicFrames());
sequencer_.set_stream(this);
buffered_reset_stream_at_ = pending->buffered_reset_stream_at();
}
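// Crypto streams are not flow controlled, so this helper returns std::nullopt
// for them and a fully configured QuicFlowController for every other type.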
namespace {
std::optional<QuicFlowController> FlowController(QuicStreamId id,
QuicSession* session,
StreamType type) {
if (type == CRYPTO) {
return std::nullopt;
}
return QuicFlowController(
session, id,
false,
GetReceivedFlowControlWindow(session, id),
GetInitialStreamFlowControlWindowToSend(session, id),
kStreamReceiveWindowLimit,
session->flow_controller()->auto_tune_receive_window(),
session->flow_controller());
}
}
QuicStream::QuicStream(QuicStreamId id, QuicSession* session, bool is_static,
StreamType type)
: QuicStream(id, session, QuicStreamSequencer(this), is_static, type, 0,
false, FlowController(id, session, type),
session->flow_controller(), QuicTime::Delta::Zero()) {}
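// Primary constructor carrying all state, including a sequencer and flow
// controller that may have been moved out of a PendingStream. Unidirectional
// streams immediately close the direction they never use.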
QuicStream::QuicStream(QuicStreamId id, QuicSession* session,
QuicStreamSequencer sequencer, bool is_static,
StreamType type, uint64_t stream_bytes_read,
bool fin_received,
std::optional<QuicFlowController> flow_controller,
QuicFlowController* connection_flow_controller,
QuicTime::Delta pending_duration)
: sequencer_(std::move(sequencer)),
id_(id),
session_(session),
stream_delegate_(session),
stream_bytes_read_(stream_bytes_read),
stream_error_(QuicResetStreamError::NoError()),
connection_error_(QUIC_NO_ERROR),
read_side_closed_(false),
write_side_closed_(false),
write_side_data_recvd_state_notified_(false),
fin_buffered_(false),
fin_sent_(false),
fin_outstanding_(false),
fin_lost_(false),
fin_received_(fin_received),
rst_sent_(false),
rst_received_(false),
stop_sending_sent_(false),
flow_controller_(std::move(flow_controller)),
connection_flow_controller_(connection_flow_controller),
stream_contributes_to_connection_flow_control_(true),
busy_counter_(0),
add_random_padding_after_fin_(false),
send_buffer_(
session->connection()->helper()->GetStreamSendBufferAllocator()),
buffered_data_threshold_(GetQuicFlag(quic_buffered_data_threshold)),
is_static_(is_static),
deadline_(QuicTime::Zero()),
was_draining_(false),
type_(VersionHasIetfQuicFrames(session->transport_version()) &&
type != CRYPTO
? QuicUtils::GetStreamType(id_, session->perspective(),
session->IsIncomingStream(id_),
session->version())
: type),
creation_time_(session->connection()->clock()->ApproximateNow()),
pending_duration_(pending_duration),
perspective_(session->perspective()) {
if (type_ == WRITE_UNIDIRECTIONAL) {
fin_received_ = true;
CloseReadSide();
} else if (type_ == READ_UNIDIRECTIONAL) {
fin_sent_ = true;
CloseWriteSide();
}
if (type_ != CRYPTO) {
stream_delegate_->RegisterStreamPriority(id, is_static_, priority_);
}
}
QuicStream::~QuicStream() {
if (session_ != nullptr && IsWaitingForAcks()) {
QUIC_DVLOG(1)
<< ENDPOINT << "Stream " << id_
<< " gets destroyed while waiting for acks. stream_bytes_outstanding = "
<< send_buffer_.stream_bytes_outstanding()
<< ", fin_outstanding: " << fin_outstanding_;
}
if (stream_delegate_ != nullptr && type_ != CRYPTO) {
stream_delegate_->UnregisterStreamPriority(id());
}
}
void QuicStream::OnStreamFrame(const QuicStreamFrame& frame) {
QUICHE_DCHECK_EQ(frame.stream_id, id_);
QUICHE_DCHECK(!(read_side_closed_ && write_side_closed_));
if (frame.fin && is_static_) {
OnUnrecoverableError(QUIC_INVALID_STREAM_ID,
"Attempt to close a static stream");
return;
}
if (type_ == WRITE_UNIDIRECTIONAL) {
OnUnrecoverableError(QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM,
"Data received on write unidirectional stream");
return;
}
bool is_stream_too_long =
(frame.offset > kMaxStreamLength) ||
(kMaxStreamLength - frame.offset < frame.data_length);
if (is_stream_too_long) {
QUIC_PEER_BUG(quic_peer_bug_10586_1)
<< "Receive stream frame on stream " << id_
<< " reaches max stream length. frame offset " << frame.offset
<< " length " << frame.data_length << ". " << sequencer_.DebugString();
OnUnrecoverableError(
QUIC_STREAM_LENGTH_OVERFLOW,
absl::StrCat("Peer sends more data than allowed on stream ", id_,
". frame: offset = ", frame.offset, ", length = ",
frame.data_length, ". ", sequencer_.DebugString()));
return;
}
if (frame.offset + frame.data_length > sequencer_.close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_DATA_BEYOND_CLOSE_OFFSET,
absl::StrCat(
"Stream ", id_,
" received data with offset: ", frame.offset + frame.data_length,
", which is beyond close offset: ", sequencer_.close_offset()));
return;
}
if (frame.fin && !fin_received_) {
fin_received_ = true;
if (fin_sent_) {
QUICHE_DCHECK(!was_draining_);
session_->StreamDraining(id_,
type_ != BIDIRECTIONAL);
was_draining_ = true;
}
}
if (read_side_closed_) {
QUIC_DLOG(INFO)
<< ENDPOINT << "Stream " << frame.stream_id
<< " is closed for reading. Ignoring newly received stream data.";
return;
}
QuicByteCount frame_payload_size = frame.data_length;
stream_bytes_read_ += frame_payload_size;
if (frame_payload_size > 0 &&
MaybeIncreaseHighestReceivedOffset(frame.offset + frame_payload_size)) {
QUIC_BUG_IF(quic_bug_12570_2, !flow_controller_.has_value())
<< ENDPOINT << "OnStreamFrame called on stream without flow control";
if ((flow_controller_.has_value() &&
flow_controller_->FlowControlViolation()) ||
connection_flow_controller_->FlowControlViolation()) {
OnUnrecoverableError(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
"Flow control violation after increasing offset");
return;
}
}
sequencer_.OnStreamFrame(frame);
}
bool QuicStream::OnStopSending(QuicResetStreamError error) {
if (write_side_closed() && !IsWaitingForAcks()) {
QUIC_DVLOG(1) << ENDPOINT
<< "Ignoring STOP_SENDING for a write closed stream, id: "
<< id_;
return false;
}
if (is_static_) {
QUIC_DVLOG(1) << ENDPOINT
<< "Received STOP_SENDING for a static stream, id: " << id_
<< " Closing connection";
OnUnrecoverableError(QUIC_INVALID_STREAM_ID,
"Received STOP_SENDING for a static stream");
return false;
}
stream_error_ = error;
MaybeSendRstStream(error);
if (session()->enable_stop_sending_for_zombie_streams() &&
read_side_closed_ && write_side_closed_ && !IsWaitingForAcks()) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_deliver_stop_sending_to_zombie_streams, 3,
3);
session()->MaybeCloseZombieStream(id_);
}
return true;
}
int QuicStream::num_frames_received() const {
return sequencer_.num_frames_received();
}
int QuicStream::num_duplicate_frames_received() const {
return sequencer_.num_duplicate_frames_received();
}
void QuicStream::OnStreamReset(const QuicRstStreamFrame& frame) {
rst_received_ = true;
if (frame.byte_offset > kMaxStreamLength) {
OnUnrecoverableError(QUIC_STREAM_LENGTH_OVERFLOW,
"Reset frame stream offset overflow.");
return;
}
const QuicStreamOffset kMaxOffset =
std::numeric_limits<QuicStreamOffset>::max();
if (sequencer()->close_offset() != kMaxOffset &&
frame.byte_offset != sequencer()->close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_MULTIPLE_OFFSET,
absl::StrCat("Stream ", id_,
" received new final offset: ", frame.byte_offset,
", which is different from close offset: ",
sequencer_.close_offset()));
return;
}
MaybeIncreaseHighestReceivedOffset(frame.byte_offset);
QUIC_BUG_IF(quic_bug_12570_3, !flow_controller_.has_value())
<< ENDPOINT << "OnStreamReset called on stream without flow control";
if ((flow_controller_.has_value() &&
flow_controller_->FlowControlViolation()) ||
connection_flow_controller_->FlowControlViolation()) {
OnUnrecoverableError(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA,
"Flow control violation after increasing offset");
return;
}
stream_error_ = frame.error();
if (!VersionHasIetfQuicFrames(transport_version())) {
CloseWriteSide();
}
CloseReadSide();
}
void QuicStream::OnResetStreamAtFrame(const QuicResetStreamAtFrame& frame) {
if (frame.reliable_offset > sequencer()->close_offset()) {
OnUnrecoverableError(
QUIC_STREAM_MULTIPLE_OFFSET,
absl::StrCat(
"Stream ", id_,
" received reliable reset with offset: ", frame.reliable_offset,
" greater than the FIN offset: ", sequencer()->close_offset()));
return;
}
if (buffered_reset_stream_at_.has_value() &&
(frame.reliable_offset > buffered_reset_stream_at_->reliable_offset)) {
return;
}
buffered_reset_stream_at_ = frame;
MaybeCloseStreamWithBufferedReset();
if (!rst_received_) {
sequencer_.OnReliableReset(frame.reliable_offset);
}
}
void QuicStream::OnConnectionClosed(const QuicConnectionCloseFrame& frame,
ConnectionCloseSource ) {
if (read_side_closed_ && write_side_closed_) {
return;
}
auto error_code = frame.quic_error_code;
if (error_code != QUIC_NO_ERROR) {
stream_error_ =
QuicResetStreamError::FromInternal(QUIC_STREAM_CONNECTION_ERROR);
connection_error_ = error_code;
}
CloseWriteSide();
CloseReadSide();
}
void QuicStream::OnFinRead() {
QUICHE_DCHECK(sequencer_.IsClosed());
fin_received_ = true;
CloseReadSide();
}
void QuicStream::SetFinSent() {
QUICHE_DCHECK(!VersionUsesHttp3(transport_version()));
fin_sent_ = true;
}
void QuicStream::Reset(QuicRstStreamErrorCode error) {
ResetWithError(QuicResetStreamError::FromInternal(error));
}
void QuicStream::ResetWithError(QuicResetStreamError error) {
stream_error_ = error;
QuicConnection::ScopedPacketFlusher flusher(session()->connection());
MaybeSendStopSending(error);
MaybeSendRstStream(error);
if (read_side_closed_ && write_side_closed_ && !IsWaitingForAcks()) {
session()->MaybeCloseZombieStream(id_);
}
}
void QuicStream::ResetWriteSide(QuicResetStreamError error) {
stream_error_ = error;
MaybeSendRstStream(error);
if (read_side_closed_ && write_side_closed_ && !IsWaitingForAcks()) {
session()->MaybeCloseZombieStream(id_);
}
}
void QuicStream::SendStopSending(QuicResetStreamError error) {
stream_error_ = error;
MaybeSendStopSending(error);
if (read_side_closed_ && write_side_closed_ && !IsWaitingForAcks()) {
session()->MaybeCloseZombieStream(id_);
}
}
void QuicStream::OnUnrecoverableError(QuicErrorCode error,
const std::string& details) {
stream_delegate_->OnStreamError(error, details);
}
void QuicStream::OnUnrecoverableError(QuicErrorCode error,
QuicIetfTransportErrorCodes ietf_error,
const std::string& details) {
stream_delegate_->OnStreamError(error, ietf_error, details);
}
const QuicStreamPriority& QuicStream::priority() const { return priority_; }
void QuicStream::SetPriority(const QuicStreamPriority& priority) {
priority_ = priority;
MaybeSendPriorityUpdateFrame();
stream_delegate_->UpdateStreamPriority(id(), priority);
}
void QuicStream::WriteOrBufferData(
absl::string_view data, bool fin,
quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
ack_listener) {
QUIC_BUG_IF(quic_bug_12570_4,
QuicUtils::IsCryptoStreamId(transport_version(), id_))
<< ENDPOINT
<< "WriteOrBufferData is used to send application data, use "
"WriteOrBufferDataAtLevel to send crypto data.";
return WriteOrBufferDataAtLevel(
data, fin, session()->GetEncryptionLevelToSendApplicationData(),
ack_listener);
}
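// Buffers the data in the send buffer and, if nothing was buffered before
// this call, immediately tries to write it at the given encryption level.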
void QuicStream::WriteOrBufferDataAtLevel(
absl::string_view data, bool fin, EncryptionLevel level,
quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
ack_listener) {
if (data.empty() && !fin) {
QUIC_BUG(quic_bug_10586_2) << "data.empty() && !fin";
return;
}
if (fin_buffered_) {
QUIC_BUG(quic_bug_10586_3) << "Fin already buffered";
return;
}
if (write_side_closed_) {
QUIC_DLOG(ERROR) << ENDPOINT
<< "Attempt to write when the write side is closed";
if (type_ == READ_UNIDIRECTIONAL) {
OnUnrecoverableError(QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM,
"Try to send data on read unidirectional stream");
}
return;
}
fin_buffered_ = fin;
bool had_buffered_data = HasBufferedData();
if (data.length() > 0) {
QuicStreamOffset offset = send_buffer_.stream_offset();
if (kMaxStreamLength - offset < data.length()) {
QUIC_BUG(quic_bug_10586_4) << "Write too many data via stream " << id_;
OnUnrecoverableError(
QUIC_STREAM_LENGTH_OVERFLOW,
absl::StrCat("Write too many data via stream ", id_));
return;
}
send_buffer_.SaveStreamData(data);
OnDataBuffered(offset, data.length(), ack_listener);
}
if (!had_buffered_data && (HasBufferedData() || fin_buffered_)) {
WriteBufferedData(level);
}
}
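// Invoked when this stream may write again: an expired TTL deadline and
// pending retransmissions take priority over newly buffered data.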
void QuicStream::OnCanWrite() {
if (HasDeadlinePassed()) {
OnDeadlinePassed();
return;
}
if (HasPendingRetransmission()) {
WritePendingRetransmission();
return;
}
if (write_side_closed_) {
QUIC_DLOG(ERROR)
<< ENDPOINT << "Stream " << id()
<< " attempting to write new data when the write side is closed";
return;
}
if (HasBufferedData() || (fin_buffered_ && !fin_sent_)) {
WriteBufferedData(session()->GetEncryptionLevelToSendApplicationData());
}
if (!fin_buffered_ && !fin_sent_ && CanWriteNewData()) {
OnCanWriteNewData();
}
}
void QuicStream::MaybeSendBlocked() {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_5)
<< ENDPOINT << "MaybeSendBlocked called on stream without flow control";
return;
}
flow_controller_->MaybeSendBlocked();
if (!stream_contributes_to_connection_flow_control_) {
return;
}
connection_flow_controller_->MaybeSendBlocked();
if (!write_side_closed_ && connection_flow_controller_->IsBlocked() &&
!flow_controller_->IsBlocked()) {
session_->MarkConnectionLevelWriteBlocked(id());
}
}
QuicConsumedData QuicStream::WriteMemSlice(quiche::QuicheMemSlice span,
bool fin) {
return WriteMemSlices(absl::MakeSpan(&span, 1), fin);
}
QuicConsumedData QuicStream::WriteMemSlices(
absl::Span<quiche::QuicheMemSlice> span, bool fin,
bool buffer_unconditionally) {
QuicConsumedData consumed_data(0, false);
if (span.empty() && !fin) {
QUIC_BUG(quic_bug_10586_6) << "span.empty() && !fin";
return consumed_data;
}
if (fin_buffered_) {
QUIC_BUG(quic_bug_10586_7) << "Fin already buffered";
return consumed_data;
}
if (write_side_closed_) {
QUIC_DLOG(ERROR) << ENDPOINT << "Stream " << id()
<< " attempting to write when the write side is closed";
if (type_ == READ_UNIDIRECTIONAL) {
OnUnrecoverableError(QUIC_TRY_TO_WRITE_DATA_ON_READ_UNIDIRECTIONAL_STREAM,
"Try to send data on read unidirectional stream");
}
return consumed_data;
}
bool had_buffered_data = HasBufferedData();
if (CanWriteNewData() || span.empty() || buffer_unconditionally) {
consumed_data.fin_consumed = fin;
if (!span.empty()) {
QuicStreamOffset offset = send_buffer_.stream_offset();
consumed_data.bytes_consumed = send_buffer_.SaveMemSliceSpan(span);
if (offset > send_buffer_.stream_offset() ||
kMaxStreamLength < send_buffer_.stream_offset()) {
QUIC_BUG(quic_bug_10586_8) << "Write too many data via stream " << id_;
OnUnrecoverableError(
QUIC_STREAM_LENGTH_OVERFLOW,
absl::StrCat("Write too many data via stream ", id_));
return consumed_data;
}
OnDataBuffered(offset, consumed_data.bytes_consumed, nullptr);
}
}
fin_buffered_ = consumed_data.fin_consumed;
if (!had_buffered_data && (HasBufferedData() || fin_buffered_)) {
WriteBufferedData(session()->GetEncryptionLevelToSendApplicationData());
}
return consumed_data;
}
bool QuicStream::HasPendingRetransmission() const {
return send_buffer_.HasPendingRetransmission() || fin_lost_;
}
bool QuicStream::IsStreamFrameOutstanding(QuicStreamOffset offset,
QuicByteCount data_length,
bool fin) const {
return send_buffer_.IsStreamDataOutstanding(offset, data_length) ||
(fin && fin_outstanding_);
}
void QuicStream::CloseReadSide() {
if (read_side_closed_) {
return;
}
QUIC_DVLOG(1) << ENDPOINT << "Done reading from stream " << id();
read_side_closed_ = true;
sequencer_.ReleaseBuffer();
if (write_side_closed_) {
QUIC_DVLOG(1) << ENDPOINT << "Closing stream " << id();
session_->OnStreamClosed(id());
OnClose();
}
}
void QuicStream::CloseWriteSide() {
if (write_side_closed_) {
return;
}
QUIC_DVLOG(1) << ENDPOINT << "Done writing to stream " << id();
write_side_closed_ = true;
if (read_side_closed_) {
QUIC_DVLOG(1) << ENDPOINT << "Closing stream " << id();
session_->OnStreamClosed(id());
OnClose();
}
}
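// Sends STOP_SENDING at most once. Versions without HTTP/3 have no
// STOP_SENDING frame, so an error-free RST_STREAM is sent in its place.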
void QuicStream::MaybeSendStopSending(QuicResetStreamError error) {
if (stop_sending_sent_) {
return;
}
if (!session()->version().UsesHttp3() && !error.ok()) {
return;
}
if (session()->version().UsesHttp3()) {
session()->MaybeSendStopSendingFrame(id(), error);
} else {
QUICHE_DCHECK_EQ(QUIC_STREAM_NO_ERROR, error.internal_code());
session()->MaybeSendRstStreamFrame(id(), QuicResetStreamError::NoError(),
stream_bytes_written());
}
stop_sending_sent_ = true;
CloseReadSide();
}
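// Sends RST_STREAM at most once and closes the write side. In versions
// without HTTP/3 a RST_STREAM also carries STOP_SENDING semantics, so the
// read side is closed here as well.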
void QuicStream::MaybeSendRstStream(QuicResetStreamError error) {
if (rst_sent_) {
return;
}
if (!session()->version().UsesHttp3()) {
QUIC_BUG_IF(quic_bug_12570_5, error.ok());
stop_sending_sent_ = true;
CloseReadSide();
}
session()->MaybeSendRstStreamFrame(id(), error, stream_bytes_written());
rst_sent_ = true;
CloseWriteSide();
}
bool QuicStream::HasBufferedData() const {
QUICHE_DCHECK_GE(send_buffer_.stream_offset(), stream_bytes_written());
return send_buffer_.stream_offset() > stream_bytes_written();
}
ParsedQuicVersion QuicStream::version() const { return session_->version(); }
QuicTransportVersion QuicStream::transport_version() const {
return session_->transport_version();
}
HandshakeProtocol QuicStream::handshake_protocol() const {
return session_->connection()->version().handshake_protocol;
}
void QuicStream::StopReading() {
QUIC_DVLOG(1) << ENDPOINT << "Stop reading from stream " << id();
sequencer_.StopReading();
}
void QuicStream::OnClose() {
QUICHE_DCHECK(read_side_closed_ && write_side_closed_);
if (!fin_sent_ && !rst_sent_) {
QUIC_BUG_IF(quic_bug_12570_6, session()->connection()->connected() &&
session()->version().UsesHttp3())
<< "The stream should've already sent RST in response to "
"STOP_SENDING";
MaybeSendRstStream(QUIC_RST_ACKNOWLEDGEMENT);
session_->MaybeCloseZombieStream(id_);
}
if (!flow_controller_.has_value() ||
flow_controller_->FlowControlViolation() ||
connection_flow_controller_->FlowControlViolation()) {
return;
}
QuicByteCount bytes_to_consume =
flow_controller_->highest_received_byte_offset() -
flow_controller_->bytes_consumed();
AddBytesConsumed(bytes_to_consume);
}
void QuicStream::OnWindowUpdateFrame(const QuicWindowUpdateFrame& frame) {
if (type_ == READ_UNIDIRECTIONAL) {
OnUnrecoverableError(
QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM,
"WindowUpdateFrame received on READ_UNIDIRECTIONAL stream.");
return;
}
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_9)
<< ENDPOINT
<< "OnWindowUpdateFrame called on stream without flow control";
return;
}
if (flow_controller_->UpdateSendWindowOffset(frame.max_data)) {
session_->MarkConnectionLevelWriteBlocked(id_);
}
}
bool QuicStream::MaybeIncreaseHighestReceivedOffset(
QuicStreamOffset new_offset) {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_10)
<< ENDPOINT
<< "MaybeIncreaseHighestReceivedOffset called on stream without "
"flow control";
return false;
}
uint64_t increment =
new_offset - flow_controller_->highest_received_byte_offset();
if (!flow_controller_->UpdateHighestReceivedOffset(new_offset)) {
return false;
}
if (stream_contributes_to_connection_flow_control_) {
connection_flow_controller_->UpdateHighestReceivedOffset(
connection_flow_controller_->highest_received_byte_offset() +
increment);
}
return true;
}
void QuicStream::AddBytesSent(QuicByteCount bytes) {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_11)
<< ENDPOINT << "AddBytesSent called on stream without flow control";
return;
}
flow_controller_->AddBytesSent(bytes);
if (stream_contributes_to_connection_flow_control_) {
connection_flow_controller_->AddBytesSent(bytes);
}
}
void QuicStream::AddBytesConsumed(QuicByteCount bytes) {
if (type_ == CRYPTO) {
return;
}
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_12570_7)
<< ENDPOINT
<< "AddBytesConsumed called on non-crypto stream without flow control";
return;
}
if (!read_side_closed_) {
flow_controller_->AddBytesConsumed(bytes);
}
if (stream_contributes_to_connection_flow_control_) {
connection_flow_controller_->AddBytesConsumed(bytes);
}
MaybeCloseStreamWithBufferedReset();
}
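// Applies a send window offset learned from the peer's transport parameters.
// A value below the current offset is only expected around 0-RTT rejection or
// resumption and is treated as an error in the cases handled below.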
bool QuicStream::MaybeConfigSendWindowOffset(QuicStreamOffset new_offset,
bool was_zero_rtt_rejected) {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_12)
<< ENDPOINT
<< "ConfigSendWindowOffset called on stream without flow control";
return false;
}
if (new_offset < flow_controller_->send_window_offset()) {
QUICHE_DCHECK(session()->version().UsesTls());
if (was_zero_rtt_rejected && new_offset < flow_controller_->bytes_sent()) {
QUIC_BUG_IF(quic_bug_12570_8, perspective_ == Perspective::IS_SERVER)
<< "Server streams' flow control should never be configured twice.";
OnUnrecoverableError(
QUIC_ZERO_RTT_UNRETRANSMITTABLE,
absl::StrCat(
"Server rejected 0-RTT, aborting because new stream max data ",
new_offset, " for stream ", id_, " is less than currently used: ",
flow_controller_->bytes_sent()));
return false;
} else if (session()->version().AllowsLowFlowControlLimits()) {
QUIC_BUG_IF(quic_bug_12570_9, perspective_ == Perspective::IS_SERVER)
<< "Server streams' flow control should never be configured twice.";
OnUnrecoverableError(
was_zero_rtt_rejected ? QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED
: QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED,
absl::StrCat(
was_zero_rtt_rejected ? "Server rejected 0-RTT, aborting because "
: "",
"new stream max data ", new_offset, " decreases current limit: ",
flow_controller_->send_window_offset()));
return false;
}
}
if (flow_controller_->UpdateSendWindowOffset(new_offset)) {
session_->MarkConnectionLevelWriteBlocked(id_);
}
return true;
}
void QuicStream::AddRandomPaddingAfterFin() {
add_random_padding_after_fin_ = true;
}
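// Records an ack for [offset, offset + data_length) and, if applicable, the
// FIN. Returns true iff previously unacked data was newly acked; once nothing
// is outstanding and both sides are closed, the stream may be cleaned up.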
bool QuicStream::OnStreamFrameAcked(QuicStreamOffset offset,
QuicByteCount data_length, bool fin_acked,
QuicTime::Delta ,
QuicTime ,
QuicByteCount* newly_acked_length) {
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_ << " Acking "
<< "[" << offset << ", " << offset + data_length << "]"
<< " fin = " << fin_acked;
*newly_acked_length = 0;
if (!send_buffer_.OnStreamDataAcked(offset, data_length,
newly_acked_length)) {
OnUnrecoverableError(QUIC_INTERNAL_ERROR, "Trying to ack unsent data.");
return false;
}
if (!fin_sent_ && fin_acked) {
OnUnrecoverableError(QUIC_INTERNAL_ERROR, "Trying to ack unsent fin.");
return false;
}
const bool new_data_acked =
*newly_acked_length > 0 || (fin_acked && fin_outstanding_);
if (fin_acked) {
fin_outstanding_ = false;
fin_lost_ = false;
}
if (!IsWaitingForAcks() && write_side_closed_ &&
!write_side_data_recvd_state_notified_) {
OnWriteSideInDataRecvdState();
write_side_data_recvd_state_notified_ = true;
}
if (!IsWaitingForAcks() && read_side_closed_ && write_side_closed_) {
session_->MaybeCloseZombieStream(id_);
}
return new_data_acked;
}
void QuicStream::OnStreamFrameRetransmitted(QuicStreamOffset offset,
QuicByteCount data_length,
bool fin_retransmitted) {
send_buffer_.OnStreamDataRetransmitted(offset, data_length);
if (fin_retransmitted) {
fin_lost_ = false;
}
}
void QuicStream::OnStreamFrameLost(QuicStreamOffset offset,
QuicByteCount data_length, bool fin_lost) {
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_ << " Losting "
<< "[" << offset << ", " << offset + data_length << "]"
<< " fin = " << fin_lost;
if (data_length > 0) {
send_buffer_.OnStreamDataLost(offset, data_length);
}
if (fin_lost && fin_outstanding_) {
fin_lost_ = true;
}
}
bool QuicStream::RetransmitStreamData(QuicStreamOffset offset,
QuicByteCount data_length, bool fin,
TransmissionType type) {
QUICHE_DCHECK(type == PTO_RETRANSMISSION);
if (HasDeadlinePassed()) {
OnDeadlinePassed();
return true;
}
QuicIntervalSet<QuicStreamOffset> retransmission(offset,
offset + data_length);
retransmission.Difference(bytes_acked());
bool retransmit_fin = fin && fin_outstanding_;
if (retransmission.Empty() && !retransmit_fin) {
return true;
}
QuicConsumedData consumed(0, false);
for (const auto& interval : retransmission) {
QuicStreamOffset retransmission_offset = interval.min();
QuicByteCount retransmission_length = interval.max() - interval.min();
const bool can_bundle_fin =
retransmit_fin && (retransmission_offset + retransmission_length ==
stream_bytes_written());
consumed = stream_delegate_->WritevData(
id_, retransmission_length, retransmission_offset,
can_bundle_fin ? FIN : NO_FIN, type,
session()->GetEncryptionLevelToSendApplicationData());
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_
<< " is forced to retransmit stream data ["
<< retransmission_offset << ", "
<< retransmission_offset + retransmission_length
<< ") and fin: " << can_bundle_fin
<< ", consumed: " << consumed;
OnStreamFrameRetransmitted(retransmission_offset, consumed.bytes_consumed,
consumed.fin_consumed);
if (can_bundle_fin) {
retransmit_fin = !consumed.fin_consumed;
}
if (consumed.bytes_consumed < retransmission_length ||
(can_bundle_fin && !consumed.fin_consumed)) {
return false;
}
}
if (retransmit_fin) {
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_
<< " retransmits fin only frame.";
consumed = stream_delegate_->WritevData(
id_, 0, stream_bytes_written(), FIN, type,
session()->GetEncryptionLevelToSendApplicationData());
if (!consumed.fin_consumed) {
return false;
}
}
return true;
}
bool QuicStream::IsWaitingForAcks() const {
return (!rst_sent_ || stream_error_.ok()) &&
(send_buffer_.stream_bytes_outstanding() || fin_outstanding_);
}
bool QuicStream::WriteStreamData(QuicStreamOffset offset,
QuicByteCount data_length,
QuicDataWriter* writer) {
QUICHE_DCHECK_LT(0u, data_length);
QUIC_DVLOG(2) << ENDPOINT << "Write stream " << id_ << " data from offset "
<< offset << " length " << data_length;
return send_buffer_.WriteStreamData(offset, data_length, writer);
}
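// Writes as much buffered data as the stream and connection send windows
// allow at the given encryption level; the FIN is only sent once all buffered
// data fits in the window.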
void QuicStream::WriteBufferedData(EncryptionLevel level) {
QUICHE_DCHECK(!write_side_closed_ && (HasBufferedData() || fin_buffered_));
if (session_->ShouldYield(id())) {
session_->MarkConnectionLevelWriteBlocked(id());
return;
}
QuicByteCount write_length = BufferedDataBytes();
bool fin_with_zero_data = (fin_buffered_ && write_length == 0);
bool fin = fin_buffered_;
QUIC_BUG_IF(quic_bug_10586_13, !flow_controller_.has_value())
<< ENDPOINT << "WriteBufferedData called on stream without flow control";
QuicByteCount send_window = CalculateSendWindowSize();
if (send_window == 0 && !fin_with_zero_data) {
MaybeSendBlocked();
return;
}
if (write_length > send_window) {
fin = false;
write_length = send_window;
QUIC_DVLOG(1) << "stream " << id() << " shortens write length to "
<< write_length << " due to flow control";
}
StreamSendingState state = fin ? FIN : NO_FIN;
if (fin && add_random_padding_after_fin_) {
state = FIN_AND_PADDING;
}
QuicConsumedData consumed_data =
stream_delegate_->WritevData(id(), write_length, stream_bytes_written(),
state, NOT_RETRANSMISSION, level);
OnStreamDataConsumed(consumed_data.bytes_consumed);
AddBytesSent(consumed_data.bytes_consumed);
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_ << " sends "
<< stream_bytes_written() << " bytes "
<< " and has buffered data " << BufferedDataBytes() << " bytes."
<< " fin is sent: " << consumed_data.fin_consumed
<< " fin is buffered: " << fin_buffered_;
if (write_side_closed_) {
return;
}
if (consumed_data.bytes_consumed == write_length) {
if (!fin_with_zero_data) {
MaybeSendBlocked();
}
if (fin && consumed_data.fin_consumed) {
QUICHE_DCHECK(!fin_sent_);
fin_sent_ = true;
fin_outstanding_ = true;
if (fin_received_) {
QUICHE_DCHECK(!was_draining_);
session_->StreamDraining(id_,
type_ != BIDIRECTIONAL);
was_draining_ = true;
}
CloseWriteSide();
} else if (fin && !consumed_data.fin_consumed && !write_side_closed_) {
session_->MarkConnectionLevelWriteBlocked(id());
}
} else {
session_->MarkConnectionLevelWriteBlocked(id());
}
if (consumed_data.bytes_consumed > 0 || consumed_data.fin_consumed) {
busy_counter_ = 0;
}
}
uint64_t QuicStream::BufferedDataBytes() const {
QUICHE_DCHECK_GE(send_buffer_.stream_offset(), stream_bytes_written());
return send_buffer_.stream_offset() - stream_bytes_written();
}
bool QuicStream::CanWriteNewData() const {
return BufferedDataBytes() < buffered_data_threshold_;
}
bool QuicStream::CanWriteNewDataAfterData(QuicByteCount length) const {
return (BufferedDataBytes() + length) < buffered_data_threshold_;
}
uint64_t QuicStream::stream_bytes_written() const {
return send_buffer_.stream_bytes_written();
}
const QuicIntervalSet<QuicStreamOffset>& QuicStream::bytes_acked() const {
return send_buffer_.bytes_acked();
}
void QuicStream::OnStreamDataConsumed(QuicByteCount bytes_consumed) {
send_buffer_.OnStreamDataConsumed(bytes_consumed);
}
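// Retransmits lost stream data (and a lost FIN) until everything pending has
// been rewritten or the connection becomes write blocked.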
void QuicStream::WritePendingRetransmission() {
while (HasPendingRetransmission()) {
QuicConsumedData consumed(0, false);
if (!send_buffer_.HasPendingRetransmission()) {
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_
<< " retransmits fin only frame.";
consumed = stream_delegate_->WritevData(
id_, 0, stream_bytes_written(), FIN, LOSS_RETRANSMISSION,
session()->GetEncryptionLevelToSendApplicationData());
fin_lost_ = !consumed.fin_consumed;
if (fin_lost_) {
return;
}
} else {
StreamPendingRetransmission pending =
send_buffer_.NextPendingRetransmission();
const bool can_bundle_fin =
fin_lost_ &&
(pending.offset + pending.length == stream_bytes_written());
consumed = stream_delegate_->WritevData(
id_, pending.length, pending.offset, can_bundle_fin ? FIN : NO_FIN,
LOSS_RETRANSMISSION,
session()->GetEncryptionLevelToSendApplicationData());
QUIC_DVLOG(1) << ENDPOINT << "stream " << id_
<< " tries to retransmit stream data [" << pending.offset
<< ", " << pending.offset + pending.length
<< ") and fin: " << can_bundle_fin
<< ", consumed: " << consumed;
OnStreamFrameRetransmitted(pending.offset, consumed.bytes_consumed,
consumed.fin_consumed);
if (consumed.bytes_consumed < pending.length ||
(can_bundle_fin && !consumed.fin_consumed)) {
return;
}
}
}
}
bool QuicStream::MaybeSetTtl(QuicTime::Delta ttl) {
if (is_static_) {
QUIC_BUG(quic_bug_10586_14) << "Cannot set TTL of a static stream.";
return false;
}
if (deadline_.IsInitialized()) {
QUIC_DLOG(WARNING) << "Deadline has already been set.";
return false;
}
QuicTime now = session()->connection()->clock()->ApproximateNow();
deadline_ = now + ttl;
return true;
}
bool QuicStream::HasDeadlinePassed() const {
if (!deadline_.IsInitialized()) {
return false;
}
QuicTime now = session()->connection()->clock()->ApproximateNow();
if (now < deadline_) {
return false;
}
QUIC_DVLOG(1) << "stream " << id() << " deadline has passed";
return true;
}
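// If a RESET_STREAM_AT frame was buffered and all reliable data up to its
// offset has been consumed, deliver it to the stream as a regular reset.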
void QuicStream::MaybeCloseStreamWithBufferedReset() {
if (buffered_reset_stream_at_.has_value() && !sequencer_.IsClosed() &&
NumBytesConsumed() >= buffered_reset_stream_at_->reliable_offset) {
OnStreamReset(buffered_reset_stream_at_->ToRstStream());
buffered_reset_stream_at_ = std::nullopt;
}
}
void QuicStream::OnDeadlinePassed() { Reset(QUIC_STREAM_TTL_EXPIRED); }
bool QuicStream::IsFlowControlBlocked() const {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_15)
<< "Trying to access non-existent flow controller.";
return false;
}
return flow_controller_->IsBlocked();
}
QuicStreamOffset QuicStream::highest_received_byte_offset() const {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_16)
<< "Trying to access non-existent flow controller.";
return 0;
}
return flow_controller_->highest_received_byte_offset();
}
void QuicStream::UpdateReceiveWindowSize(QuicStreamOffset size) {
if (!flow_controller_.has_value()) {
QUIC_BUG(quic_bug_10586_17)
<< "Trying to access non-existent flow controller.";
return;
}
flow_controller_->UpdateReceiveWindowSize(size);
}
std::optional<QuicByteCount> QuicStream::GetSendWindow() const {
return flow_controller_.has_value()
? std::optional<QuicByteCount>(flow_controller_->SendWindowSize())
: std::nullopt;
}
std::optional<QuicByteCount> QuicStream::GetReceiveWindow() const {
return flow_controller_.has_value()
? std::optional<QuicByteCount>(
flow_controller_->receive_window_size())
: std::nullopt;
}
void QuicStream::OnStreamCreatedFromPendingStream() {
sequencer()->SetUnblocked();
}
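// The effective send window is the stream-level window (unlimited for streams
// without flow control) capped by the connection-level window when this
// stream contributes to connection flow control.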
QuicByteCount QuicStream::CalculateSendWindowSize() const {
QuicByteCount send_window;
if (flow_controller_.has_value()) {
send_window = flow_controller_->SendWindowSize();
} else {
send_window = std::numeric_limits<QuicByteCount>::max();
}
if (stream_contributes_to_connection_flow_control_) {
send_window =
std::min(send_window, connection_flow_controller_->SendWindowSize());
}
return send_window;
}
}
#include "quiche/quic/core/quic_stream.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/frames/quic_connection_close_frame.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/frames/quic_rst_stream_frame.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/quic_write_blocked_list.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_flow_controller_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_stream_sequencer_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/quiche_mem_slice_storage.h"
using testing::_;
using testing::AnyNumber;
using testing::AtLeast;
using testing::InSequence;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
const char kData1[] = "FooAndBar";
const char kData2[] = "EepAndBaz";
const QuicByteCount kDataLen = 9;
const uint8_t kPacket0ByteConnectionId = 0;
const uint8_t kPacket8ByteConnectionId = 8;
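// Test double exposing protected QuicStream members and mocking the
// data-available and writability callbacks exercised by the tests below.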
class TestStream : public QuicStream {
public:
TestStream(QuicStreamId id, QuicSession* session, StreamType type)
: QuicStream(id, session, false, type) {
sequencer()->set_level_triggered(true);
}
TestStream(PendingStream* pending, QuicSession* session, bool is_static)
: QuicStream(pending, session, is_static) {}
MOCK_METHOD(void, OnDataAvailable, (), (override));
MOCK_METHOD(void, OnCanWriteNewData, (), (override));
MOCK_METHOD(void, OnWriteSideInDataRecvdState, (), (override));
using QuicStream::CanWriteNewData;
using QuicStream::CanWriteNewDataAfterData;
using QuicStream::CloseWriteSide;
using QuicStream::fin_buffered;
using QuicStream::MaybeSendStopSending;
using QuicStream::OnClose;
using QuicStream::WriteMemSlices;
using QuicStream::WriteOrBufferData;
void ConsumeData(size_t num_bytes) {
char buffer[1024];
ASSERT_GT(ABSL_ARRAYSIZE(buffer), num_bytes);
struct iovec iov;
iov.iov_base = buffer;
iov.iov_len = num_bytes;
ASSERT_EQ(num_bytes, QuicStreamPeer::sequencer(this)->Readv(&iov, 1));
}
private:
std::string data_;
};
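// Shared fixture: builds a mock connection and session, configures minimal
// flow control windows, and activates a bidirectional TestStream under test.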
class QuicStreamTest : public QuicTestWithParam<ParsedQuicVersion> {
public:
QuicStreamTest()
: zero_(QuicTime::Delta::Zero()),
supported_versions_(AllSupportedVersions()) {}
void Initialize(Perspective perspective = Perspective::IS_SERVER) {
ParsedQuicVersionVector version_vector;
version_vector.push_back(GetParam());
connection_ = new StrictMock<MockQuicConnection>(
&helper_, &alarm_factory_, perspective, version_vector);
connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
session_ = std::make_unique<StrictMock<MockQuicSession>>(connection_);
session_->Initialize();
connection_->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<NullEncrypter>(connection_->perspective()));
QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
session_->config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
session_->config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
session_->config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
session_->config(), kMinimumFlowControlSendWindow);
QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_->config(), 10);
session_->OnConfigNegotiated();
stream_ = new StrictMock<TestStream>(kTestStreamId, session_.get(),
BIDIRECTIONAL);
EXPECT_NE(nullptr, stream_);
EXPECT_CALL(*session_, ShouldKeepConnectionAlive())
.WillRepeatedly(Return(true));
session_->ActivateStream(absl::WrapUnique(stream_));
EXPECT_CALL(*session_, MaybeSendStopSendingFrame(kTestStreamId, _))
.Times(AnyNumber());
EXPECT_CALL(*session_, MaybeSendRstStreamFrame(kTestStreamId, _, _))
.Times(AnyNumber());
write_blocked_list_ =
QuicSessionPeer::GetWriteBlockedStreams(session_.get());
}
bool fin_sent() { return stream_->fin_sent(); }
bool rst_sent() { return stream_->rst_sent(); }
bool HasWriteBlockedStreams() {
return write_blocked_list_->HasWriteBlockedSpecialStream() ||
write_blocked_list_->HasWriteBlockedDataStreams();
}
QuicConsumedData CloseStreamOnWriteError(
QuicStreamId id, QuicByteCount ,
QuicStreamOffset , StreamSendingState ,
TransmissionType , std::optional<EncryptionLevel> ) {
session_->ResetStream(id, QUIC_STREAM_CANCELLED);
return QuicConsumedData(1, false);
}
bool ClearResetStreamFrame(const QuicFrame& frame) {
EXPECT_EQ(RST_STREAM_FRAME, frame.type);
DeleteFrame(&const_cast<QuicFrame&>(frame));
return true;
}
bool ClearStopSendingFrame(const QuicFrame& frame) {
EXPECT_EQ(STOP_SENDING_FRAME, frame.type);
DeleteFrame(&const_cast<QuicFrame&>(frame));
return true;
}
protected:
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
MockQuicConnection* connection_;
std::unique_ptr<MockQuicSession> session_;
StrictMock<TestStream>* stream_;
QuicWriteBlockedListInterface* write_blocked_list_;
QuicTime::Delta zero_;
ParsedQuicVersionVector supported_versions_;
QuicStreamId kTestStreamId = GetNthClientInitiatedBidirectionalStreamId(
GetParam().transport_version, 1);
const QuicStreamId kTestPendingStreamId =
GetNthClientInitiatedUnidirectionalStreamId(GetParam().transport_version,
1);
};
INSTANTIATE_TEST_SUITE_P(QuicStreamTests, QuicStreamTest,
::testing::ValuesIn(AllSupportedVersions()),
::testing::PrintToStringParamName());
using PendingStreamTest = QuicStreamTest;
INSTANTIATE_TEST_SUITE_P(PendingStreamTests, PendingStreamTest,
::testing::ValuesIn(CurrentSupportedHttp3Versions()),
::testing::PrintToStringParamName());
TEST_P(PendingStreamTest, PendingStreamStaticness) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
TestStream stream(&pending, session_.get(), false);
EXPECT_FALSE(stream.is_static());
PendingStream pending2(kTestPendingStreamId + 4, session_.get());
TestStream stream2(&pending2, session_.get(), true);
EXPECT_TRUE(stream2.is_static());
}
TEST_P(PendingStreamTest, PendingStreamType) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
TestStream stream(&pending, session_.get(), false);
EXPECT_EQ(stream.type(), READ_UNIDIRECTIONAL);
}
TEST_P(PendingStreamTest, PendingStreamTypeOnClient) {
Initialize(Perspective::IS_CLIENT);
QuicStreamId server_initiated_pending_stream_id =
GetNthServerInitiatedUnidirectionalStreamId(session_->transport_version(),
1);
PendingStream pending(server_initiated_pending_stream_id, session_.get());
TestStream stream(&pending, session_.get(), false);
EXPECT_EQ(stream.type(), READ_UNIDIRECTIONAL);
}
TEST_P(PendingStreamTest, PendingStreamTooMuchData) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
QuicStreamFrame frame(kTestPendingStreamId, false,
kInitialSessionFlowControlWindowForTest + 1, ".");
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
pending.OnStreamFrame(frame);
}
TEST_P(PendingStreamTest, PendingStreamTooMuchDataInRstStream) {
Initialize();
PendingStream pending1(kTestPendingStreamId, session_.get());
QuicRstStreamFrame frame1(kInvalidControlFrameId, kTestPendingStreamId,
QUIC_STREAM_CANCELLED,
kInitialSessionFlowControlWindowForTest + 1);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
pending1.OnRstStreamFrame(frame1);
QuicStreamId bidirection_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
session_->transport_version(), Perspective::IS_CLIENT);
PendingStream pending2(bidirection_stream_id, session_.get());
QuicRstStreamFrame frame2(kInvalidControlFrameId, bidirection_stream_id,
QUIC_STREAM_CANCELLED,
kInitialSessionFlowControlWindowForTest + 1);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
pending2.OnRstStreamFrame(frame2);
}
TEST_P(PendingStreamTest, PendingStreamRstStream) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
QuicStreamOffset final_byte_offset = 7;
QuicRstStreamFrame frame(kInvalidControlFrameId, kTestPendingStreamId,
QUIC_STREAM_CANCELLED, final_byte_offset);
EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
pending.OnRstStreamFrame(frame);
}
TEST_P(PendingStreamTest, PendingStreamWindowUpdate) {
Initialize();
QuicStreamId bidirection_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
session_->transport_version(), Perspective::IS_CLIENT);
PendingStream pending(bidirection_stream_id, session_.get());
QuicWindowUpdateFrame frame(kInvalidControlFrameId, bidirection_stream_id,
kDefaultFlowControlSendWindow * 2);
pending.OnWindowUpdateFrame(frame);
TestStream stream(&pending, session_.get(), false);
EXPECT_EQ(QuicStreamPeer::SendWindowSize(&stream),
kDefaultFlowControlSendWindow * 2);
}
TEST_P(PendingStreamTest, PendingStreamStopSending) {
Initialize();
QuicStreamId bidirection_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
session_->transport_version(), Perspective::IS_CLIENT);
PendingStream pending(bidirection_stream_id, session_.get());
QuicResetStreamError error =
QuicResetStreamError::FromInternal(QUIC_STREAM_INTERNAL_ERROR);
pending.OnStopSending(error);
EXPECT_TRUE(pending.GetStopSendingErrorCode());
auto actual_error = *pending.GetStopSendingErrorCode();
EXPECT_EQ(actual_error, error);
}
TEST_P(PendingStreamTest, FromPendingStream) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
QuicStreamFrame frame(kTestPendingStreamId, false, 2, ".");
pending.OnStreamFrame(frame);
pending.OnStreamFrame(frame);
QuicStreamFrame frame2(kTestPendingStreamId, true, 3, ".");
pending.OnStreamFrame(frame2);
TestStream stream(&pending, session_.get(), false);
EXPECT_EQ(3, stream.num_frames_received());
EXPECT_EQ(3u, stream.stream_bytes_read());
EXPECT_EQ(1, stream.num_duplicate_frames_received());
EXPECT_EQ(true, stream.fin_received());
EXPECT_EQ(frame2.offset + 1, stream.highest_received_byte_offset());
EXPECT_EQ(frame2.offset + 1,
session_->flow_controller()->highest_received_byte_offset());
}
TEST_P(PendingStreamTest, FromPendingStreamThenData) {
Initialize();
PendingStream pending(kTestPendingStreamId, session_.get());
QuicStreamFrame frame(kTestPendingStreamId, false, 2, ".");
pending.OnStreamFrame(frame);
auto stream = new TestStream(&pending, session_.get(), false);
session_->ActivateStream(absl::WrapUnique(stream));
QuicStreamFrame frame2(kTestPendingStreamId, true, 3, ".");
stream->OnStreamFrame(frame2);
EXPECT_EQ(2, stream->num_frames_received());
EXPECT_EQ(2u, stream->stream_bytes_read());
EXPECT_EQ(true, stream->fin_received());
EXPECT_EQ(frame2.offset + 1, stream->highest_received_byte_offset());
EXPECT_EQ(frame2.offset + 1,
session_->flow_controller()->highest_received_byte_offset());
}
TEST_P(PendingStreamTest, ResetStreamAt) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
PendingStream pending(kTestPendingStreamId, session_.get());
QuicResetStreamAtFrame rst(0, kTestPendingStreamId, QUIC_STREAM_CANCELLED,
100, 3);
pending.OnResetStreamAtFrame(rst);
QuicStreamFrame frame(kTestPendingStreamId, false, 2, ".");
pending.OnStreamFrame(frame);
auto stream = new TestStream(&pending, session_.get(), false);
session_->ActivateStream(absl::WrapUnique(stream));
EXPECT_FALSE(stream->rst_received());
EXPECT_FALSE(stream->read_side_closed());
EXPECT_CALL(*stream, OnDataAvailable()).WillOnce([&]() {
stream->ConsumeData(3);
});
QuicStreamFrame frame2(kTestPendingStreamId, false, 0, "..");
stream->OnStreamFrame(frame2);
EXPECT_TRUE(stream->read_side_closed());
EXPECT_TRUE(stream->rst_received());
}
TEST_P(QuicStreamTest, WriteAllData) {
Initialize();
QuicByteCount length =
1 + QuicPacketCreator::StreamFramePacketOverhead(
connection_->transport_version(), kPacket8ByteConnectionId,
kPacket0ByteConnectionId, !kIncludeVersion,
!kIncludeDiversificationNonce, PACKET_4BYTE_PACKET_NUMBER,
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0,
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0, 0u);
connection_->SetMaxPacketLength(length);
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_FALSE(HasWriteBlockedStreams());
}
TEST_P(QuicStreamTest, NoBlockingIfNoDataOrFin) {
Initialize();
EXPECT_QUIC_BUG(
stream_->WriteOrBufferData(absl::string_view(), false, nullptr), "");
EXPECT_FALSE(HasWriteBlockedStreams());
}
TEST_P(QuicStreamTest, BlockIfOnlySomeDataConsumed) {
Initialize();
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 1u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 2), false, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
ASSERT_EQ(1u, write_blocked_list_->NumBlockedStreams());
EXPECT_EQ(1u, stream_->BufferedDataBytes());
}
TEST_P(QuicStreamTest, BlockIfFinNotConsumedWithData) {
Initialize();
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 2u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 2), true, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
ASSERT_EQ(1u, write_blocked_list_->NumBlockedStreams());
}
TEST_P(QuicStreamTest, BlockIfSoloFinNotConsumed) {
Initialize();
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(Return(QuicConsumedData(0, false)));
stream_->WriteOrBufferData(absl::string_view(), true, nullptr);
ASSERT_EQ(1u, write_blocked_list_->NumBlockedStreams());
}
TEST_P(QuicStreamTest, CloseOnPartialWrite) {
Initialize();
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(Invoke(this, &QuicStreamTest::CloseStreamOnWriteError));
stream_->WriteOrBufferData(absl::string_view(kData1, 2), false, nullptr);
ASSERT_EQ(0u, write_blocked_list_->NumBlockedStreams());
}
TEST_P(QuicStreamTest, WriteOrBufferData) {
Initialize();
EXPECT_FALSE(HasWriteBlockedStreams());
QuicByteCount length =
1 + QuicPacketCreator::StreamFramePacketOverhead(
connection_->transport_version(), kPacket8ByteConnectionId,
kPacket0ByteConnectionId, !kIncludeVersion,
!kIncludeDiversificationNonce, PACKET_4BYTE_PACKET_NUMBER,
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0,
quiche::VARIABLE_LENGTH_INTEGER_LENGTH_0, 0u);
connection_->SetMaxPacketLength(length);
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), kDataLen - 1, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, stream_->BufferedDataBytes());
EXPECT_TRUE(HasWriteBlockedStreams());
stream_->WriteOrBufferData(kData2, false, nullptr);
EXPECT_EQ(10u, stream_->BufferedDataBytes());
InSequence s;
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), kDataLen - 1, kDataLen - 1,
NO_FIN, NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData());
stream_->OnCanWrite();
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 2u, 2 * kDataLen - 2,
NO_FIN, NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData());
stream_->OnCanWrite();
EXPECT_TRUE(session_->HasUnackedStreamData());
}
TEST_P(QuicStreamTest, WriteOrBufferDataReachStreamLimit) {
Initialize();
std::string data("aaaaa");
QuicStreamPeer::SetStreamBytesWritten(kMaxStreamLength - data.length(),
stream_);
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(data, false, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_QUIC_BUG(
{
EXPECT_CALL(*connection_,
CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _));
stream_->WriteOrBufferData("a", false, nullptr);
},
"Write too many data via stream");
}
TEST_P(QuicStreamTest, ConnectionCloseAfterStreamClose) {
Initialize();
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
if (VersionHasIetfQuicFrames(session_->transport_version())) {
QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED);
session_->OnStopSendingFrame(stop_sending);
}
EXPECT_THAT(stream_->stream_error(), IsStreamError(QUIC_STREAM_CANCELLED));
EXPECT_THAT(stream_->connection_error(), IsQuicNoError());
QuicConnectionCloseFrame frame;
frame.quic_error_code = QUIC_INTERNAL_ERROR;
stream_->OnConnectionClosed(frame, ConnectionCloseSource::FROM_SELF);
EXPECT_THAT(stream_->stream_error(), IsStreamError(QUIC_STREAM_CANCELLED));
EXPECT_THAT(stream_->connection_error(), IsQuicNoError());
}
TEST_P(QuicStreamTest, RstAlwaysSentIfNoFinSent) {
Initialize();
EXPECT_FALSE(fin_sent());
EXPECT_FALSE(rst_sent());
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 1u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 1), false, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_FALSE(fin_sent());
EXPECT_FALSE(rst_sent());
EXPECT_CALL(*session_, MaybeSendRstStreamFrame(kTestStreamId, _, _));
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
if (VersionHasIetfQuicFrames(session_->transport_version())) {
QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED);
session_->OnStopSendingFrame(stop_sending);
}
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_FALSE(fin_sent());
EXPECT_TRUE(rst_sent());
}
TEST_P(QuicStreamTest, RstNotSentIfFinSent) {
Initialize();
EXPECT_FALSE(fin_sent());
EXPECT_FALSE(rst_sent());
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 1u, 0u, FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 1), true, nullptr);
EXPECT_TRUE(fin_sent());
EXPECT_FALSE(rst_sent());
QuicStreamPeer::CloseReadSide(stream_);
stream_->CloseWriteSide();
EXPECT_TRUE(fin_sent());
EXPECT_FALSE(rst_sent());
}
TEST_P(QuicStreamTest, OnlySendOneRst) {
Initialize();
EXPECT_FALSE(fin_sent());
EXPECT_FALSE(rst_sent());
EXPECT_CALL(*session_, MaybeSendRstStreamFrame(kTestStreamId, _, _)).Times(1);
stream_->Reset(QUIC_STREAM_CANCELLED);
EXPECT_FALSE(fin_sent());
EXPECT_TRUE(rst_sent());
QuicStreamPeer::CloseReadSide(stream_);
stream_->CloseWriteSide();
EXPECT_FALSE(fin_sent());
EXPECT_TRUE(rst_sent());
}
TEST_P(QuicStreamTest, StreamFlowControlMultipleWindowUpdates) {
Initialize();
EXPECT_EQ(kMinimumFlowControlSendWindow,
QuicStreamPeer::SendWindowOffset(stream_));
QuicWindowUpdateFrame window_update_1(kInvalidControlFrameId, stream_->id(),
kMinimumFlowControlSendWindow + 5);
stream_->OnWindowUpdateFrame(window_update_1);
EXPECT_EQ(window_update_1.max_data,
QuicStreamPeer::SendWindowOffset(stream_));
QuicWindowUpdateFrame window_update_2(kInvalidControlFrameId, stream_->id(),
1);
QuicWindowUpdateFrame window_update_3(kInvalidControlFrameId, stream_->id(),
kMinimumFlowControlSendWindow + 10);
QuicWindowUpdateFrame window_update_4(kInvalidControlFrameId, stream_->id(),
5678);
stream_->OnWindowUpdateFrame(window_update_2);
stream_->OnWindowUpdateFrame(window_update_3);
stream_->OnWindowUpdateFrame(window_update_4);
EXPECT_EQ(window_update_3.max_data,
QuicStreamPeer::SendWindowOffset(stream_));
}
TEST_P(QuicStreamTest, FrameStats) {
Initialize();
EXPECT_EQ(0, stream_->num_frames_received());
EXPECT_EQ(0, stream_->num_duplicate_frames_received());
QuicStreamFrame frame(stream_->id(), false, 0, ".");
EXPECT_CALL(*stream_, OnDataAvailable()).Times(2);
stream_->OnStreamFrame(frame);
EXPECT_EQ(1, stream_->num_frames_received());
EXPECT_EQ(0, stream_->num_duplicate_frames_received());
stream_->OnStreamFrame(frame);
EXPECT_EQ(2, stream_->num_frames_received());
EXPECT_EQ(1, stream_->num_duplicate_frames_received());
QuicStreamFrame frame2(stream_->id(), false, 1, "abc");
stream_->OnStreamFrame(frame2);
}
TEST_P(QuicStreamTest, StreamSequencerNeverSeesPacketsViolatingFlowControl) {
Initialize();
QuicStreamFrame frame(stream_->id(), false,
kInitialSessionFlowControlWindowForTest + 1, ".");
EXPECT_GT(frame.offset, QuicStreamPeer::ReceiveWindowOffset(stream_));
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
stream_->OnStreamFrame(frame);
}
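// Even after StopReading(), incoming data should still advance the receive
// window (via WINDOW_UPDATE control frames) so the peer is never blocked and
// no flow-control violation is reported.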
TEST_P(QuicStreamTest, StopReadingSendsFlowControl) {
Initialize();
stream_->StopReading();
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _))
.Times(0);
EXPECT_CALL(*session_, WriteControlFrame(_, _))
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType));
std::string data(1000, 'x');
for (QuicStreamOffset offset = 0;
offset < 2 * kInitialStreamFlowControlWindowForTest;
offset += data.length()) {
QuicStreamFrame frame(stream_->id(), false, offset, data);
stream_->OnStreamFrame(frame);
}
EXPECT_LT(kInitialStreamFlowControlWindowForTest,
QuicStreamPeer::ReceiveWindowOffset(stream_));
}
TEST_P(QuicStreamTest, FinalByteOffsetFromFin) {
Initialize();
EXPECT_FALSE(stream_->HasReceivedFinalOffset());
QuicStreamFrame stream_frame_no_fin(stream_->id(), false, 1234, ".");
stream_->OnStreamFrame(stream_frame_no_fin);
EXPECT_FALSE(stream_->HasReceivedFinalOffset());
QuicStreamFrame stream_frame_with_fin(stream_->id(), true, 1234, ".");
stream_->OnStreamFrame(stream_frame_with_fin);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
}
TEST_P(QuicStreamTest, FinalByteOffsetFromRst) {
Initialize();
EXPECT_FALSE(stream_->HasReceivedFinalOffset());
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
}
TEST_P(QuicStreamTest, InvalidFinalByteOffsetFromRst) {
Initialize();
EXPECT_FALSE(stream_->HasReceivedFinalOffset());
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 0xFFFFFFFFFFFF);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_FLOW_CONTROL_RECEIVED_TOO_MUCH_DATA, _, _));
stream_->OnStreamReset(rst_frame);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
}
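// A zero-length FIN-only frame whose offset lies beyond the flow-control
// window carries no payload, so it should convey the final offset without
// consuming stream or connection window and without closing the connection.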
TEST_P(QuicStreamTest, FinalByteOffsetFromZeroLengthStreamFrame) {
Initialize();
EXPECT_FALSE(stream_->HasReceivedFinalOffset());
const QuicStreamOffset kByteOffsetExceedingFlowControlWindow =
kInitialSessionFlowControlWindowForTest + 1;
const QuicStreamOffset current_stream_flow_control_offset =
QuicStreamPeer::ReceiveWindowOffset(stream_);
const QuicStreamOffset current_connection_flow_control_offset =
QuicFlowControllerPeer::ReceiveWindowOffset(session_->flow_controller());
ASSERT_GT(kByteOffsetExceedingFlowControlWindow,
current_stream_flow_control_offset);
ASSERT_GT(kByteOffsetExceedingFlowControlWindow,
current_connection_flow_control_offset);
QuicStreamFrame zero_length_stream_frame_with_fin(
stream_->id(), true, kByteOffsetExceedingFlowControlWindow,
absl::string_view());
EXPECT_EQ(0, zero_length_stream_frame_with_fin.data_length);
EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
stream_->OnStreamFrame(zero_length_stream_frame_with_fin);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
EXPECT_EQ(current_stream_flow_control_offset,
QuicStreamPeer::ReceiveWindowOffset(stream_));
EXPECT_EQ(
current_connection_flow_control_offset,
QuicFlowControllerPeer::ReceiveWindowOffset(session_->flow_controller()));
}
TEST_P(QuicStreamTest, OnStreamResetOffsetOverflow) {
Initialize();
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, kMaxStreamLength + 1);
EXPECT_CALL(*connection_, CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _));
stream_->OnStreamReset(rst_frame);
}
TEST_P(QuicStreamTest, OnStreamFrameUpperLimit) {
Initialize();
QuicStreamPeer::SetReceiveWindowOffset(stream_, kMaxStreamLength + 5u);
QuicFlowControllerPeer::SetReceiveWindowOffset(session_->flow_controller(),
kMaxStreamLength + 5u);
QuicStreamSequencerPeer::SetFrameBufferTotalBytesRead(
QuicStreamPeer::sequencer(stream_), kMaxStreamLength - 10u);
EXPECT_CALL(*connection_, CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _))
.Times(0);
QuicStreamFrame stream_frame(stream_->id(), false, kMaxStreamLength - 1, ".");
stream_->OnStreamFrame(stream_frame);
QuicStreamFrame stream_frame2(stream_->id(), true, kMaxStreamLength, "");
stream_->OnStreamFrame(stream_frame2);
}
TEST_P(QuicStreamTest, StreamTooLong) {
Initialize();
QuicStreamFrame stream_frame(stream_->id(), false, kMaxStreamLength, ".");
EXPECT_QUIC_PEER_BUG(
{
EXPECT_CALL(*connection_,
CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _))
.Times(1);
stream_->OnStreamFrame(stream_frame);
},
absl::StrCat("Receive stream frame on stream ", stream_->id(),
" reaches max stream length"));
}
TEST_P(QuicStreamTest, SetDrainingIncomingOutgoing) {
Initialize();
QuicStreamFrame stream_frame_with_fin(stream_->id(), true, 1234, ".");
stream_->OnStreamFrame(stream_frame_with_fin);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_FALSE(stream_->reading_stopped());
EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 2u, 0u, FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 2), true, nullptr);
EXPECT_TRUE(stream_->write_side_closed());
EXPECT_EQ(1u, QuicSessionPeer::GetNumDrainingStreams(session_.get()));
EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
TEST_P(QuicStreamTest, SetDrainingOutgoingIncoming) {
Initialize();
EXPECT_CALL(*session_, WritevData(kTestStreamId, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 2u, 0u, FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(absl::string_view(kData1, 2), true, nullptr);
EXPECT_TRUE(stream_->write_side_closed());
EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
QuicStreamFrame stream_frame_with_fin(stream_->id(), true, 1234, ".");
stream_->OnStreamFrame(stream_frame_with_fin);
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_FALSE(stream_->reading_stopped());
EXPECT_EQ(1u, QuicSessionPeer::GetNumDrainingStreams(session_.get()));
EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(session_.get()));
}
TEST_P(QuicStreamTest, EarlyResponseFinHandling) {
Initialize();
EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*stream_, OnDataAvailable()).Times(1);
QuicStreamFrame frame1(stream_->id(), false, 0, "Start");
stream_->OnStreamFrame(frame1);
QuicStreamPeer::CloseReadSide(stream_);
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
QuicStreamFrame frame2(stream_->id(), true, 0, "End");
stream_->OnStreamFrame(frame2);
EXPECT_TRUE(stream_->fin_received());
EXPECT_TRUE(stream_->HasReceivedFinalOffset());
}
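// Sent data must be retained in the send buffer and the stream must keep
// reporting IsWaitingForAcks() until every byte, and finally the FIN, has
// been acknowledged.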
TEST_P(QuicStreamTest, StreamWaitsForAcks) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(session_->HasUnackedStreamData());
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
QuicByteCount newly_acked_length = 0;
EXPECT_TRUE(stream_->OnStreamFrameAcked(0, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(9u, newly_acked_length);
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData(kData2, false, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData("", true, nullptr);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->OnStreamFrameRetransmitted(9, 9, false);
EXPECT_TRUE(stream_->OnStreamFrameAcked(9, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(9u, newly_acked_length);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_CALL(*stream_, OnWriteSideInDataRecvdState());
EXPECT_TRUE(stream_->OnStreamFrameAcked(18, 0, true, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(0u, newly_acked_length);
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
}
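// Out-of-order acks ([9,18), [18,27), then [0,9)) only release send-buffer
// slices once the acked prefix becomes contiguous; the stream keeps waiting
// until the FIN itself is acked.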
TEST_P(QuicStreamTest, StreamDataGetAckedOutOfOrder) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData("", true, nullptr);
EXPECT_EQ(3u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
QuicByteCount newly_acked_length = 0;
EXPECT_TRUE(stream_->OnStreamFrameAcked(9, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(9u, newly_acked_length);
EXPECT_EQ(3u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->OnStreamFrameAcked(18, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(9u, newly_acked_length);
EXPECT_EQ(3u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->OnStreamFrameAcked(0, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(9u, newly_acked_length);
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_CALL(*stream_, OnWriteSideInDataRecvdState());
EXPECT_TRUE(stream_->OnStreamFrameAcked(27, 0, true, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(0u, newly_acked_length);
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
}
TEST_P(QuicStreamTest, CancelStream) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->MaybeSendStopSending(QUIC_STREAM_NO_ERROR);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_CALL(*connection_,
OnStreamReset(stream_->id(), QUIC_STREAM_CANCELLED));
EXPECT_CALL(*session_, WriteControlFrame(_, _))
.Times(AtLeast(1))
.WillRepeatedly(Invoke(&ClearControlFrameWithTransmissionType));
EXPECT_CALL(*session_, MaybeSendRstStreamFrame(_, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
session_->ReallyMaybeSendRstStreamFrame(
stream_->id(), QUIC_STREAM_CANCELLED,
stream_->stream_bytes_written());
}));
stream_->Reset(QUIC_STREAM_CANCELLED);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
}
TEST_P(QuicStreamTest, RstFrameReceivedStreamNotFinishSending) {
if (VersionHasIetfQuicFrames(GetParam().transport_version)) {
return;
}
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 9);
EXPECT_CALL(
*session_,
MaybeSendRstStreamFrame(
stream_->id(),
QuicResetStreamError::FromInternal(QUIC_RST_ACKNOWLEDGEMENT), 9));
stream_->OnStreamReset(rst_frame);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
}
TEST_P(QuicStreamTest, RstFrameReceivedStreamFinishSending) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData(kData1, true, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
}
TEST_P(QuicStreamTest, ConnectionClosed) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_CALL(
*session_,
MaybeSendRstStreamFrame(
stream_->id(),
QuicResetStreamError::FromInternal(QUIC_RST_ACKNOWLEDGEMENT), 9));
QuicConnectionPeer::SetConnectionClose(connection_);
QuicConnectionCloseFrame frame;
frame.quic_error_code = QUIC_INTERNAL_ERROR;
stream_->OnConnectionClosed(frame, ConnectionCloseSource::FROM_SELF);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
}
TEST_P(QuicStreamTest, CanWriteNewDataAfterData) {
SetQuicFlag(quic_buffered_data_threshold, 100);
Initialize();
EXPECT_TRUE(stream_->CanWriteNewDataAfterData(99));
EXPECT_FALSE(stream_->CanWriteNewDataAfterData(100));
}
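// Exercises the buffered-data threshold: bytes the session does not consume
// are buffered, OnCanWriteNewData() fires only once the buffered amount drops
// below quic_buffered_data_threshold, and WriteMemSlices() accepts new data
// only while the stream can still buffer more.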
TEST_P(QuicStreamTest, WriteBufferedData) {
SetQuicFlag(quic_buffered_data_threshold, 100);
Initialize();
std::string data(1024, 'a');
EXPECT_TRUE(stream_->CanWriteNewData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 100u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->WriteOrBufferData(data, false, nullptr);
stream_->WriteOrBufferData(data, false, nullptr);
stream_->WriteOrBufferData(data, false, nullptr);
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_EQ(3 * data.length() - 100, stream_->BufferedDataBytes());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 100, 100u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(0);
stream_->OnCanWrite();
EXPECT_EQ(3 * data.length() - 200, stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->CanWriteNewData());
QuicByteCount data_to_write =
3 * data.length() - 200 - GetQuicFlag(quic_buffered_data_threshold) + 1;
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this, data_to_write]() {
return session_->ConsumeData(stream_->id(), data_to_write, 200u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(1);
stream_->OnCanWrite();
EXPECT_EQ(
static_cast<uint64_t>(GetQuicFlag(quic_buffered_data_threshold) - 1),
stream_->BufferedDataBytes());
EXPECT_TRUE(stream_->CanWriteNewData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(1);
stream_->OnCanWrite();
EXPECT_EQ(0u, stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->HasBufferedData());
EXPECT_TRUE(stream_->CanWriteNewData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Return(QuicConsumedData(0, false)));
struct iovec iov = {const_cast<char*>(data.data()), data.length()};
quiche::QuicheMemSliceStorage storage(
&iov, 1, session_->connection()->helper()->GetStreamSendBufferAllocator(),
1024);
QuicConsumedData consumed = stream_->WriteMemSlices(storage.ToSpan(), false);
EXPECT_EQ(data.length(), consumed.bytes_consumed);
EXPECT_FALSE(consumed.fin_consumed);
EXPECT_EQ(data.length(), stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->CanWriteNewData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _)).Times(0);
quiche::QuicheMemSliceStorage storage2(
&iov, 1, session_->connection()->helper()->GetStreamSendBufferAllocator(),
1024);
consumed = stream_->WriteMemSlices(storage2.ToSpan(), false);
EXPECT_EQ(0u, consumed.bytes_consumed);
EXPECT_FALSE(consumed.fin_consumed);
EXPECT_EQ(data.length(), stream_->BufferedDataBytes());
data_to_write = data.length() - GetQuicFlag(quic_buffered_data_threshold) + 1;
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this, data_to_write]() {
return session_->ConsumeData(stream_->id(), data_to_write, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(1);
stream_->OnCanWrite();
EXPECT_EQ(
static_cast<uint64_t>(GetQuicFlag(quic_buffered_data_threshold) - 1),
stream_->BufferedDataBytes());
EXPECT_TRUE(stream_->CanWriteNewData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _)).Times(0);
quiche::QuicheMemSliceStorage storage3(
&iov, 1, session_->connection()->helper()->GetStreamSendBufferAllocator(),
1024);
consumed = stream_->WriteMemSlices(storage3.ToSpan(), false);
EXPECT_EQ(data.length(), consumed.bytes_consumed);
EXPECT_FALSE(consumed.fin_consumed);
EXPECT_EQ(data.length() + GetQuicFlag(quic_buffered_data_threshold) - 1,
stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->CanWriteNewData());
}
TEST_P(QuicStreamTest, WritevDataReachStreamLimit) {
Initialize();
std::string data("aaaaa");
QuicStreamPeer::SetStreamBytesWritten(kMaxStreamLength - data.length(),
stream_);
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
struct iovec iov = {const_cast<char*>(data.data()), 5u};
quiche::QuicheMemSliceStorage storage(
&iov, 1, session_->connection()->helper()->GetStreamSendBufferAllocator(),
1024);
QuicConsumedData consumed = stream_->WriteMemSlices(storage.ToSpan(), false);
EXPECT_EQ(data.length(), consumed.bytes_consumed);
struct iovec iov2 = {const_cast<char*>(data.data()), 1u};
quiche::QuicheMemSliceStorage storage2(
&iov2, 1,
session_->connection()->helper()->GetStreamSendBufferAllocator(), 1024);
EXPECT_QUIC_BUG(
{
EXPECT_CALL(*connection_,
CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _));
stream_->WriteMemSlices(storage2.ToSpan(), false);
},
"Write too many data via stream");
}
TEST_P(QuicStreamTest, WriteMemSlices) {
SetQuicFlag(quic_buffered_data_threshold, 100);
Initialize();
constexpr QuicByteCount kDataSize = 1024;
quiche::QuicheBufferAllocator* allocator =
connection_->helper()->GetStreamSendBufferAllocator();
std::vector<quiche::QuicheMemSlice> vector1;
vector1.push_back(
quiche::QuicheMemSlice(quiche::QuicheBuffer(allocator, kDataSize)));
vector1.push_back(
quiche::QuicheMemSlice(quiche::QuicheBuffer(allocator, kDataSize)));
std::vector<quiche::QuicheMemSlice> vector2;
vector2.push_back(
quiche::QuicheMemSlice(quiche::QuicheBuffer(allocator, kDataSize)));
vector2.push_back(
quiche::QuicheMemSlice(quiche::QuicheBuffer(allocator, kDataSize)));
absl::Span<quiche::QuicheMemSlice> span1(vector1);
absl::Span<quiche::QuicheMemSlice> span2(vector2);
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 100u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
QuicConsumedData consumed = stream_->WriteMemSlices(span1, false);
EXPECT_EQ(2048u, consumed.bytes_consumed);
EXPECT_FALSE(consumed.fin_consumed);
EXPECT_EQ(2 * kDataSize - 100, stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->fin_buffered());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _)).Times(0);
consumed = stream_->WriteMemSlices(span2, true);
EXPECT_EQ(0u, consumed.bytes_consumed);
EXPECT_FALSE(consumed.fin_consumed);
EXPECT_EQ(2 * kDataSize - 100, stream_->BufferedDataBytes());
EXPECT_FALSE(stream_->fin_buffered());
QuicByteCount data_to_write =
2 * kDataSize - 100 - GetQuicFlag(quic_buffered_data_threshold) + 1;
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this, data_to_write]() {
return session_->ConsumeData(stream_->id(), data_to_write, 100u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(1);
stream_->OnCanWrite();
EXPECT_EQ(
static_cast<uint64_t>(GetQuicFlag(quic_buffered_data_threshold) - 1),
stream_->BufferedDataBytes());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _)).Times(0);
consumed = stream_->WriteMemSlices(span2, true);
EXPECT_EQ(2048u, consumed.bytes_consumed);
EXPECT_TRUE(consumed.fin_consumed);
EXPECT_EQ(2 * kDataSize + GetQuicFlag(quic_buffered_data_threshold) - 1,
stream_->BufferedDataBytes());
EXPECT_TRUE(stream_->fin_buffered());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->OnCanWrite();
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(0);
EXPECT_FALSE(stream_->HasBufferedData());
EXPECT_TRUE(stream_->write_side_closed());
}
TEST_P(QuicStreamTest, WriteMemSlicesReachStreamLimit) {
Initialize();
QuicStreamPeer::SetStreamBytesWritten(kMaxStreamLength - 5u, stream_);
std::vector<std::pair<char*, size_t>> buffers;
quiche::QuicheMemSlice slice1 = MemSliceFromString("12345");
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 5u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
QuicConsumedData consumed = stream_->WriteMemSlice(std::move(slice1), false);
EXPECT_EQ(5u, consumed.bytes_consumed);
quiche::QuicheMemSlice slice2 = MemSliceFromString("6");
EXPECT_QUIC_BUG(
{
EXPECT_CALL(*connection_,
CloseConnection(QUIC_STREAM_LENGTH_OVERFLOW, _, _));
stream_->WriteMemSlice(std::move(slice2), false);
},
"Write too many data via stream");
}
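// Overlapping acks must only count bytes that are newly acked; once
// everything (including the FIN) is acked, further acks report zero newly
// acked bytes and return false.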
TEST_P(QuicStreamTest, StreamDataGetAckedMultipleTimes) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData1, true, nullptr);
EXPECT_EQ(3u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
QuicByteCount newly_acked_length = 0;
EXPECT_TRUE(stream_->OnStreamFrameAcked(0, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(9u, newly_acked_length);
EXPECT_EQ(2u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->OnStreamFrameAcked(5, 17, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(13u, newly_acked_length);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->OnStreamFrameAcked(18, 8, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(4u, newly_acked_length);
EXPECT_EQ(1u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_TRUE(stream_->OnStreamFrameAcked(26, 1, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(1u, newly_acked_length);
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_TRUE(stream_->IsWaitingForAcks());
EXPECT_TRUE(session_->HasUnackedStreamData());
EXPECT_CALL(*stream_, OnWriteSideInDataRecvdState()).Times(1);
EXPECT_TRUE(stream_->OnStreamFrameAcked(27, 0, true, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(0u, newly_acked_length);
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
EXPECT_FALSE(
stream_->OnStreamFrameAcked(10, 17, true, QuicTime::Delta::Zero(),
QuicTime::Zero(), &newly_acked_length));
EXPECT_EQ(0u, newly_acked_length);
EXPECT_EQ(0u, QuicStreamPeer::SendBuffer(stream_).size());
EXPECT_FALSE(stream_->IsWaitingForAcks());
EXPECT_FALSE(session_->HasUnackedStreamData());
}
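// Lost stream data is marked for retransmission and is resent on OnCanWrite()
// before any buffered new data; data that has been acked since being lost no
// longer needs retransmission.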
TEST_P(QuicStreamTest, OnStreamFrameLost) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(kData1, false, nullptr);
EXPECT_FALSE(stream_->HasBufferedData());
EXPECT_TRUE(stream_->IsStreamFrameOutstanding(0, 9, false));
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Return(QuicConsumedData(0, false)));
stream_->WriteOrBufferData(kData2, false, nullptr);
stream_->WriteOrBufferData(kData2, false, nullptr);
EXPECT_TRUE(stream_->HasBufferedData());
EXPECT_FALSE(stream_->HasPendingRetransmission());
stream_->OnStreamFrameLost(0, 9, false);
EXPECT_TRUE(stream_->HasPendingRetransmission());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*stream_, OnCanWriteNewData()).Times(1);
stream_->OnCanWrite();
EXPECT_FALSE(stream_->HasPendingRetransmission());
EXPECT_TRUE(stream_->HasBufferedData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->OnCanWrite();
EXPECT_FALSE(stream_->HasBufferedData());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData("", true, nullptr);
stream_->OnStreamFrameLost(9, 18, false);
stream_->OnStreamFrameLost(27, 0, true);
EXPECT_TRUE(stream_->HasPendingRetransmission());
QuicByteCount newly_acked_length = 0;
EXPECT_TRUE(stream_->OnStreamFrameAcked(9, 9, false, QuicTime::Delta::Zero(),
QuicTime::Zero(),
&newly_acked_length));
EXPECT_EQ(9u, newly_acked_length);
EXPECT_FALSE(stream_->IsStreamFrameOutstanding(9, 3, false));
EXPECT_TRUE(stream_->HasPendingRetransmission());
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 9u, 18u, FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
stream_->OnCanWrite();
EXPECT_FALSE(stream_->HasPendingRetransmission());
stream_->OnStreamFrameLost(9, 9, false);
EXPECT_FALSE(stream_->HasPendingRetransmission());
EXPECT_TRUE(stream_->IsStreamFrameOutstanding(27, 0, true));
}
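// The lost FIN (at offset 18) cannot be bundled with the retransmission of
// the lost [0,9) bytes; it has to go out as a separate FIN-only write.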
TEST_P(QuicStreamTest, CannotBundleLostFin) {
Initialize();
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData2, true, nullptr);
stream_->OnStreamFrameLost(0, 9, false);
stream_->OnStreamFrameLost(18, 0, true);
InSequence s;
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 9u, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillOnce(Return(QuicConsumedData(0, true)));
stream_->OnCanWrite();
}
TEST_P(QuicStreamTest, MarkConnectionLevelWriteBlockedOnWindowUpdateFrame) {
Initialize();
QuicConfigPeer::SetReceivedInitialStreamFlowControlWindow(session_->config(),
100);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
session_->config(), 100);
auto stream = new TestStream(GetNthClientInitiatedBidirectionalStreamId(
GetParam().transport_version, 2),
session_.get(), BIDIRECTIONAL);
session_->ActivateStream(absl::WrapUnique(stream));
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
std::string data(1024, '.');
stream->WriteOrBufferData(data, false, nullptr);
EXPECT_FALSE(HasWriteBlockedStreams());
QuicWindowUpdateFrame window_update(kInvalidControlFrameId, stream_->id(),
1234);
stream->OnWindowUpdateFrame(window_update);
EXPECT_TRUE(HasWriteBlockedStreams());
EXPECT_TRUE(stream->HasBufferedData());
}
TEST_P(QuicStreamTest,
MarkConnectionLevelWriteBlockedOnWindowUpdateFrameWithNoBufferedData) {
Initialize();
QuicConfigPeer::SetReceivedInitialStreamFlowControlWindow(session_->config(),
100);
QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
session_->config(), 100);
auto stream = new TestStream(GetNthClientInitiatedBidirectionalStreamId(
GetParam().transport_version, 2),
session_.get(), BIDIRECTIONAL);
session_->ActivateStream(absl::WrapUnique(stream));
std::string data(100, '.');
EXPECT_CALL(*session_, WritevData(_, _, _, _, _, _))
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*session_, SendBlocked(_, _)).Times(1);
stream->WriteOrBufferData(data, false, nullptr);
EXPECT_FALSE(HasWriteBlockedStreams());
QuicWindowUpdateFrame window_update(kInvalidControlFrameId, stream_->id(),
120);
stream->OnWindowUpdateFrame(window_update);
EXPECT_FALSE(stream->HasBufferedData());
EXPECT_TRUE(HasWriteBlockedStreams());
}
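// Retransmission skips bytes that were already acked ([10,13)) and reports
// failure when the session only partially consumes the requested data.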
TEST_P(QuicStreamTest, RetransmitStreamData) {
Initialize();
InSequence s;
EXPECT_CALL(*session_, WritevData(stream_->id(), _, _, _, _, _))
.Times(2)
.WillRepeatedly(Invoke(session_.get(), &MockQuicSession::ConsumeData));
stream_->WriteOrBufferData(kData1, false, nullptr);
stream_->WriteOrBufferData(kData1, true, nullptr);
QuicByteCount newly_acked_length = 0;
stream_->OnStreamFrameAcked(10, 3, false, QuicTime::Delta::Zero(),
QuicTime::Zero(), &newly_acked_length);
EXPECT_EQ(3u, newly_acked_length);
EXPECT_CALL(*session_, WritevData(stream_->id(), 10, 0, NO_FIN, _, _))
.WillOnce(InvokeWithoutArgs([this]() {
return session_->ConsumeData(stream_->id(), 8, 0u, NO_FIN,
NOT_RETRANSMISSION, std::nullopt);
}));
EXPECT_FALSE(stream_->RetransmitStreamData(0, 18, true, PTO_RETRANSMISSION));
EXPECT_CALL(*session_, WritevData(stream_->id(), 10, 0, NO_FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*session_, WritevData(stream_->id(), 5, 13, FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_TRUE(stream_->RetransmitStreamData(0, 18, true, PTO_RETRANSMISSION));
EXPECT_CALL(*session_, WritevData(stream_->id(), 8, 0, NO_FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_CALL(*session_, WritevData(stream_->id(), 0, 18, FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_TRUE(stream_->RetransmitStreamData(0, 8, true, PTO_RETRANSMISSION));
}
TEST_P(QuicStreamTest, ResetStreamOnTtlExpiresRetransmitLostData) {
Initialize();
EXPECT_CALL(*session_, WritevData(stream_->id(), 200, 0, FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
std::string body(200, 'a');
stream_->WriteOrBufferData(body, true, nullptr);
QuicTime::Delta ttl = QuicTime::Delta::FromSeconds(1);
ASSERT_TRUE(stream_->MaybeSetTtl(ttl));
EXPECT_CALL(*session_, WritevData(stream_->id(), 100, 0, NO_FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
EXPECT_TRUE(stream_->RetransmitStreamData(0, 100, false, PTO_RETRANSMISSION));
stream_->OnStreamFrameLost(100, 100, true);
EXPECT_TRUE(stream_->HasPendingRetransmission());
connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
if (session_->version().UsesHttp3()) {
EXPECT_CALL(*session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_TTL_EXPIRED)))
.Times(1);
}
EXPECT_CALL(
*session_,
MaybeSendRstStreamFrame(
_, QuicResetStreamError::FromInternal(QUIC_STREAM_TTL_EXPIRED), _))
.Times(1);
stream_->OnCanWrite();
}
TEST_P(QuicStreamTest, ResetStreamOnTtlExpiresEarlyRetransmitData) {
Initialize();
EXPECT_CALL(*session_, WritevData(stream_->id(), 200, 0, FIN, _, _))
.WillOnce(Invoke(session_.get(), &MockQuicSession::ConsumeData));
std::string body(200, 'a');
stream_->WriteOrBufferData(body, true, nullptr);
QuicTime::Delta ttl = QuicTime::Delta::FromSeconds(1);
ASSERT_TRUE(stream_->MaybeSetTtl(ttl));
connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
if (session_->version().UsesHttp3()) {
EXPECT_CALL(*session_,
MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
QUIC_STREAM_TTL_EXPIRED)))
.Times(1);
}
EXPECT_CALL(
*session_,
MaybeSendRstStreamFrame(
_, QuicResetStreamError::FromInternal(QUIC_STREAM_TTL_EXPIRED), _))
.Times(1);
stream_->RetransmitStreamData(0, 100, false, PTO_RETRANSMISSION);
}
TEST_P(QuicStreamTest, OnStreamResetReadOrReadWrite) {
Initialize();
EXPECT_FALSE(stream_->write_side_closed());
EXPECT_FALSE(QuicStreamPeer::read_side_closed(stream_));
QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
stream_->OnStreamReset(rst_frame);
if (VersionHasIetfQuicFrames(connection_->transport_version())) {
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
EXPECT_FALSE(stream_->write_side_closed());
} else {
EXPECT_TRUE(stream_->write_side_closed());
EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream_));
}
}
TEST_P(QuicStreamTest, WindowUpdateForReadOnlyStream) {
Initialize();
QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
connection_->transport_version(), Perspective::IS_CLIENT);
TestStream stream(stream_id, session_.get(), READ_UNIDIRECTIONAL);
QuicWindowUpdateFrame window_update_frame(kInvalidControlFrameId, stream_id,
0);
EXPECT_CALL(
*connection_,
CloseConnection(
QUIC_WINDOW_UPDATE_RECEIVED_ON_READ_UNIDIRECTIONAL_STREAM,
"WindowUpdateFrame received on READ_UNIDIRECTIONAL stream.", _));
stream.OnWindowUpdateFrame(window_update_frame);
}
TEST_P(QuicStreamTest, RstStreamFrameChangesCloseOffset) {
Initialize();
QuicStreamFrame stream_frame(stream_->id(), true, 0, "abc");
EXPECT_CALL(*stream_, OnDataAvailable());
stream_->OnStreamFrame(stream_frame);
QuicRstStreamFrame rst(kInvalidControlFrameId, stream_->id(),
QUIC_STREAM_CANCELLED, 0u);
EXPECT_CALL(*connection_, CloseConnection(QUIC_STREAM_MULTIPLE_OFFSET, _, _));
stream_->OnStreamReset(rst);
}
TEST_P(QuicStreamTest, EmptyStreamFrameWithNoFin) {
Initialize();
QuicStreamFrame empty_stream_frame(stream_->id(), false, 0, "");
if (stream_->version().HasIetfQuicFrames()) {
EXPECT_CALL(*connection_,
CloseConnection(QUIC_EMPTY_STREAM_FRAME_NO_FIN, _, _))
.Times(0);
} else {
EXPECT_CALL(*connection_,
CloseConnection(QUIC_EMPTY_STREAM_FRAME_NO_FIN, _, _));
}
EXPECT_CALL(*stream_, OnDataAvailable()).Times(0);
stream_->OnStreamFrame(empty_stream_frame);
}
TEST_P(QuicStreamTest, SendRstWithCustomIetfCode) {
Initialize();
QuicResetStreamError error(QUIC_STREAM_CANCELLED, 0x1234abcd);
EXPECT_CALL(*session_, MaybeSendRstStreamFrame(kTestStreamId, error, _))
.Times(1);
stream_->ResetWithError(error);
EXPECT_TRUE(rst_sent());
}
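// A RESET_STREAM_AT frame with a reliable offset of 100 defers the reset: the
// stream is only reset (and its read side closed) once the first 100 bytes
// have been received and consumed.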
TEST_P(QuicStreamTest, ResetWhenOffsetReached) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 400, 100);
stream_->OnResetStreamAtFrame(rst);
char data[100];
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(99);
});
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, absl::string_view(data, 99)));
EXPECT_FALSE(stream_->rst_received());
EXPECT_FALSE(stream_->read_side_closed());
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(1);
});
stream_->OnStreamFrame(QuicStreamFrame(stream_->id(), false, 99,
absl::string_view(data + 99, 1)));
EXPECT_TRUE(stream_->rst_received());
EXPECT_TRUE(stream_->read_side_closed());
}
TEST_P(QuicStreamTest, ResetWhenOffsetReachedOutOfOrder) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 400, 100);
stream_->OnResetStreamAtFrame(rst);
char data[100];
stream_->OnStreamFrame(QuicStreamFrame(stream_->id(), false, 99,
absl::string_view(data + 99, 1)));
EXPECT_FALSE(stream_->rst_received());
EXPECT_FALSE(stream_->read_side_closed());
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(100);
});
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, absl::string_view(data, 99)));
EXPECT_TRUE(stream_->rst_received());
EXPECT_TRUE(stream_->read_side_closed());
}
TEST_P(QuicStreamTest, HigherReliableSizeIgnored) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 400, 100);
stream_->OnResetStreamAtFrame(rst);
QuicResetStreamAtFrame rst2(0, stream_->id(), QUIC_STREAM_CANCELLED, 400,
200);
stream_->OnResetStreamAtFrame(rst2);
char data[100];
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(99);
});
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, absl::string_view(data, 99)));
EXPECT_FALSE(stream_->rst_received());
EXPECT_FALSE(stream_->read_side_closed());
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(1);
});
stream_->OnStreamFrame(QuicStreamFrame(stream_->id(), false, 99,
absl::string_view(data + 99, 1)));
EXPECT_TRUE(stream_->rst_received());
EXPECT_TRUE(stream_->read_side_closed());
}
TEST_P(QuicStreamTest, InstantReset) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
char data[100];
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(100);
});
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, absl::string_view(data, 100)));
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 400, 100);
EXPECT_FALSE(stream_->rst_received());
EXPECT_FALSE(stream_->read_side_closed());
stream_->OnResetStreamAtFrame(rst);
EXPECT_TRUE(stream_->rst_received());
EXPECT_TRUE(stream_->read_side_closed());
}
TEST_P(QuicStreamTest, ResetIgnoredDueToFin) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
char data[100];
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(98);
});
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), false, 0, absl::string_view(data, 98)));
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 100, 99);
stream_->OnResetStreamAtFrame(rst);
EXPECT_FALSE(stream_->rst_received());
EXPECT_FALSE(stream_->read_side_closed());
EXPECT_CALL(*stream_, OnDataAvailable()).WillOnce([this]() {
stream_->ConsumeData(2);
stream_->OnFinRead();
});
stream_->OnStreamFrame(QuicStreamFrame(stream_->id(), true, 98,
absl::string_view(data + 98, 2)));
EXPECT_FALSE(stream_->rst_received());
EXPECT_TRUE(stream_->read_side_closed());
}
TEST_P(QuicStreamTest, ReliableOffsetBeyondFin) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
char data[100];
stream_->OnStreamFrame(QuicStreamFrame(stream_->id(), true, 98,
absl::string_view(data + 98, 2)));
EXPECT_CALL(*connection_, CloseConnection(QUIC_STREAM_MULTIPLE_OFFSET, _, _))
.Times(1);
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 101, 101);
stream_->OnResetStreamAtFrame(rst);
}
TEST_P(QuicStreamTest, FinBeforeReliableOffset) {
Initialize();
if (!VersionHasIetfQuicFrames(session_->transport_version())) {
return;
}
QuicResetStreamAtFrame rst(0, stream_->id(), QUIC_STREAM_CANCELLED, 101, 101);
stream_->OnResetStreamAtFrame(rst);
char data[100];
EXPECT_CALL(*connection_, CloseConnection(QUIC_STREAM_MULTIPLE_OFFSET, _, _))
.Times(1);
stream_->OnStreamFrame(
QuicStreamFrame(stream_->id(), true, 0, absl::string_view(data, 100)));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b3cf634f-2abf-4078-a994-6116125692a0 | cpp | tensorflow/tensorflow | collective_select_folder | third_party/xla/xla/service/gpu/transforms/collective_select_folder.cc | third_party/xla/xla/service/gpu/transforms/collective_select_folder_test.cc | #include "xla/service/gpu/transforms/collective_select_folder.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using SourceTargetPair = std::pair<int64_t, int64_t>;
using SourceTargetPairs = std::vector<SourceTargetPair>;
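// Pieces extracted from a select(pred, true_operand, false_operand) whose
// predicate compares a replica-id or partition-id against a constant.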
struct SelectPredInfo {
int64_t constant;
Comparison::Direction direction;
HloOpcode device_id_type;
HloInstruction* true_operand;
HloInstruction* false_operand;
};
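// Matches select(compare(replica-id|partition-id, constant), t, f), possibly
// with the compare wrapped in one extra op (e.g. a broadcast), and returns
// the extracted pieces; returns std::nullopt if the pattern does not match.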
std::optional<SelectPredInfo> GetPredSelectInfo(HloInstruction* select) {
if (select->opcode() != HloOpcode::kSelect) {
return std::nullopt;
}
const HloInstruction* compare_candidate = select->operand(0);
if (compare_candidate->opcode() != HloOpcode::kCompare) {
compare_candidate = compare_candidate->operand(0);
}
if (compare_candidate->opcode() != HloOpcode::kCompare) {
return std::nullopt;
}
const HloCompareInstruction* compare =
DynCast<HloCompareInstruction>(compare_candidate);
if ((compare->operand(0)->opcode() != HloOpcode::kReplicaId &&
compare->operand(0)->opcode() != HloOpcode::kPartitionId) ||
compare->operand(1)->opcode() != HloOpcode::kConstant) {
return std::nullopt;
}
int64_t id_value =
compare->operand(1)->literal().GetFirstInteger().value_or(-1);
return SelectPredInfo{id_value, compare->direction(),
compare->operand(0)->opcode(),
select->mutable_operand(1), select->mutable_operand(2)};
}
bool IsUniqueSource(int64_t device_id, const SourceTargetPairs& pairs) {
if (pairs.size() == 1 && pairs[0].first == device_id) return true;
return false;
}
bool IsNotPresentInSource(int64_t device_id, const SourceTargetPairs& pairs) {
return absl::c_none_of(
pairs, [device_id](const auto& pair) { return pair.first == device_id; });
}
inline absl::StatusOr<bool> update(HloInstruction* cp, HloInstruction* data) {
TF_RETURN_IF_ERROR(cp->ReplaceOperandWith(0, data));
return true;
}
bool IsShardingConsistent(HloCollectivePermuteInstruction* cp,
HloOpcode device_id_type) {
auto id = cp->channel_id();
return (device_id_type == HloOpcode::kPartitionId && id.has_value()) ||
(device_id_type == HloOpcode::kReplicaId && !id.has_value());
}
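// Folds the select feeding a collective-permute when the source-target pairs
// determine the predicate's value on the sending device: with EQ, a unique
// source equal to the constant forwards the true operand, and a source list
// that never contains the constant forwards the false operand; NE is the
// mirror image.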
absl::StatusOr<bool> TryFoldSelect(HloInstruction* in) {
if (in->opcode() != HloOpcode::kCollectivePermute) return false;
auto select_info_opt = GetPredSelectInfo(in->mutable_operand(0));
if (!select_info_opt.has_value()) return false;
auto select_info = select_info_opt.value();
HloCollectivePermuteInstruction* cp =
Cast<HloCollectivePermuteInstruction>(in);
if (!IsShardingConsistent(cp, select_info.device_id_type)) return false;
int64_t device_id = select_info.constant;
SourceTargetPairs pairs = cp->source_target_pairs();
if (select_info.direction == Comparison::Direction::kEq) {
if (IsUniqueSource(device_id, pairs)) {
return update(cp, select_info.true_operand);
} else if (IsNotPresentInSource(device_id, pairs)) {
return update(cp, select_info.false_operand);
}
}
if (select_info.direction == Comparison::Direction::kNe) {
if (IsNotPresentInSource(device_id, pairs)) {
return update(cp, select_info.true_operand);
} else if (IsUniqueSource(device_id, pairs)) {
return update(cp, select_info.false_operand);
}
}
return false;
}
}
absl::StatusOr<bool> CollectiveSelectFolder::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
TF_ASSIGN_OR_RETURN(bool local_changed, TryFoldSelect(instruction));
changed |= local_changed;
}
}
return changed;
}
} | #include "xla/service/gpu/transforms/collective_select_folder.h"
#include <initializer_list>
#include <memory>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
class CollectiveSelectFolderTest : public HloTestBase {
public:
absl::Status ExpectNoTranform(std::string_view hlo_template) {
return RunAndCheckHloRewrite(hlo_template, CollectiveSelectFolder(), false)
.status();
}
};
void VerifyDirectDataFeedSPMD(HloModule* module,
std::string_view expected_fwd_operand,
std::string_view expected_bwd_operand) {
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kSelect);
EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kCollectivePermute);
EXPECT_EQ(root->operand(2)->opcode(), HloOpcode::kCollectivePermute);
EXPECT_THAT(root->operand(1)->operand(0)->name(),
HasSubstr(expected_bwd_operand))
<< root->operand(1)->name() << " is expected to operate on "
<< expected_bwd_operand;
EXPECT_THAT(root->operand(2)->operand(0)->name(),
HasSubstr(expected_fwd_operand))
<< root->operand(2)->name() << " is expected to operate on "
<< expected_fwd_operand;
}
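// HLO template for a circular exchange: the last device selects backward data
// and every other device selects forward data before two collective-permutes,
// and the first device selects the backward result afterwards.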
const char* kSPMD2cp = R"(
HloModule test
ENTRY circular_exchange {
in_tpl = (f32[16], f32[16]) parameter(0)
fwd_data = f32[16]{0} get-tuple-element(in_tpl), index=0
bwd_data = f32[16]{0} get-tuple-element(in_tpl), index=1
c_first_id = u32[] constant($first_id_constant)
c_last_id = u32[] constant($last_id_constant)
repl_id = u32[] partition-id()
pred_first_id = pred[] compare(repl_id, c_first_id), direction=EQ
is_first = pred[] broadcast(pred_first_id), dimensions={}
pred_last_id = pred[] compare(repl_id, c_last_id), direction=EQ
is_last = pred[] broadcast(pred_last_id), dimensions={}
data_snd = f32[16] select(is_last, bwd_data, fwd_data)
bwd_data_rcv = f32[16] collective-permute(data_snd), channel_id=1, source_target_pairs=$backward_pairs
fwd_data_rcv = f32[16] collective-permute(data_snd), channel_id=2, source_target_pairs=$forward_pairs
ROOT data_rcv = f32[16] select(is_first, bwd_data_rcv, fwd_data_rcv)
}
)";
TEST_F(CollectiveSelectFolderTest, SimpleForwardCycle) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(),
true,
{{"$first_id_constant", "0"},
{"$last_id_constant", "3"},
{"$forward_pairs", "{{0,1},{1,2},{2,3}}"},
{"$backward_pairs", "{{3,0}}"}}));
VerifyDirectDataFeedSPMD(module.get(), "fwd_data", "bwd_data");
}
TEST_F(CollectiveSelectFolderTest, SimpleBackwardCycle) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(),
true,
{{"$first_id_constant", "3"},
{"$last_id_constant", "0"},
{"$forward_pairs", "{{3,2},{2,1},{1,0}}"},
{"$backward_pairs", "{{0,3}}"}}));
VerifyDirectDataFeedSPMD(module.get(), "fwd_data", "bwd_data");
}
TEST_F(CollectiveSelectFolderTest, CompareNEForwardCycle) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(),
true,
{{"$first_id_constant", "0"},
{"$last_id_constant", "3"},
{"$forward_pairs", "{{0,1},{1,2},{2,3}}"},
{"$backward_pairs", "{{3,0}}"},
{"direction=EQ", "direction=NE"}}));
VerifyDirectDataFeedSPMD(module.get(), "bwd_data", "fwd_data");
}
TEST_F(CollectiveSelectFolderTest, LastDeviceIdMismatch) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(),
true,
{{"$first_id_constant", "0"},
{"$last_id_constant", "2"},
{"$forward_pairs", "{{0,1},{1,2},{2,3}}"},
{"$backward_pairs", "{{3,0}}"}}));
VerifyDirectDataFeedSPMD(module.get(), "data_snd", "fwd_data");
}
const char* kSelectBasecase = R"(
HloModule test
ENTRY computation1 {
compare_true_data = f32[16] parameter(0)
compare_false_data = f32[16] parameter(1)
device_id_constant = u32[] constant($device_id_constant)
repl_id = u32[] replica-id()
prd = pred[] compare(repl_id, device_id_constant), direction=$direction
bcast = pred[] broadcast(prd), dimensions={}
selected_data = f32[16] select(bcast, compare_true_data, compare_false_data)
ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs=$pairs
}
)";
TEST_F(CollectiveSelectFolderTest, EqualTrueBranchTransform) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
true,
{{"$device_id_constant", "3"},
{"$direction", "EQ"},
{"$pairs", "{{3,0}}"}}));
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0)->name(), "compare_true_data");
}
TEST_F(CollectiveSelectFolderTest, EqualFalseBranchTransform) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
true,
{{"$device_id_constant", "3"},
{"$direction", "EQ"},
{"$pairs", "{{0,1},{1,2}}"}}));
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0)->name(), "compare_false_data");
}
TEST_F(CollectiveSelectFolderTest, NotEqualFalseBranchTransform) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
true,
{{"$device_id_constant", "3"},
{"$direction", "NE"},
{"$pairs", "{{3,0}}"}}));
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0)->name(), "compare_false_data");
}
TEST_F(CollectiveSelectFolderTest, NotEqualTrueTrueTransform) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
true,
{{"$device_id_constant", "3"},
{"$direction", "NE"},
{"$pairs", "{{0,1},{1,2},{4,5},{5,6}}"}}));
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0)->name(), "compare_true_data");
}
TEST_F(CollectiveSelectFolderTest, MoreThanOnePair_NotTransformed) {
TF_ASSERT_OK(RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
false,
{{"$device_id_constant", "1"},
{"$direction", "EQ"},
{"$pairs", "{{0,1},{1,2}}"}}));
TF_ASSERT_OK(RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(),
false,
{{"$device_id_constant", "1"},
{"$direction", "NE"},
{"$pairs", "{{0,1},{1,2}}"}}));
}
const char* kSelectNoBroadcast = R"(
HloModule test
ENTRY computation1 {
compare_true_data = f32[16] parameter(0)
compare_false_data = f32[16] parameter(1)
device_id_constant = u32[] constant($device_id_constant)
repl_id = u32[] replica-id()
prd = pred[] compare(repl_id, device_id_constant), direction=$direction
selected_data = f32[16] select(prd, compare_true_data, compare_false_data)
ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs=$pairs
}
)";
TEST_F(CollectiveSelectFolderTest, SelectNoBroadcastTransform) {
TF_ASSERT_OK_AND_ASSIGN(
auto module,
RunAndCheckHloRewrite(kSelectNoBroadcast, CollectiveSelectFolder(),
true,
{{"$device_id_constant", "3"},
{"$direction", "EQ"},
{"$pairs", "{{3,0}}"}}));
auto root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->operand(0)->name(), "compare_true_data");
}
TEST_F(CollectiveSelectFolderTest, ReplicaIdChannelIdMismatch_NotTransformed) {
const absl::string_view hlo = R"(
HloModule test
ENTRY computation1 {
compare_true_data = f32[16] parameter(0)
compare_false_data = f32[16] parameter(1)
device_id_constant = u32[] constant(0)
repl_id = u32[] replica-id()
prd = pred[] compare(repl_id, device_id_constant), direction=EQ
selected_data = f32[16] select(prd, compare_true_data, compare_false_data)
ROOT data_rcv = f32[16] collective-permute(selected_data), channel_id=1, source_target_pairs={{0,1}}
}
)";
TF_ASSERT_OK(ExpectNoTranform(hlo));
}
TEST_F(CollectiveSelectFolderTest, PartIdChannelIdMismatch_NotTransformed) {
const absl::string_view hlo = R"(
HloModule test
ENTRY computation1 {
compare_true_data = f32[16] parameter(0)
compare_false_data = f32[16] parameter(1)
device_id_constant = u32[] constant(0)
repl_id = u32[] partition-id()
prd = pred[] compare(repl_id, device_id_constant), direction=EQ
selected_data = f32[16] select(prd, compare_true_data, compare_false_data)
ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs={{0,1}}
}
)";
TF_ASSERT_OK(ExpectNoTranform(hlo));
}
TEST_F(CollectiveSelectFolderTest, WrongNesting_NotTransformed) {
const absl::string_view hlo = R"(
HloModule test
ENTRY computation1 {
compare_true_data = f32[16] parameter(0)
compare_false_data = f32[16] parameter(1)
device_id_constant = u32[] constant(0)
repl_id = u32[] replica-id()
sum = u32[] add(device_id_constant, repl_id)
prd = pred[] compare(sum, device_id_constant), direction=EQ
selected_data = f32[16] select(prd, compare_true_data, compare_false_data)
ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs={{0,1}}
}
)";
TF_ASSERT_OK(ExpectNoTranform(hlo));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_select_folder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_select_folder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7af3d205-5d2b-4bdd-a060-042589eba699 | cpp | tensorflow/tensorflow | f16 | tensorflow/lite/experimental/shlo/legacy/src/f16.h | tensorflow/lite/experimental/shlo/f16_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_F16_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_F16_H_
#include "tensorflow/lite/experimental/shlo/legacy/src/has_keyword.h"
#if defined(__STDCPP_FLOAT16_T__)
#include <stdfloat>
namespace stablehlo {
using F16 = float16_t;
}
#elif __has_keyword(_Float16)
namespace stablehlo {
using F16 = _Float16;
}
#else
#error Type F16 is not available
#endif
#endif | #include "tensorflow/lite/experimental/shlo/f16.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
namespace shlo_ref {
namespace {
using ::testing::FloatNear;
using RoundtripTypeList = ::testing::Types<float, double>;
template <class T>
struct RoundtripF16Test : testing::Test {};
TYPED_TEST_SUITE(RoundtripF16Test, RoundtripTypeList);
TYPED_TEST(RoundtripF16Test, RoundtripConversions) {
for (TypeParam value : {
-std::numeric_limits<TypeParam>::infinity(),
std::numeric_limits<TypeParam>::infinity(),
TypeParam(-1.0),
TypeParam(-0.5),
TypeParam(-0.0),
TypeParam(1.0),
TypeParam(0.5),
TypeParam(0.0),
}) {
EXPECT_EQ(value, static_cast<TypeParam>(static_cast<F16>(value)));
}
}
TEST(F16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(F16(2) + F16(2)), 4);
EXPECT_EQ(static_cast<float>(F16(2) + F16(-2)), 0);
EXPECT_THAT(static_cast<float>(F16(0.33333f) + F16(0.66667f)),
FloatNear(1.0f, 1e-3));
EXPECT_EQ(static_cast<float>(F16(2.0f) * F16(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(F16(1.0f) / F16(3.0f)),
FloatNear(0.3339f, 1e-3));
EXPECT_EQ(static_cast<float>(-F16(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-F16(-4096.0f)), 4096.0f);
}
TEST(F16Test, DefaultConstruct) { EXPECT_EQ(static_cast<float>(F16()), 0.0f); }
TEST(F16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<F16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<F16, uint16_t>(0x3C00)), 1.0f);
}
TEST(F16Test, ConstructFromArithmeticType) {
const F16 from_int8(static_cast<int8_t>(1));
EXPECT_EQ(static_cast<float>(from_int8), 1);
const F16 from_int16(static_cast<int16_t>(1));
EXPECT_EQ(static_cast<float>(from_int16), 1);
const F16 from_int32(static_cast<int32_t>(1));
EXPECT_EQ(static_cast<float>(from_int32), 1);
const F16 from_int64(static_cast<int64_t>(1));
EXPECT_EQ(static_cast<float>(from_int64), 1);
const F16 from_float(static_cast<float>(1));
EXPECT_EQ(static_cast<float>(from_float), 1);
const F16 from_double(static_cast<double>(1));
EXPECT_EQ(static_cast<float>(from_double), 1);
}
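// Helper that triggers an implicit conversion at the call site: passing an F16
// argument where T is expected exercises F16's conversion to T.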
template <class T>
T ImplicitConversion(T v) {
return v;
}
TEST(F16Test, ConvertToArithmeticType) {
const F16 ref(-1);
EXPECT_EQ(ImplicitConversion<int8_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int16_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int32_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<int64_t>(ref), -1);
EXPECT_EQ(ImplicitConversion<float>(ref), -1);
EXPECT_EQ(ImplicitConversion<double>(ref), -1);
}
TEST(F16Test, ArithmeticOperations) {
for (int i = -8; i < 8; ++i) {
for (int j = -8; j < 8; ++j) {
EXPECT_EQ(F16(i) == F16(j), i == j);
EXPECT_EQ(F16(i) != F16(j), i != j);
EXPECT_EQ(F16(i) > F16(j), i > j);
EXPECT_EQ(F16(i) >= F16(j), i >= j);
EXPECT_EQ(F16(i) < F16(j), i < j);
EXPECT_EQ(F16(i) <= F16(j), i <= j);
}
}
F16 val(0);
EXPECT_EQ(++val, 1);
EXPECT_EQ(val++, 1);
EXPECT_EQ(val, 2);
EXPECT_EQ(val--, 2);
EXPECT_EQ(val, 1);
EXPECT_EQ(--val, 0);
EXPECT_EQ(val += F16(1), 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val *= F16(2), 2);
EXPECT_EQ(val, 2);
EXPECT_EQ(val /= F16(2), 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val -= F16(4), -3);
EXPECT_EQ(val, -3);
EXPECT_EQ(val = F16(7), 7);
EXPECT_EQ(val, 7);
EXPECT_EQ(+val, 7);
EXPECT_EQ(-val, -7);
EXPECT_EQ(static_cast<bool>(val), true);
EXPECT_EQ(!val, false);
EXPECT_EQ(val && F16(2), true);
EXPECT_EQ(val && F16(0), false);
EXPECT_EQ(val || F16(0), true);
EXPECT_EQ(F16(0) || F16(0), false);
}
using ArithmeticTypeList =
::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
template <class T>
struct ArithmeticTypeF16Test : testing::Test {};
TYPED_TEST_SUITE(ArithmeticTypeF16Test, ArithmeticTypeList);
TYPED_TEST(ArithmeticTypeF16Test, InPlaceArithmetic) {
for (TypeParam i = -8; i < 8; ++i) {
for (TypeParam j = -8; j < 8; ++j) {
EXPECT_EQ(F16(i) == j, i == j);
EXPECT_EQ(i == F16(j), i == j);
EXPECT_EQ(F16(i) != j, i != j);
EXPECT_EQ(i != F16(j), i != j);
EXPECT_EQ(F16(i) > j, i > j);
EXPECT_EQ(i > F16(j), i > j);
EXPECT_EQ(F16(i) >= j, i >= j);
EXPECT_EQ(i >= F16(j), i >= j);
EXPECT_EQ(F16(i) < j, i < j);
EXPECT_EQ(i < F16(j), i < j);
EXPECT_EQ(F16(i) <= j, i <= j);
EXPECT_EQ(i <= F16(j), i <= j);
}
}
const TypeParam one = TypeParam(1);
const TypeParam two = TypeParam(2);
const TypeParam four = TypeParam(4);
F16 val(0);
EXPECT_EQ(val += one, 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val *= two, 2);
EXPECT_EQ(val, 2);
EXPECT_EQ(val /= two, 1);
EXPECT_EQ(val, 1);
EXPECT_EQ(val -= four, -3);
EXPECT_EQ(val, -3);
const F16 f16_three(3);
EXPECT_EQ(f16_three + one, 4.);
EXPECT_EQ(f16_three - one, 2.);
EXPECT_EQ(f16_three * two, 3. * two);
EXPECT_EQ(f16_three / two, 3. / two);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/f16.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/f16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e28284ae-2b25-42a4-a4a2-5aa452414aa7 | cpp | google/arolla | forest_model | arolla/decision_forest/expr_operator/forest_model.cc | arolla/decision_forest/expr_operator/forest_model_test.cc | #include "arolla/decision_forest/expr_operator/forest_model.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
absl::Status ValidateExpression(
const expr::ExprNodePtr& expression,
const ForestModel::SubmodelIds& submodel_ids,
const absl::flat_hash_set<std::string>& input_names) {
absl::flat_hash_set<std::string> unused_submodels;
for (const auto& [k, _] : submodel_ids) unused_submodels.insert(k);
for (const auto& node : expr::VisitorOrder(expression)) {
if (node->is_leaf()) {
return absl::InvalidArgumentError(
"leaves are not allowed in an expression");
}
if (node->is_placeholder()) {
if (submodel_ids.count(node->placeholder_key()) > 0) {
unused_submodels.erase(node->placeholder_key());
} else if (!input_names.contains(node->placeholder_key())) {
return absl::InvalidArgumentError(absl::StrFormat(
"P.%s doesn't correspond to any input and it is not "
"found in submodel_ids",
node->placeholder_key()));
}
}
}
if (!unused_submodels.empty()) {
std::vector<std::string> unused_vec(unused_submodels.begin(),
unused_submodels.end());
absl::c_sort(unused_vec);
return absl::InvalidArgumentError(
absl::StrFormat("submodels [%s] are not used in the expression, but "
"are mentioned in submodel_ids",
absl::StrJoin(unused_vec, ", ")));
}
return absl::OkStatus();
}
absl::Status ValidateInputs(const DecisionForestPtr& forest,
const ForestModel::SubmodelIds& submodel_ids,
const std::vector<ForestModel::Parameter>& inputs) {
for (const auto& input : inputs) {
if (submodel_ids.count(input.name) > 0) {
return absl::InvalidArgumentError(absl::StrFormat(
"name collision of an input and a submodel: '%s'", input.name));
}
}
for (const auto& [key, unused] : forest->GetRequiredQTypes()) {
if (key >= inputs.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"not enough args: used_input_index=%d size=%d", key, inputs.size()));
}
}
return absl::OkStatus();
}
absl::Status ValidateOOBFilters(
const std::vector<expr::ExprNodePtr>& oob_filters,
const DecisionForestPtr& forest,
const absl::flat_hash_set<std::string>& input_names) {
for (const expr::ExprNodePtr& filter : oob_filters) {
if (filter == nullptr) {
return absl::InvalidArgumentError("OOB filter can't be nullptr");
}
for (const auto& node : expr::VisitorOrder(filter)) {
if (node->is_leaf()) {
return absl::InvalidArgumentError(
"leaves are not allowed in an OOB filter expressing");
}
if (node->is_placeholder() &&
!input_names.contains(node->placeholder_key())) {
return absl::InvalidArgumentError(
absl::StrCat("no input matches P.", node->placeholder_key(),
" in OOB filter ", expr::ToDebugString(node)));
}
}
}
return absl::OkStatus();
}
absl::StatusOr<expr::ExprNodePtr> AddAll(
const expr::ExprNodePtr& first, absl::Span<const expr::ExprNodePtr> nodes) {
auto res = first;
for (const auto& node : nodes) {
ASSIGN_OR_RETURN(res, expr::CallOp("math.add", {res, node}));
}
return res;
}
using NodeCountMap = absl::flat_hash_map<Fingerprint, int>;
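// Counts how many times each node (keyed by fingerprint) occurs in the
// expression.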
NodeCountMap GetNodeCountMap(const expr::ExprNodePtr& expr) {
return PostOrderTraverse(expr,
[&](const expr::ExprNodePtr& node,
absl::Span<const NodeCountMap* const> visits) {
NodeCountMap res{{node->fingerprint(), 1}};
for (const NodeCountMap* visit : visits) {
for (const auto& [k, v] : *visit) {
if (res.contains(k)) {
res[k] += v;
} else {
res[k] = v;
}
}
}
return res;
});
}
}
absl::StatusOr<ForestModelPtr> ForestModel::Create(
ForestModel::ConstructorArgs args) {
expr::ExprOperatorSignature signature;
signature.parameters.reserve(args.inputs.size());
for (const Parameter& param : args.inputs) {
signature.parameters.push_back({param.name});
}
RETURN_IF_ERROR(expr::ValidateSignature(signature));
RETURN_IF_ERROR(ValidateInputs(args.forest, args.submodel_ids, args.inputs));
absl::flat_hash_set<std::string> input_names;
input_names.reserve(args.inputs.size());
for (const auto& input : args.inputs) {
input_names.insert(input.name);
}
RETURN_IF_ERROR(
ValidateExpression(args.expression, args.submodel_ids, input_names));
if (args.oob_filters.has_value()) {
RETURN_IF_ERROR(
ValidateOOBFilters(*args.oob_filters, args.forest, input_names));
}
FingerprintHasher hasher("d18261c6a5414ee8e5b0af80dc480ea8");
hasher.Combine(args.forest->fingerprint(), args.expression->fingerprint(),
signature);
hasher.Combine(args.submodel_ids.size());
for (const auto& [k, v] : args.submodel_ids) {
hasher.Combine(k).CombineSpan(v);
}
hasher.Combine(args.inputs.size());
for (const auto& input : args.inputs) {
if (input.preprocessing != nullptr) {
hasher.Combine(input.preprocessing->fingerprint());
} else {
hasher.Combine(Fingerprint{});
}
}
if (args.oob_filters.has_value()) {
for (const auto& oob_filter : *args.oob_filters) {
hasher.Combine(oob_filter->fingerprint());
}
} else {
hasher.Combine(Fingerprint{});
}
if (args.truncation_step.has_value()) {
hasher.Combine(*args.truncation_step);
} else {
hasher.Combine(Fingerprint{});
}
std::shared_ptr<ForestModel> model(new ForestModel(
std::move(signature), std::move(hasher).Finish(), std::move(args)));
RETURN_IF_ERROR(model->Initialize());
return model;
}
absl::StatusOr<std::vector<expr::ExprNodePtr>> ForestModel::PreprocessInputs(
const expr::ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
std::vector<expr::ExprNodePtr> args(inputs_.size());
for (int i = 0; i < inputs_.size(); ++i) {
expr::ExprNodePtr arg = node->node_deps()[i];
if (inputs_[i].preprocessing != nullptr) {
ASSIGN_OR_RETURN(auto lambda,
expr::LambdaOperator::Make(inputs_[i].preprocessing));
ASSIGN_OR_RETURN(arg, expr::CallOp(lambda, {arg}));
ASSIGN_OR_RETURN(arg,
expr::ToLowerNode(arg));
}
if (arg->qtype() == nullptr) {
return absl::InternalError(
absl::StrFormat("invalid preprocessing for input #%d: QType metadata "
"can not be propagated",
i));
}
ASSIGN_OR_RETURN(args[i], CastAndValidateArgType(i, std::move(arg)));
}
return args;
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::ApplyPostprocessing(
const expr::ExprNodePtr& node, const expr::ExprNodePtr& raw_result) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
absl::flat_hash_map<std::string, expr::ExprNodePtr> expression_params;
expression_params.reserve(inputs_.size() + 1);
for (int i = 0; i < inputs_.size(); ++i) {
expression_params[inputs_[i].name] = node->node_deps()[i];
}
if (res_tuple_key_) {
if (raw_result == nullptr) {
return absl::InvalidArgumentError(
"raw_result can be nullptr only if expression doesn't use decision "
"forest");
}
expression_params[*res_tuple_key_] = raw_result;
}
ASSIGN_OR_RETURN(auto result, SubstitutePlaceholders(
processed_expression_, expression_params,
true));
if (IsNameAnnotation(node)) {
return expr::CallOp(
"annotation.name",
{result, expr::Literal(Text(expr::ReadNameAnnotation(node)))});
}
return result;
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::ToLowerLevel(
const expr::ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
for (size_t i = 0; i < inputs_.size(); ++i) {
if (node->node_deps()[i]->qtype() == nullptr) {
return node;
}
}
if (!res_tuple_key_) {
return ApplyPostprocessing(node, nullptr);
}
ASSIGN_OR_RETURN(std::vector<expr::ExprNodePtr> args, PreprocessInputs(node));
ASSIGN_OR_RETURN(auto op, CreateDecisionForestOperator(tree_filters_));
ASSIGN_OR_RETURN(auto res_tuple, expr::MakeOpNode(op, std::move(args)));
return ApplyPostprocessing(node, res_tuple);
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::CreatePartialEvaluator(
absl::Span<const std::pair<int, int>> step_ranges,
absl::Span<const expr::ExprNodePtr> preprocessed_inputs) const {
std::vector<TreeFilter> filters;
filters.reserve(step_ranges.size() * tree_filters_.size());
for (auto& [from, to] : step_ranges) {
for (const TreeFilter& filter : tree_filters_) {
if ((filter.step_range_from > from) ||
(filter.step_range_to >= 0 && filter.step_range_to < to)) {
return absl::InvalidArgumentError("requested range is not available");
}
filters.push_back({from, to, filter.submodels});
}
}
ASSIGN_OR_RETURN(auto op, CreateDecisionForestOperator(std::move(filters)));
return expr::MakeOpNode(
op, std::vector(preprocessed_inputs.begin(), preprocessed_inputs.end()));
}
absl::StatusOr<QTypePtr>
ForestModel::InferTypeOfFirstForestInputAfterPreprocessing(
absl::Span<const QTypePtr> input_qtypes) const {
if (!first_forest_input_id_.has_value()) {
return absl::FailedPreconditionError("forest has no inputs");
}
QTypePtr in_type = input_qtypes[*first_forest_input_id_];
if (inputs_[*first_forest_input_id_].preprocessing != nullptr) {
ASSIGN_OR_RETURN(auto lambda,
expr::LambdaOperator::Make(
inputs_[*first_forest_input_id_].preprocessing));
ASSIGN_OR_RETURN(expr::ExprAttributes attr,
lambda->InferAttributes({expr::ExprAttributes(in_type)}));
if (attr.qtype() == nullptr) {
return absl::InternalError("can't infer preprocessed input type");
}
return attr.qtype();
} else {
return in_type;
}
}
absl::StatusOr<QTypePtr> ForestModel::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
QTypePtr out_type = GetQType<float>();
if (first_forest_input_id_.has_value()) {
ASSIGN_OR_RETURN(
QTypePtr in_type,
InferTypeOfFirstForestInputAfterPreprocessing(input_qtypes));
if (IsArrayLikeQType(in_type)) {
ASSIGN_OR_RETURN(const ArrayLikeQType* array_qtype,
ToArrayLikeQType(in_type));
ASSIGN_OR_RETURN(out_type,
array_qtype->WithValueQType(GetQType<float>()));
}
}
ASSIGN_OR_RETURN(auto fake_res,
expr::CallOp("annotation.qtype", {expr::Leaf("fake_res"),
expr::Literal(out_type)}));
ASSIGN_OR_RETURN(
auto fake_res_tuple,
expr::BindOp(
"core.make_tuple",
std::vector<expr::ExprNodePtr>(tree_filters_.size(), fake_res), {}));
absl::flat_hash_map<std::string, expr::ExprNodePtr> expression_params;
if (res_tuple_key_) {
expression_params[*res_tuple_key_] = fake_res_tuple;
}
for (int i = 0; i < inputs_.size(); ++i) {
ASSIGN_OR_RETURN(
expression_params[inputs_[i].name],
expr::CallOp("annotation.qtype", {expr::Leaf("fake_input"),
expr::Literal(input_qtypes[i])}));
}
ASSIGN_OR_RETURN(auto expr, SubstitutePlaceholders(
processed_expression_, expression_params,
true));
const auto result = expr->qtype();
if (result == nullptr) {
return absl::FailedPreconditionError("unable to deduce output qtype");
}
return result;
}
absl::StatusOr<expr::ExprNodePtr> ForestModel::CastAndValidateArgType(
int input_id, expr::ExprNodePtr arg) const {
const auto& required_qtypes = forest_->GetRequiredQTypes();
auto required_qtype_iter = required_qtypes.find(input_id);
if (required_qtype_iter == required_qtypes.end()) {
return arg;
}
QTypePtr required_qtype = required_qtype_iter->second;
QTypePtr required_scalar_qtype = DecayOptionalQType(required_qtype);
ASSIGN_OR_RETURN(QTypePtr actual_scalar_qtype, GetScalarQType(arg->qtype()));
if (required_scalar_qtype == GetQType<float>() &&
actual_scalar_qtype != GetQType<float>() &&
IsNumericScalarQType(actual_scalar_qtype)) {
ASSIGN_OR_RETURN(arg,
expr::BindOp("core.to_float32", {std::move(arg)}, {}));
} else if (required_scalar_qtype != actual_scalar_qtype) {
return absl::InvalidArgumentError(
absl::StrFormat("value type of input #%d (%s) doesn't match: "
"expected to be compatible with %s, got %s",
input_id, expr::GetDebugSnippet(arg),
required_qtype->name(), arg->qtype()->name()));
}
if (IsScalarQType(arg->qtype()) && IsOptionalQType(required_qtype)) {
ASSIGN_OR_RETURN(arg,
expr::BindOp("core.to_optional", {std::move(arg)}, {}));
}
return arg;
}
absl::StatusOr<ForestModel::ExpressionAnalysisResult>
ForestModel::AnalyzeExpression() const {
ExpressionAnalysisResult res;
ASSIGN_OR_RETURN(auto expression, expr::ToLowest(expression_));
for (const auto& node : expr::VisitorOrder(expression)) {
if (node->is_op()) {
ASSIGN_OR_RETURN(auto op, expr::DecayRegisteredOperator(node->op()));
res.plain_sum = res.plain_sum && expr::IsBackendOperator(op, "math.add");
} else if (node->is_placeholder() &&
submodel_ids_.count(node->placeholder_key()) > 0) {
res.submodel_nodes.push_back(node);
const auto& submodels = submodel_ids_.at(node->placeholder_key());
if (submodels.empty()) {
return absl::InvalidArgumentError(absl::StrFormat(
"submodel_ids[%s] is empty", node->placeholder_key()));
}
if (res.bag_count != 0 && res.bag_count != submodels.size()) {
return absl::InvalidArgumentError(
"all submodels should have the same number of bags");
}
res.bag_count = submodels.size();
} else {
res.plain_sum_nodes.push_back(node);
}
}
res.bag_count = std::max(res.bag_count, 1);
return res;
}
absl::Status ForestModel::HandlePlainSumExpression(
const std::vector<expr::ExprNodePtr>& submodel_nodes,
std::vector<expr::ExprNodePtr>&& plain_sum_nodes) {
ASSIGN_OR_RETURN(
processed_expression_,
expr::CallOp("core.get_first", {expr::Placeholder(*res_tuple_key_)}));
auto count_map = GetNodeCountMap(expression_);
for (auto& node : plain_sum_nodes) {
int count = count_map[node->fingerprint()];
if (count > 1) {
ASSIGN_OR_RETURN(node, expr::CallOp("math.multiply",
{node, expr::Literal<float>(count)}));
}
}
ASSIGN_OR_RETURN(processed_expression_,
AddAll(processed_expression_, plain_sum_nodes));
TreeFilter used_trees;
for (const auto& node : submodel_nodes) {
int count = count_map[node->fingerprint()];
for (int submodel_id : submodel_ids_[node->placeholder_key()]) {
used_trees.submodels.insert(submodel_id);
if (count > 1) submodel_weight_multipliers_[submodel_id] = count;
}
}
tree_filters_.push_back(used_trees);
return absl::OkStatus();
}
absl::Status ForestModel::HandleExpressionWithoutBags() {
absl::flat_hash_map<std::string, expr::ExprNodePtr> params;
for (const auto& [key, submodels] : submodel_ids_) {
ASSIGN_OR_RETURN(
params[key],
expr::CallOp("core.get_nth",
{expr::Placeholder(*res_tuple_key_),
expr::Literal<int64_t>(tree_filters_.size())}));
TreeFilter filter;
filter.submodels.insert(submodels.begin(), submodels.end());
tree_filters_.push_back(std::move(filter));
}
ASSIGN_OR_RETURN(processed_expression_,
SubstitutePlaceholders(expression_, params));
return absl::OkStatus();
}
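// Builds an expression for the number of bags contributing to the result: the
// constant bag_count_ when there are no out-of-bag filters, otherwise the
// per-row count of bags whose OOB filter passes (missing if none pass).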
absl::StatusOr<expr::ExprNodePtr> ForestModel::UsedBagCountExpr() const {
DCHECK_GT(bag_count_, 0);
if (!oob_filters_.has_value()) {
return expr::Literal<float>(bag_count_);
}
expr::ExprNodePtr used_bag_count = nullptr;
for (int bag_id = 0; bag_id < bag_count_; ++bag_id) {
ASSIGN_OR_RETURN(expr::ExprNodePtr used,
expr::CallOp("core.where", {(*oob_filters_)[bag_id],
expr::Literal<float>(1),
expr::Literal<float>(0)}));
if (used_bag_count != nullptr) {
ASSIGN_OR_RETURN(used_bag_count,
expr::CallOp("math.add", {used_bag_count, used}));
} else {
used_bag_count = used;
}
}
ASSIGN_OR_RETURN(
used_bag_count,
expr::CallOp(
"core.where",
{expr::CallOp("core.greater",
{used_bag_count, expr::Literal<float>(0)}),
used_bag_count, expr::Literal<OptionalValue<float>>(std::nullopt)}));
return used_bag_count;
}
absl::Status ForestModel::HandleExpressionWithBags() {
std::vector<expr::ExprNodePtr> bags(bag_count_);
for (int bag_id = 0; bag_id < bag_count_; ++bag_id) {
absl::flat_hash_map<std::string, expr::ExprNodePtr> params;
for (const auto& [key, submodels] : submodel_ids_) {
expr::ExprNodePtr& param = params[key];
ASSIGN_OR_RETURN(
param, expr::CallOp("core.get_nth",
{expr::Placeholder(*res_tuple_key_),
expr::Literal<int64_t>(tree_filters_.size())}));
TreeFilter filter;
if (submodels.size() <= bag_id) {
return absl::InternalError("invalid submodel_ids");
}
filter.submodels.insert(submodels[bag_id]);
tree_filters_.push_back(std::move(filter));
submodel_weight_multipliers_[submodels[bag_id]] = bag_count_;
}
ASSIGN_OR_RETURN(bags[bag_id], SubstitutePlaceholders(expression_, params));
if (oob_filters_.has_value()) {
ASSIGN_OR_RETURN(
bags[bag_id],
expr::CallOp("core.where", {(*oob_filters_)[bag_id], bags[bag_id],
expr::Literal<float>(0)}));
}
}
ASSIGN_OR_RETURN(
auto sum, AddAll(bags[0], absl::Span<expr::ExprNodePtr>(bags.data() + 1,
bag_count_ - 1)));
ASSIGN_OR_RETURN(processed_expression_,
expr::CallOp("math.divide", {sum, UsedBagCountExpr()}));
return absl::OkStatus();
}
absl::Status ForestModel::Initialize() {
if (submodel_ids_.empty()) {
res_tuple_key_ = std::nullopt;
processed_expression_ = expression_;
bag_count_ = 1;
return absl::OkStatus();
} else {
res_tuple_key_ = submodel_ids_.begin()->first;
}
ASSIGN_OR_RETURN(auto info, AnalyzeExpression());
is_plain_sum_ = info.plain_sum;
bag_count_ = info.bag_count;
if (oob_filters_.has_value() && oob_filters_->size() != bag_count_) {
return absl::FailedPreconditionError(
"if oob_filters is present, its size must be equal to bag count");
}
if (info.plain_sum && !oob_filters_) {
RETURN_IF_ERROR(HandlePlainSumExpression(info.submodel_nodes,
std::move(info.plain_sum_nodes)));
} else if (bag_count_ == 1 && !oob_filters_) {
RETURN_IF_ERROR(HandleExpressionWithoutBags());
} else {
RETURN_IF_ERROR(HandleExpressionWithBags());
}
if (truncation_step_.has_value()) {
for (TreeFilter& filter : tree_filters_) {
filter.step_range_to = *truncation_step_;
}
}
for (const auto& [id, _] : forest_->GetRequiredQTypes()) {
if (first_forest_input_id_.has_value()) {
first_forest_input_id_ = std::min(*first_forest_input_id_, id);
} else {
first_forest_input_id_ = id;
}
}
return absl::OkStatus();
}
namespace {
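// Returns the trees whose step falls inside the union of the filters' step
// ranges. Submodel filters are ignored, so this over-approximates the set of
// trees that can actually be used.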
std::vector<DecisionTree> GetMaybeUsedTrees(
absl::Span<const DecisionTree> trees,
absl::Span<const TreeFilter> tree_filters) {
if (tree_filters.empty()) {
return {};
}
TreeFilter combined_step_filter{
.step_range_from = tree_filters.front().step_range_from,
.step_range_to = tree_filters.front().step_range_to};
for (int i = 1; i < tree_filters.size(); ++i) {
combined_step_filter.step_range_from = std::min(
combined_step_filter.step_range_from, tree_filters[i].step_range_from);
if (tree_filters[i].step_range_to == -1 ||
combined_step_filter.step_range_to == -1) {
combined_step_filter.step_range_to = -1;
} else {
combined_step_filter.step_range_to = std::max(
combined_step_filter.step_range_to, tree_filters[i].step_range_to);
}
}
std::vector<DecisionTree> res;
for (const DecisionTree& tree : trees) {
if (combined_step_filter(tree.tag)) {
res.push_back(tree);
}
}
return res;
}
}
absl::StatusOr<expr::ExprOperatorPtr> ForestModel::CreateDecisionForestOperator(
std::vector<TreeFilter> tree_filters) const {
DecisionForestPtr forest = forest_;
auto required_types = forest->GetRequiredQTypes();
if (!submodel_weight_multipliers_.empty()) {
std::vector<DecisionTree> trees =
GetMaybeUsedTrees(forest->GetTrees(), tree_filters);
for (DecisionTree& tree : trees) {
auto mult_iter = submodel_weight_multipliers_.find(tree.tag.submodel_id);
if (mult_iter != submodel_weight_multipliers_.end()) {
tree.weight *= mult_iter->second;
}
}
ASSIGN_OR_RETURN(forest, DecisionForest::FromTrees(std::move(trees)));
}
return std::make_shared<DecisionForestOperator>(
std::move(forest), std::move(tree_filters), required_types);
}
ForestModel::ForestModel(expr::ExprOperatorSignature&& signature,
Fingerprint&& fingerprint, ConstructorArgs&& args)
: expr::BasicExprOperator("core.forest_model", signature,
"DecisionForest with pre- and post-processing",
fingerprint),
forest_(std::move(args.forest)),
submodel_ids_(std::move(args.submodel_ids)),
oob_filters_(std::move(args.oob_filters)),
truncation_step_(args.truncation_step),
inputs_(std::move(args.inputs)),
expression_(std::move(args.expression)) {}
absl::string_view ForestModel::py_qvalue_specialization_key() const {
return kForestModelQValueSpecializationKey;
}
} | #include "arolla/decision_forest/expr_operator/forest_model.h"
#include <cstdint>
#include <limits>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/serving/expr_compiler.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithNameAnnotation;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::testing::WhenDynamicCastTo;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
absl::StatusOr<DecisionForestPtr> CreateForest() {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].tag.submodel_id = 0;
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.submodel_id = 1;
return DecisionForest::FromTrees(std::move(trees));
}
absl::StatusOr<ForestModelPtr> CreateForestModelOp() {
ForestModel::SubmodelIds submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSIGN_OR_RETURN(auto preprocessing,
expr::CallOp("math.add", {expr::Placeholder("arg"),
expr::Literal<int64_t>(1)}));
ASSIGN_OR_RETURN(auto expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSIGN_OR_RETURN(auto forest, CreateForest());
return ForestModel::Create({.forest = std::move(forest),
.submodel_ids = std::move(submodel_ids),
.inputs = {{"p1"}, {"p2", preprocessing}},
.expression = expression});
}
absl::Status InitAlias() {
static absl::NoDestructor<absl::Status> init_status(
expr::RegisterOperatorAlias("alias_math.add", "math.add").status());
return *init_status;
}
class ForestModelTest : public ::testing::Test {
void SetUp() override { CHECK_OK(InitAlias()); }
};
TEST_F(ForestModelTest, NotEnoughArgs) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSERT_OK_AND_ASSIGN(model_data.expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}};
EXPECT_THAT(ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough args")));
}
TEST_F(ForestModelTest, ParameterNameCollision) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0}}, {"Y", {1}}};
ASSERT_OK_AND_ASSIGN(
auto preprocessing,
expr::CallOp("math.add", {expr::Placeholder("arg"),
expr::Literal<OptionalValue<int64_t>>(1)}));
ASSERT_OK_AND_ASSIGN(model_data.expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}, {"p1", preprocessing}};
EXPECT_THAT(ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("non-unique parameter name: 'p1'")));
model_data.inputs = {{"X"}, {"p2", preprocessing}};
EXPECT_THAT(
ForestModel::Create(model_data),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("name collision of an input and a submodel: 'X'")));
}
TEST_F(ForestModelTest, IncorrectExpression) {
ASSERT_OK_AND_ASSIGN(DecisionForestPtr forest, DecisionForest::FromTrees({}));
{
ASSERT_OK_AND_ASSIGN(expr::ExprNodePtr expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("Y")}));
EXPECT_THAT(ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}},
.inputs = {{"p1"}, {"p2"}},
.expression = expression}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("P.Y doesn't correspond to any input and it "
"is not found in submodel_ids")));
}
{
expr::ExprNodePtr expression = expr::Placeholder("X");
EXPECT_THAT(
ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.expression = expression}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("submodels [Y] are not used in the expression, but are "
"mentioned in submodel_ids")));
}
{
expr::ExprNodePtr expression = expr::Leaf("X");
EXPECT_THAT(
ForestModel::Create({.forest = forest, .expression = expression}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("leaves are not allowed in an expression")));
}
}
TEST_F(ForestModelTest, UsingInputInExpression) {
ASSERT_OK_AND_ASSIGN(auto expression,
expr::CallOp("math.add", {expr::Placeholder("X"),
expr::Placeholder("p1")}));
auto f1 = expr::Literal<float>(1.0);
auto i5 = expr::Literal<int64_t>(5);
ASSERT_OK_AND_ASSIGN(auto forest, CreateForest());
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = forest,
.submodel_ids = {{"X", {0}}},
.inputs = {{"p1"}, {"p2"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {f1, i5}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_TRUE(expanded_model->is_op());
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_THAT(expanded_model->node_deps()[1], EqualsExpr(f1));
}
TEST_F(ForestModelTest, QTypePropagation) {
ASSERT_OK_AND_ASSIGN(auto model_op, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetQType<float>());
}
TEST_F(ForestModelTest, QTypePropagationUsesPreprocessing) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0, 1}}};
ASSERT_OK_AND_ASSIGN(auto preprocessing,
expr::CallOp("core.const_with_shape",
{expr::Literal<DenseArrayShape>({5}),
expr::Placeholder("arg")}));
model_data.expression = expr::Placeholder("X");
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1", preprocessing}, {"p2", preprocessing}};
ASSERT_OK_AND_ASSIGN(auto model_op, ForestModel::Create(model_data));
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetDenseArrayQType<float>());
}
TEST_F(ForestModelTest, QTypePropagationPlainSumWithBroadcasting) {
ForestModel::ConstructorArgs model_data;
model_data.submodel_ids = {{"X", {0, 1}}};
ASSERT_OK_AND_ASSIGN(
model_data.expression,
expr::CallOp("math.add",
{expr::Literal(CreateDenseArray<float>({1., 2., 3.})),
expr::Placeholder("X")}));
ASSERT_OK_AND_ASSIGN(model_data.forest, CreateForest());
model_data.inputs = {{"p1"}, {"p2"}};
ASSERT_OK_AND_ASSIGN(auto model_op, ForestModel::Create(model_data));
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(model_op, {expr::Literal<OptionalValue<float>>(1.0),
expr::Literal<int64_t>(5)}));
EXPECT_EQ(model->qtype(), GetDenseArrayQType<float>());
ASSERT_OK_AND_ASSIGN(auto lowered, expr::ToLowest(model));
EXPECT_EQ(lowered->qtype(), GetDenseArrayQType<float>());
}
TEST_F(ForestModelTest, EmptyForest) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
expr::ExprNodePtr expression = expr::Literal<float>(0.5);
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = std::move(forest),
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_THAT(expanded_model, EqualsExpr(expression));
}
TEST_F(ForestModelTest, ToLower) {
ASSERT_OK_AND_ASSIGN(auto model_op, CreateForestModelOp());
{
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0),
expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(model, WithNameAnnotation(model, "forest"));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_NE(model->fingerprint(), expanded_model->fingerprint());
EXPECT_EQ(ReadNameAnnotation(expanded_model), "forest");
}
{
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {expr::Leaf("f"), expr::Leaf("i")}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_EQ(model->fingerprint(), expanded_model->fingerprint());
}
}
absl::StatusOr<expr::ExprNodePtr> GetExpressionForTest(std::string A,
std::string B,
std::string C,
std::string op) {
return expr::CallOp(
"alias_math.add",
{expr::Placeholder(A),
expr::CallOp(op, {expr::Placeholder(B), expr::Placeholder(C)})});
}
TEST_F(ForestModelTest, ToLowerMergeSubmodels) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("input", "X", "Y", "math.add"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_THAT(expanded_model->node_deps()[0]->op().get(),
WhenDynamicCastTo<const expr::GetNthOperator*>(NotNull()));
}
TEST_F(ForestModelTest, MergeDuplicatedSubmodels) {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {3.0};
trees[1].tag.submodel_id = 1;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("X", "Y", "X", "math.add"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model, expr::CallOp(model_op, {}));
FrameLayout::Builder layout_builder;
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model, {}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output), Eq(5.0f));
}
TEST_F(ForestModelTest, DuplicatedNodes) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(auto expression,
GetExpressionForTest("input", "X", "input", "math.add"));
ASSERT_OK_AND_ASSIGN(auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Leaf("input")}));
FrameLayout::Builder layout_builder;
auto input_slot = layout_builder.AddSlot<float>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(
expr::DynamicEvaluationEngineOptions(), &layout_builder, model,
{{"input", TypedSlot::FromSlot(input_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(input_slot, 3.1f);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 6.2f);
}
TEST_F(ForestModelTest, ToLowerSingleBag) {
ASSERT_OK_AND_ASSIGN(auto forest, DecisionForest::FromTrees({}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0}}, {"Y", {1}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.0)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsRegisteredOperator(expanded_model->op()));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.add"));
EXPECT_TRUE(expanded_model->node_deps()[0]->is_literal());
EXPECT_TRUE(IsRegisteredOperator(expanded_model->node_deps()[1]->op()));
EXPECT_TRUE(IsBackendOperator(
*DecayRegisteredOperator(expanded_model->node_deps()[1]->op()),
"math.multiply"));
}
TEST_F(ForestModelTest, ToLowerExpandBags) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {2.0};
trees[1].tag.submodel_id = 1;
trees[2].adjustments = {4.0};
trees[2].tag.submodel_id = 2;
trees[3].adjustments = {8.0};
trees[3].tag.submodel_id = 3;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.2)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.divide"));
ASSERT_OK_AND_ASSIGN(
auto model_fn,
(ExprCompiler<std::tuple<float>, float>()).CompileOperator(model_op));
ASSERT_OK_AND_ASSIGN(float res, model_fn(1.2));
EXPECT_FLOAT_EQ(res, 69.2f);
}
TEST_F(ForestModelTest, OutOfBagFilters) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag.submodel_id = 0;
trees[1].adjustments = {2.0};
trees[1].tag.submodel_id = 1;
trees[2].adjustments = {4.0};
trees[2].tag.submodel_id = 2;
trees[3].adjustments = {8.0};
trees[3].tag.submodel_id = 3;
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(auto filter0,
expr::CallOp("core.less", {expr::Placeholder("input"),
expr::Literal(2.0f)}));
ASSERT_OK_AND_ASSIGN(auto filter1,
expr::CallOp("core.less", {expr::Literal(2.0f),
expr::Placeholder("input")}));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression,
.oob_filters = std::vector{filter0, filter1}}));
ASSERT_OK_AND_ASSIGN(auto model,
expr::CallOp(model_op, {expr::Literal<float>(1.2)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
EXPECT_TRUE(IsBackendOperator(*DecayRegisteredOperator(expanded_model->op()),
"math.divide"));
ASSERT_OK_AND_ASSIGN(auto model_fn,
(ExprCompiler<std::tuple<float>, OptionalValue<float>>())
.CompileOperator(model_op));
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(1));
EXPECT_EQ(res, 9.0f);
}
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(2));
EXPECT_EQ(res, OptionalValue<float>{});
}
{
ASSERT_OK_AND_ASSIGN(OptionalValue<float> res, model_fn(3));
EXPECT_EQ(res, 131.0f);
}
}
TEST_F(ForestModelTest, BagsAndTruncation) {
std::vector<DecisionTree> trees(4);
trees[0].adjustments = {1.0};
trees[0].tag = {.step = 0, .submodel_id = 0};
trees[1].adjustments = {2.0};
trees[1].tag = {.step = 0, .submodel_id = 1};
trees[2].adjustments = {4.0};
trees[2].tag = {.step = 1, .submodel_id = 2};
trees[3].adjustments = {8.0};
trees[3].tag = {.step = 1, .submodel_id = 3};
ASSERT_OK_AND_ASSIGN(auto forest,
DecisionForest::FromTrees({std::move(trees)}));
ASSERT_OK_AND_ASSIGN(
auto expression,
GetExpressionForTest("input", "X", "Y", "math.multiply"));
ASSERT_OK_AND_ASSIGN(
auto model_op,
ForestModel::Create({.forest = std::move(forest),
.submodel_ids = {{"X", {0, 2}}, {"Y", {1, 3}}},
.inputs = {{"input"}},
.expression = expression,
.truncation_step = 1}));
ASSERT_OK_AND_ASSIGN(
auto model_fn,
(ExprCompiler<std::tuple<float>, float>()).CompileOperator(model_op));
ASSERT_OK_AND_ASSIGN(float res, model_fn(1.2));
EXPECT_FLOAT_EQ(res, 5.2f);
}
TEST_F(ForestModelTest, ConversionToOptional) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<float>(1.0);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_optional", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, ConversionFromDouble) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<double>(1.0);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_float32", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, ConversionFromInteger) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
const auto input = expr::Literal<int>(1);
ASSERT_OK_AND_ASSIGN(const auto converted_input,
expr::CallOp("core.to_float32", {input}));
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model, expr::ToLowest(model));
ASSERT_OK_AND_ASSIGN(
auto model_with_converted_input,
expr::CallOp(model_op, {converted_input, expr::Literal<int64_t>(5)}));
ASSERT_OK_AND_ASSIGN(auto expanded_model_with_converted_input,
expr::ToLowest(model_with_converted_input));
EXPECT_THAT(expanded_model_with_converted_input, EqualsExpr(expanded_model));
}
TEST_F(ForestModelTest, EvaluateOnScalars) {
ASSERT_OK_AND_ASSIGN(auto forest_model, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(forest_model, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<float>();
auto i_slot = layout_builder.AddSlot<int64_t>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(const FrameLayout::Slot<float> output,
executable_model->output_slot().ToSlot<float>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(f_slot, 1.0f);
ctx.Set(i_slot, 5);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 5.5f);
ctx.Set(f_slot, 3.0f);
ctx.Set(i_slot, 0);
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_FLOAT_EQ(ctx.Get(output), 8.5f);
}
TEST_F(ForestModelTest, EvaluateOnScalarAndArray) {
ASSERT_OK_AND_ASSIGN(auto forest_model, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model,
expr::CallOp(forest_model, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<DenseArray<float>>();
auto i_slot = layout_builder.AddSlot<int64_t>();
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"DENSE_ARRAY_FLOAT32 and "
"arg[1] is OPTIONAL_INT64")));
}
TEST_F(ForestModelTest, EvaluateOnDenseArrays) {
ASSERT_OK_AND_ASSIGN(const auto model_op, CreateForestModelOp());
ASSERT_OK_AND_ASSIGN(
auto model, expr::CallOp(model_op, {expr::Leaf("f"), expr::Leaf("i")}));
FrameLayout::Builder layout_builder;
auto f_slot = layout_builder.AddSlot<DenseArray<float>>();
auto i_slot = layout_builder.AddSlot<DenseArray<int64_t>>();
ASSERT_OK_AND_ASSIGN(
auto executable_model,
CompileAndBindForDynamicEvaluation(expr::DynamicEvaluationEngineOptions(),
&layout_builder, model,
{{"f", TypedSlot::FromSlot(f_slot)},
{"i", TypedSlot::FromSlot(i_slot)}}));
FrameLayout layout = std::move(layout_builder).Build();
ASSERT_OK_AND_ASSIGN(
const FrameLayout::Slot<DenseArray<float>> output,
executable_model->output_slot().ToSlot<DenseArray<float>>());
RootEvaluationContext ctx(&layout);
EXPECT_OK(executable_model->InitializeLiterals(&ctx));
ctx.Set(f_slot, CreateDenseArray<float>({1.0f, 3.0f}));
ctx.Set(i_slot, CreateDenseArray<int64_t>({5, 0}));
EXPECT_THAT(executable_model->Execute(&ctx), IsOk());
EXPECT_THAT(ctx.Get(output), ElementsAre(5.5f, 8.5f));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/forest_model.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/forest_model_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ced0654c-b312-4590-8e3b-72ba1bdfd36c | cpp | tensorflow/tensorflow | saved_model_splitter | tensorflow/tools/proto_splitter/cc/saved_model_splitter.cc | tensorflow/tools/proto_splitter/cc/saved_model_splitter_test.cc | #include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h"
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/graph_def_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/large_node_splitter.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tools::proto_splitter {
using namespace std::string_literals;
absl::Status SavedModelSplitter::BuildChunks() {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
SavedModel* sm = tsl::protobuf::DynamicCastToGenerated<SavedModel>(message());
int max_size = GetMaxSize();
if (GetInitialSize() < max_size) return absl::OkStatus();
std::vector<FieldType> fields_to_graph_def = {"meta_graphs"s, 0,
"graph_def"s};
GraphDefSplitter graph_def_splitter(
sm->mutable_meta_graphs(0)->mutable_graph_def(), this,
&fields_to_graph_def);
TF_RETURN_IF_ERROR(graph_def_splitter.BuildChunks());
if (sm->ByteSizeLong() < max_size) return absl::OkStatus();
LargeNodeSplitter<GraphDef> entire_graph_splitter(
sm->mutable_meta_graphs(0)->mutable_graph_def(), this,
&fields_to_graph_def);
int index = 1;
entire_graph_splitter.SetChunkIndex(&index);
TF_RETURN_IF_ERROR(entire_graph_splitter.BuildChunks());
return absl::OkStatus();
}
}
} | #include "tensorflow/tools/proto_splitter/cc/saved_model_splitter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/tools/proto_splitter/cc/max_size.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
#define EXPECT_CHUNK_SIZES(chunks, max_size) \
do { \
for (auto chunk : *chunks) { \
if (std::holds_alternative<std::shared_ptr<tsl::protobuf::Message>>( \
chunk)) { \
EXPECT_LE(std::get<std::shared_ptr<tsl::protobuf::Message>>(chunk) \
->ByteSizeLong(), \
max_size); \
} else if (std::holds_alternative<tsl::protobuf::Message*>(chunk)) { \
EXPECT_LE(std::get<tsl::protobuf::Message*>(chunk)->ByteSizeLong(), \
max_size); \
} \
} \
} while (0)
std::string NonChunkedSavedModel() {
return io::JoinPath(testing::TensorFlowSrcRoot(), "cc", "saved_model",
"testdata", "chunked_saved_model", "non_chunked_model",
"saved_model.pb");
}
TEST(SavedModelSplitterTest, TestSplit) {
SavedModel proto;
int64_t max_size = 80000;
DebugSetMaxSize(max_size);
TF_EXPECT_OK(tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
NonChunkedSavedModel(), &proto));
EXPECT_GE(proto.ByteSizeLong(), GetMaxSize());
SavedModelSplitter splitter(&proto);
TF_ASSERT_OK_AND_ASSIGN(auto x, splitter.Split());
std::vector<MessageBytes>* chunks = x.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_EQ(2, chunks->size());
EXPECT_CHUNK_SIZES(chunks, max_size);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/saved_model_splitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/saved_model_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
419cad26-d16a-4110-b69d-61fb3bd29932 | cpp | google/leveldb | env | util/env.cc | util/env_test.cc | #include "leveldb/env.h"
#include <cstdarg>
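// <windows.h> #defines DeleteFile as a macro, which would mangle the
// Env::DeleteFile compatibility method below; undo that here.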
#if defined(_WIN32) && defined(LEVELDB_DELETEFILE_UNDEFINED)
#undef DeleteFile
#endif
namespace leveldb {
Env::Env() = default;
Env::~Env() = default;
Status Env::NewAppendableFile(const std::string& fname, WritableFile** result) {
return Status::NotSupported("NewAppendableFile", fname);
}
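// The Remove*/Delete* pairs below forward to each other by default: RemoveDir
// and RemoveFile are the current names, DeleteDir and DeleteFile are kept for
// backwards compatibility, so an Env subclass only needs to override one
// method of each pair.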
Status Env::RemoveDir(const std::string& dirname) { return DeleteDir(dirname); }
Status Env::DeleteDir(const std::string& dirname) { return RemoveDir(dirname); }
Status Env::RemoveFile(const std::string& fname) { return DeleteFile(fname); }
Status Env::DeleteFile(const std::string& fname) { return RemoveFile(fname); }
SequentialFile::~SequentialFile() = default;
RandomAccessFile::~RandomAccessFile() = default;
WritableFile::~WritableFile() = default;
Logger::~Logger() = default;
FileLock::~FileLock() = default;
void Log(Logger* info_log, const char* format, ...) {
if (info_log != nullptr) {
std::va_list ap;
va_start(ap, format);
info_log->Logv(format, ap);
va_end(ap);
}
}
static Status DoWriteStringToFile(Env* env, const Slice& data,
const std::string& fname, bool should_sync) {
WritableFile* file;
Status s = env->NewWritableFile(fname, &file);
if (!s.ok()) {
return s;
}
s = file->Append(data);
if (s.ok() && should_sync) {
s = file->Sync();
}
if (s.ok()) {
s = file->Close();
}
delete file;
if (!s.ok()) {
env->RemoveFile(fname);
}
return s;
}
Status WriteStringToFile(Env* env, const Slice& data,
const std::string& fname) {
return DoWriteStringToFile(env, data, fname, false);
}
Status WriteStringToFileSync(Env* env, const Slice& data,
const std::string& fname) {
return DoWriteStringToFile(env, data, fname, true);
}
Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
data->clear();
SequentialFile* file;
Status s = env->NewSequentialFile(fname, &file);
if (!s.ok()) {
return s;
}
static const int kBufferSize = 8192;
char* space = new char[kBufferSize];
while (true) {
Slice fragment;
s = file->Read(kBufferSize, &fragment, space);
if (!s.ok()) {
break;
}
data->append(fragment.data(), fragment.size());
if (fragment.empty()) {
break;
}
}
delete[] space;
delete file;
return s;
}
EnvWrapper::~EnvWrapper() {}
} | #include "leveldb/env.h"
#include <algorithm>
#include "gtest/gtest.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/mutexlock.h"
#include "util/testutil.h"
namespace leveldb {
class EnvTest : public testing::Test {
public:
EnvTest() : env_(Env::Default()) {}
Env* env_;
};
TEST_F(EnvTest, ReadWrite) {
Random rnd(test::RandomSeed());
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/open_on_read.txt";
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
static const size_t kDataSize = 10 * 1048576;
std::string data;
while (data.size() < kDataSize) {
int len = rnd.Skewed(18);
std::string r;
test::RandomString(&rnd, len, &r);
ASSERT_LEVELDB_OK(writable_file->Append(r));
data += r;
if (rnd.OneIn(10)) {
ASSERT_LEVELDB_OK(writable_file->Flush());
}
}
ASSERT_LEVELDB_OK(writable_file->Sync());
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
SequentialFile* sequential_file;
ASSERT_LEVELDB_OK(env_->NewSequentialFile(test_file_name, &sequential_file));
std::string read_result;
std::string scratch;
while (read_result.size() < data.size()) {
int len = std::min<int>(rnd.Skewed(18), data.size() - read_result.size());
scratch.resize(std::max(len, 1));
Slice read;
ASSERT_LEVELDB_OK(sequential_file->Read(len, &read, &scratch[0]));
if (len > 0) {
ASSERT_GT(read.size(), 0);
}
ASSERT_LE(read.size(), len);
read_result.append(read.data(), read.size());
}
ASSERT_EQ(read_result, data);
delete sequential_file;
}
TEST_F(EnvTest, RunImmediately) {
struct RunState {
port::Mutex mu;
port::CondVar cvar{&mu};
bool called = false;
static void Run(void* arg) {
RunState* state = reinterpret_cast<RunState*>(arg);
MutexLock l(&state->mu);
ASSERT_EQ(state->called, false);
state->called = true;
state->cvar.Signal();
}
};
RunState state;
env_->Schedule(&RunState::Run, &state);
MutexLock l(&state.mu);
while (!state.called) {
state.cvar.Wait();
}
}
TEST_F(EnvTest, RunMany) {
struct RunState {
port::Mutex mu;
port::CondVar cvar{&mu};
int run_count = 0;
};
struct Callback {
RunState* const state_;
bool run = false;
Callback(RunState* s) : state_(s) {}
static void Run(void* arg) {
Callback* callback = reinterpret_cast<Callback*>(arg);
RunState* state = callback->state_;
MutexLock l(&state->mu);
state->run_count++;
callback->run = true;
state->cvar.Signal();
}
};
RunState state;
Callback callback1(&state);
Callback callback2(&state);
Callback callback3(&state);
Callback callback4(&state);
env_->Schedule(&Callback::Run, &callback1);
env_->Schedule(&Callback::Run, &callback2);
env_->Schedule(&Callback::Run, &callback3);
env_->Schedule(&Callback::Run, &callback4);
MutexLock l(&state.mu);
while (state.run_count != 4) {
state.cvar.Wait();
}
ASSERT_TRUE(callback1.run);
ASSERT_TRUE(callback2.run);
ASSERT_TRUE(callback3.run);
ASSERT_TRUE(callback4.run);
}
struct State {
port::Mutex mu;
port::CondVar cvar{&mu};
int val GUARDED_BY(mu);
int num_running GUARDED_BY(mu);
State(int val, int num_running) : val(val), num_running(num_running) {}
};
static void ThreadBody(void* arg) {
State* s = reinterpret_cast<State*>(arg);
s->mu.Lock();
s->val += 1;
s->num_running -= 1;
s->cvar.Signal();
s->mu.Unlock();
}
TEST_F(EnvTest, StartThread) {
State state(0, 3);
for (int i = 0; i < 3; i++) {
env_->StartThread(&ThreadBody, &state);
}
MutexLock l(&state.mu);
while (state.num_running != 0) {
state.cvar.Wait();
}
ASSERT_EQ(state.val, 3);
}
TEST_F(EnvTest, TestOpenNonExistentFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string non_existent_file = test_dir + "/non_existent_file";
ASSERT_TRUE(!env_->FileExists(non_existent_file));
RandomAccessFile* random_access_file;
Status status =
env_->NewRandomAccessFile(non_existent_file, &random_access_file);
#if defined(LEVELDB_PLATFORM_CHROMIUM)
ASSERT_TRUE(status.IsIOError());
#else
ASSERT_TRUE(status.IsNotFound());
#endif
SequentialFile* sequential_file;
status = env_->NewSequentialFile(non_existent_file, &sequential_file);
#if defined(LEVELDB_PLATFORM_CHROMIUM)
ASSERT_TRUE(status.IsIOError());
#else
ASSERT_TRUE(status.IsNotFound());
#endif
}
TEST_F(EnvTest, ReopenWritableFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_writable_file.txt";
env_->RemoveFile(test_file_name);
WritableFile* writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
std::string data("hello world!");
ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
ASSERT_LEVELDB_OK(env_->NewWritableFile(test_file_name, &writable_file));
data = "42";
ASSERT_LEVELDB_OK(writable_file->Append(data));
ASSERT_LEVELDB_OK(writable_file->Close());
delete writable_file;
ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("42"), data);
env_->RemoveFile(test_file_name);
}
TEST_F(EnvTest, ReopenAppendableFile) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file_name = test_dir + "/reopen_appendable_file.txt";
env_->RemoveFile(test_file_name);
WritableFile* appendable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
std::string data("hello world!");
ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(test_file_name, &appendable_file));
data = "42";
ASSERT_LEVELDB_OK(appendable_file->Append(data));
ASSERT_LEVELDB_OK(appendable_file->Close());
delete appendable_file;
ASSERT_LEVELDB_OK(ReadFileToString(env_, test_file_name, &data));
ASSERT_EQ(std::string("hello world!42"), data);
env_->RemoveFile(test_file_name);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
8197c8f2-7fdf-4d67-84a6-52c2c7d2ab71 | cpp | google/arolla | dict | arolla/expr/optimization/peephole_optimizations/dict.cc | arolla/expr/optimization/peephole_optimizations/dict_test.cc | #include "arolla/expr/optimization/peephole_optimizations/dict.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/qtype/dict/dict_types.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> BoolDictOptimization() {
ExprNodePtr dict = Placeholder("dict");
ASSIGN_OR_RETURN(
ExprNodePtr pattern,
CallOpReference("array.at", {Placeholder("values"),
CallOpReference("dict._get_row",
{dict, Placeholder("p")})}));
ASSIGN_OR_RETURN(
ExprNodePtr true_value,
CallOpReference("array.at", {Placeholder("values"),
CallOpReference("dict._get_row",
{dict, Literal(true)})}));
ASSIGN_OR_RETURN(
ExprNodePtr false_value,
CallOpReference("array.at", {Placeholder("values"),
CallOpReference("dict._get_row",
{dict, Literal(false)})}));
ASSIGN_OR_RETURN(ExprNodePtr missing_value,
CallOpReference("core.empty_like", {true_value}));
ASSIGN_OR_RETURN(
ExprNodePtr replacement,
CallOpReference("bool.logical_if", {Placeholder("p"), true_value,
false_value, missing_value}));
auto is_bool_literal = [](const ExprNodePtr& node) {
return node->qvalue().has_value() &&
node->qtype() == GetKeyToRowDictQType<bool>();
};
auto is_not_literal = [](const ExprNodePtr& node) {
return !node->qvalue().has_value();
};
return PeepholeOptimization::CreatePatternOptimization(
pattern, replacement, {{"dict", is_bool_literal}, {"p", is_not_literal}});
}
absl::Status AddDictContainsOptimizations(
PeepholeOptimizationPack& optimizations) {
ASSIGN_OR_RETURN(ExprNodePtr replacement,
CallOpReference("dict._contains",
{Placeholder("dict"), Placeholder("x")}));
for (const char* op_has : {"core.has._optional", "core.has._array"}) {
{
ASSIGN_OR_RETURN(
ExprNodePtr pattern,
CallOpReference(
"core.presence_and",
{CallOpReference(op_has, {Placeholder("x")}),
CallOpReference("dict._contains",
{Placeholder("dict"), Placeholder("x")})}));
ASSIGN_OR_RETURN(optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(
pattern, replacement));
}
{
ASSIGN_OR_RETURN(
ExprNodePtr pattern,
CallOpReference(
"core.presence_and",
{CallOpReference("dict._contains",
{Placeholder("dict"), Placeholder("x")}),
CallOpReference(op_has, {Placeholder("x")})}));
ASSIGN_OR_RETURN(optimizations.emplace_back(),
PeepholeOptimization::CreatePatternOptimization(
pattern, replacement));
}
}
return absl::OkStatus();
}
}
absl::StatusOr<PeepholeOptimizationPack> DictOptimizations() {
PeepholeOptimizationPack optimizations;
ASSIGN_OR_RETURN(optimizations.emplace_back(), BoolDictOptimization());
RETURN_IF_ERROR(AddDictContainsOptimizations(optimizations));
return optimizations;
}
} | #include "arolla/expr/optimization/peephole_optimizations/dict.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/dict/dict_types.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
class DictOptimizationsTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(optimizer_,
CreatePeepholeOptimizer({DictOptimizations}));
GetDenseArrayQType<int>();
GetDenseArrayQType<Unit>();
}
absl::StatusOr<ExprNodePtr> ApplyOptimizer(
absl::StatusOr<ExprNodePtr> status_or_expr) const {
ASSIGN_OR_RETURN(auto expr, ToLowest(status_or_expr));
return ToLowest(optimizer_->ApplyToNode(expr));
}
absl::StatusOr<ExprNodePtr> ToLowest(
const absl::StatusOr<ExprNodePtr>& status_or_expr) const {
if (!status_or_expr.ok()) {
return std::move(status_or_expr).status();
}
return ::arolla::expr::ToLowest(*status_or_expr);
}
std::unique_ptr<PeepholeOptimizer> optimizer_;
};
TEST_F(DictOptimizationsTest, Bool) {
auto values = CreateDenseArray<float>({57.0, 1543.0});
auto p = Leaf("cond");
auto dict = Leaf("dict");
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr,
CallOp("array.at",
{Literal(values), CallOp("dict._get_row", {dict, p})}));
{
ASSERT_OK_AND_ASSIGN(auto actual_expr, ApplyOptimizer(expr));
ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(expr));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr_with_literal_int_dict,
SubstituteByFingerprint(
expr, {{dict->fingerprint(),
Literal(KeyToRowDict<int64_t>{{1, 1}, {0, 0}})}}));
ASSERT_OK_AND_ASSIGN(auto actual_expr,
ApplyOptimizer(expr_with_literal_int_dict));
ASSERT_OK_AND_ASSIGN(auto expected_expr,
ToLowest(expr_with_literal_int_dict));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr_with_literal_bool_dict,
SubstituteByFingerprint(
expr, {{dict->fingerprint(),
Literal(KeyToRowDict<bool>{{false, 1}, {true, 0}})}}));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expected_true_value,
SubstituteByFingerprint(expr_with_literal_bool_dict,
{{p->fingerprint(), Literal(true)}}));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expected_false_value,
SubstituteByFingerprint(expr_with_literal_bool_dict,
{{p->fingerprint(), Literal(false)}}));
ASSERT_OK_AND_ASSIGN(auto actual_expr,
ApplyOptimizer(expr_with_literal_bool_dict));
ASSERT_OK_AND_ASSIGN(
auto expected_expr,
ToLowest(CallOp("bool.logical_if",
{p, expected_true_value, expected_false_value,
CallOp("core.empty_like", {expected_true_value})})));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
TEST_F(DictOptimizationsTest, Contains) {
auto key = WithQTypeAnnotation(Leaf("key"), GetDenseArrayQType<int>());
auto dict = Leaf("dict");
ASSERT_OK_AND_ASSIGN(auto key_exists, CallOp("core.has", {key}));
ASSERT_OK_AND_ASSIGN(auto dict_contains_key,
CallOp("dict._contains", {dict, key}));
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.presence_and", {key_exists, dict_contains_key})));
ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(dict_contains_key));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
{
ASSERT_OK_AND_ASSIGN(
auto actual_expr,
ApplyOptimizer(
CallOp("core.presence_and", {dict_contains_key, key_exists})));
ASSERT_OK_AND_ASSIGN(auto expected_expr, ToLowest(dict_contains_key));
EXPECT_THAT(actual_expr, EqualsExpr(expected_expr));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/dict.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/dict_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c33600c5-a968-467c-a73d-776e13f851df | cpp | tensorflow/tensorflow | encapsulate_xla_computations_pass | tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc | tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc | #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h"
#include <functional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
const char* const kXlaClusterOutput = "XlaClusterOutput";
bool IsCpuGpuCompile(const Graph* graph) {
for (Node* n : graph->nodes()) {
string name;
if (!TryGetNodeAttr(n->attrs(), kXlaClusterIdAttr, &name)) continue;
DeviceNameUtils::ParsedName parsed;
if (DeviceNameUtils::ParseFullName(n->requested_device(), &parsed)) {
if (parsed.type != DEVICE_CPU && parsed.type != DEVICE_GPU) {
return false;
}
}
}
return true;
}
bool is_guaranteed_constant(const Node& n) {
bool guaranteed_constant = false;
if (!TryGetNodeAttr(n.attrs(), "_is_guaranteed_constant",
&guaranteed_constant)) {
return false;
}
return guaranteed_constant;
}
Status GetIndexAttr(const Node& n, int num_args, int* index) {
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", index));
if (*index < 0 || *index >= num_args) {
return errors::InvalidArgument("Invalid ", n.type_string(), " number ",
*index);
}
return absl::OkStatus();
}
DataType EdgeType(const Edge* edge) {
return edge->dst()->input_type(edge->dst_input());
}
void AddControlInputs(const Node& node, absl::flat_hash_set<Node*>* deps) {
for (const Edge* edge : node.in_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->src());
}
}
}
void AddControlOutputs(const Node& node, absl::flat_hash_set<Node*>* deps) {
for (const Edge* edge : node.out_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->dst());
}
}
}
Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
Graph* graph = graph_ptr->get();
const int num_args = input_permutation->size();
const int num_retvals = output_permutation->size();
std::vector<Node*> args;
std::vector<Node*> retvals;
args.reserve(num_args);
retvals.reserve(num_retvals);
for (Node* n : graph->nodes()) {
if (n->type_string() == "_Arg") {
if (is_guaranteed_constant(*n)) {
return errors::InvalidArgument(
"Guaranteed constants are not supported (", n->name(), ")");
}
args.push_back(n);
} else if (n->type_string() == "_Retval") {
retvals.push_back(n);
}
}
if (std::find(args.begin(), args.end(), nullptr) != args.end()) {
return errors::InvalidArgument("Missing or non-consecutive arguments");
}
std::sort(args.begin(), args.end(), [&](Node* a, Node* b) {
bool a_is_resource = (a->output_type(0) == DT_RESOURCE);
bool b_is_resource = (b->output_type(0) == DT_RESOURCE);
StringPiece a_name(a->name());
StringPiece b_name(b->name());
return std::tie(a_is_resource, a_name) < std::tie(b_is_resource, b_name);
});
std::sort(retvals.begin(), retvals.end(),
[](Node* a, Node* b) { return a->name() < b->name(); });
int variable_start_index = num_args;
for (int i = 0; i < num_args; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*args[i], num_args, &index));
if (args[i]->output_type(0) == DT_RESOURCE &&
variable_start_index == num_args) {
variable_start_index = i;
}
(*input_permutation)[index] = i;
args[i]->AddAttr("index", i);
}
VLOG(4) << "variable_start_index: " << variable_start_index;
for (int i = 0; i < num_retvals; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*retvals[i], num_retvals, &index));
(*output_permutation)[index] = i;
retvals[i]->AddAttr("index", i);
}
AddNodeAttr(kXlaClusterIdAttr, call_def->name(), call_def);
AddNodeAttr("_variable_start_index", variable_start_index, call_def);
TF_ASSIGN_OR_RETURN(uint64 fingerprint, FingerprintGraph(*graph));
VLOG(1) << "Subgraph fingerprint:" << fingerprint;
call_def->set_op(absl::StrCat(call_def->op(), "_", fingerprint));
return absl::OkStatus();
}
}
Status EncapsulateXlaComputationsPass::Encapsulate(
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def) {
for (const Edge* e : (*graph)->edges()) {
if (!e->IsControlEdge() &&
e->src()->attrs().Find(kXlaClusterIdAttr) != nullptr &&
e->dst()->attrs().Find(kXlaClusterIdAttr) == nullptr &&
e->dst()->type_string() != kXlaClusterOutput) {
return errors::InvalidArgument(
"Undeclared output of XLA computation. Some common causes of this "
"error are: 1) variable initializers that depend on the XLA "
"computation; 2) gradient computations that depend on the XLA "
"computation, which can be mitigated by moving gradient computations "
"inside XLA computation. Offending edge: ",
e->src()->name(), ":", e->src_output(), " -> ", e->dst()->name(), ":",
e->dst_input());
}
}
auto output = std::make_unique<Graph>((*graph)->op_registry());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
EncapsulateSubgraphsInFunctions(
kXlaClusterIdAttr, **graph, RewriteSubgraph,
true, &output, flib_def),
"EncapsulateXlaComputationsPass failed");
graph->swap(output);
return absl::OkStatus();
}
Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps(
Graph* graph,
const std::function<absl::StatusOr<bool>(const Node&)>& is_xla_launch_node,
const std::function<absl::StatusOr<XlaFunctionInfo>(const Node&)>&
get_xla_function_info,
const bool add_edges_to_output_of_downstream_nodes) {
std::vector<Node*> launch_nodes;
for (Node* n : graph->nodes()) {
TF_ASSIGN_OR_RETURN(const bool is_xla_launch_node, is_xla_launch_node(*n));
if (is_xla_launch_node) launch_nodes.push_back(n);
}
for (Node* launch : launch_nodes) {
TF_ASSIGN_OR_RETURN(const XlaFunctionInfo xla_function_info,
get_xla_function_info(*launch));
std::vector<const Edge*> in_edges;
TF_RETURN_IF_ERROR(launch->input_edges(&in_edges));
const int num_inputs = in_edges.size();
const int variable_start_index = xla_function_info.variable_start_index;
const int num_variables = num_inputs - variable_start_index;
const int num_args = variable_start_index;
VLOG(4) << "Launch node '" << launch->name() << "'"
<< " input edges: " << in_edges.size() << " num_args: " << num_args
<< " num_variables: " << num_variables;
std::vector<Node*> nodes_to_remove = {launch};
std::vector<std::pair<Node*, int>> data_inputs(num_inputs);
absl::flat_hash_set<Node*> control_inputs;
DataTypeVector arg_types(num_args);
AddControlInputs(*launch, &control_inputs);
for (int i = 0; i < num_args; ++i) {
const Edge* edge = in_edges[i];
data_inputs[i] = {edge->src(), edge->src_output()};
arg_types[i] = EdgeType(edge);
}
for (int i = 0; i < num_variables; ++i) {
int pos = variable_start_index + i;
const Edge* edge = in_edges[pos];
data_inputs[pos] = {edge->src(), edge->src_output()};
}
const int num_outputs = launch->output_types().size();
absl::flat_hash_set<Node*> control_outputs;
std::vector<std::vector<std::pair<Node*, int>>> data_outputs(num_outputs);
const DataTypeVector& output_types(launch->output_types());
for (const Edge* le : launch->out_edges()) {
if (le->IsControlEdge()) {
control_outputs.insert(le->dst());
} else {
TF_RET_CHECK(le->src_output() < num_outputs);
Node* output_node = le->dst();
if (add_edges_to_output_of_downstream_nodes) {
TF_RET_CHECK(output_node->type_string() == kXlaClusterOutput)
<< le->DebugString();
nodes_to_remove.push_back(output_node);
for (const Edge* oe : output_node->out_edges()) {
TF_RET_CHECK(!oe->IsControlEdge());
data_outputs[le->src_output()].push_back(
{oe->dst(), oe->dst_input()});
}
AddControlOutputs(*output_node, &control_outputs);
} else {
data_outputs[le->src_output()].push_back(
{le->dst(), le->dst_input()});
}
}
}
NodeDef def;
def.set_name(launch->name());
MergeDebugInfo(NodeDebugInfo(launch->def()), &def);
VLOG(2) << "Replacing with XlaLaunch";
VLOG(2) << "Device is " << launch->requested_device();
def.set_op("XlaLaunch");
def.set_device(launch->requested_device());
AddNodeAttr("Tconstants", DataTypeVector{}, &def);
AddNodeAttr("Targs", arg_types, &def);
AddNodeAttr("Nresources", num_variables, &def);
AddNodeAttr("Tresults", output_types, &def);
NameAttrList function;
function.set_name(xla_function_info.function_name);
AddNodeAttr("function", function, &def);
for (Node* node : nodes_to_remove) {
VLOG(2) << "Deleting node " << node->DebugString();
control_inputs.erase(node);
control_outputs.erase(node);
graph->RemoveNode(node);
}
TF_ASSIGN_OR_RETURN(Node * xla_launch, graph->AddNode(def));
for (int i = 0, end = data_inputs.size(); i < end; ++i) {
graph->AddEdge(data_inputs[i].first, data_inputs[i].second, xla_launch,
i);
}
for (Node* n : control_inputs) {
graph->AddControlEdge(n, xla_launch);
}
for (int i = 0, end = data_outputs.size(); i < end; ++i) {
for (const auto& successor : data_outputs[i]) {
graph->AddEdge(xla_launch, i, successor.first, successor.second);
}
}
for (Node* n : control_outputs) {
graph->AddControlEdge(xla_launch, n);
}
}
return absl::OkStatus();
}
Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps(
Graph* graph) {
const auto is_xla_launch_node = [](const Node& node) -> absl::StatusOr<bool> {
const string& name = GetNodeAttrString(node.attrs(), kXlaClusterIdAttr);
return !name.empty();
};
const auto get_xla_function_info =
[](const Node& node) -> absl::StatusOr<XlaFunctionInfo> {
XlaFunctionInfo result;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), "_variable_start_index",
&result.variable_start_index));
result.function_name = node.type_string();
return result;
};
return BuildXlaLaunchOps(graph, is_xla_launch_node, get_xla_function_info,
true);
}
Status EncapsulateXlaComputationsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateXlaComputations(): "
<< DumpGraphToFile("encapsulate_xla_computations_before",
**options.graph, options.flib_def);
const char* additional_help =
IsCpuGpuCompile(options.graph->get())
? xla::status_macros::kPossibleAutoJitAlternative
: "";
TF_RETURN_WITH_CONTEXT_IF_ERROR(Encapsulate(options.graph, options.flib_def),
additional_help);
VLOG(1) << "EncapsulateXlaComputations() half-way: "
<< DumpGraphToFile("encapsulate_xla_computations_halfway",
**options.graph, options.flib_def);
TF_RETURN_WITH_CONTEXT_IF_ERROR(BuildXlaLaunchOps(options.graph->get()),
additional_help);
VLOG(1) << "EncapsulateXlaComputations() finished: "
<< DumpGraphToFile("encapsulate_xla_computations_after",
**options.graph, options.flib_def);
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h"
#include "tensorflow/compiler/tf2xla/test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
static std::unique_ptr<Graph> MakeOuterGraph(
const FunctionLibraryDefinition& flib_def, const string& function) {
Scope scope = Scope::NewRootScope().ExitOnError();
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib_def.ToProto()));
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("launch0", function, &flib_def)
.Input(a.node()->name(), 0, DT_INT32)
.Input(b.node()->name(), 0, DT_FLOAT)
.Input(c.node()->name(), 0, DT_INT32)
.Input(d.node()->name(), 0, DT_FLOAT)
.Input(u.node()->name(), 0, DT_RESOURCE)
.Input(v.node()->name(), 0, DT_RESOURCE)
.Input(w.node()->name(), 0, DT_RESOURCE)
.Device("/gpu:0")
.Attr(kXlaClusterIdAttr, "launch0")
.Attr("_variable_start_index", 4)
.Finalize(&def));
Status status;
Node* launch = scope.graph()->AddNode(def, &status);
TF_CHECK_OK(status);
TF_CHECK_OK(scope.DoShapeInference(launch));
scope.graph()->AddEdge(a.node(), 0, launch, 0);
scope.graph()->AddEdge(b.node(), 0, launch, 1);
scope.graph()->AddEdge(c.node(), 0, launch, 2);
scope.graph()->AddEdge(d.node(), 0, launch, 3);
scope.graph()->AddEdge(u.node(), 0, launch, 4);
scope.graph()->AddEdge(v.node(), 0, launch, 5);
scope.graph()->AddEdge(w.node(), 0, launch, 6);
auto out0 =
ops::XlaClusterOutput(scope.WithOpName("Out0"), Output(launch, 0));
auto out1 =
ops::XlaClusterOutput(scope.WithOpName("Out1"), Output(launch, 1));
auto out2 =
ops::XlaClusterOutput(scope.WithOpName("Out2"), Output(launch, 2));
auto out3 =
ops::XlaClusterOutput(scope.WithOpName("Out3"), Output(launch, 3));
auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0);
auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0);
auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0);
auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1);
auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2);
auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(scope.ToGraph(graph.get()));
return graph;
}
static std::unique_ptr<Graph> MakeBodyGraph() {
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("a_0_arg"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("b_0_arg"), DT_FLOAT, 1);
auto arg2 = ops::_Arg(scope.WithOpName("c_0_arg"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("d_0_arg"), DT_FLOAT, 3);
auto arg4 = ops::_Arg(scope.WithOpName("u_0_arg"), DT_RESOURCE, 4);
auto arg5 = ops::_Arg(scope.WithOpName("v_0_arg"), DT_RESOURCE, 5);
auto arg6 = ops::_Arg(scope.WithOpName("w_0_arg"), DT_RESOURCE, 6);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
node->set_requested_device("/gpu:0");
};
auto b_identity = ops::Identity(scope.WithOpName("B_identity"), arg1);
add_attrs(b_identity.node());
auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), arg4, DT_FLOAT);
add_attrs(read_u.node());
auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), arg5, DT_FLOAT);
add_attrs(read_v.node());
auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), arg6, DT_FLOAT);
add_attrs(read_w.node());
auto e = ops::Add(scope.WithOpName("E"), arg0, arg2);
add_attrs(e.node());
auto f = ops::Add(scope.WithOpName("F"), read_v, read_w);
add_attrs(f.node());
auto g = ops::Add(scope.WithOpName("G"), f, arg3);
add_attrs(g.node());
auto out0 = ops::_Retval(scope.WithOpName("b_identity_0_retval_RetVal"),
b_identity, 0);
auto out1 = ops::_Retval(scope.WithOpName("e_0_retval_RetVal"), e, 1);
auto out2 = ops::_Retval(scope.WithOpName("g_0_retval_RetVal"), g, 2);
auto out3 =
ops::_Retval(scope.WithOpName("readu_0_retval_RetVal"), read_u, 3);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(scope.ToGraph(graph.get()));
return graph;
}
TEST(EncapsulateXlaComputations, DeterministicEncapsulate) {
auto get_serialized_graph = [](bool control_input_reversed,
bool operand_reversed) -> string {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto a0 = ops::Placeholder(scope.WithOpName("A0"), DT_INT32);
auto a1 = ops::Placeholder(scope.WithOpName("A1"), DT_INT32);
ops::Add e = operand_reversed ? ops::Add(scope.WithOpName("E"), a0, a1)
: ops::Add(scope.WithOpName("E"), a1, a0);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
};
add_attrs(e.node());
TF_CHECK_OK(scope.ToGraph(graph.get()));
auto get_node_in_graph = [&graph](Node* node) {
return graph->FindNodeId(node->id());
};
if (!control_input_reversed) {
graph->AddControlEdge(get_node_in_graph(a0.node()),
get_node_in_graph(e.node()), true);
graph->AddControlEdge(get_node_in_graph(a1.node()),
get_node_in_graph(e.node()), true);
} else {
graph->AddControlEdge(get_node_in_graph(a1.node()),
get_node_in_graph(e.node()), true);
graph->AddControlEdge(get_node_in_graph(a0.node()),
get_node_in_graph(e.node()), true);
}
}
TF_CHECK_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def));
return SerializeGraphDeterministic(*graph).value();
};
EXPECT_EQ(get_serialized_graph(true,
false),
get_serialized_graph(false,
false));
EXPECT_NE(get_serialized_graph(false,
true),
get_serialized_graph(false,
false));
}
TEST(EncapsulateXlaComputations, Encapsulate) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
node->set_requested_device("/gpu:0");
};
auto b_identity = ops::Identity(scope.WithOpName("B_identity"), b);
add_attrs(b_identity.node());
auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), u, DT_FLOAT);
add_attrs(read_u.node());
auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), v, DT_FLOAT);
add_attrs(read_v.node());
auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), w, DT_FLOAT);
add_attrs(read_w.node());
auto e = ops::Add(scope.WithOpName("E"), a, c);
add_attrs(e.node());
auto f = ops::Add(scope.WithOpName("F"), read_v, read_w);
add_attrs(f.node());
auto g = ops::Add(scope.WithOpName("G"), f, d);
add_attrs(g.node());
auto out0 = ops::XlaClusterOutput(scope.WithOpName("Out0"), b_identity);
auto out1 = ops::XlaClusterOutput(scope.WithOpName("Out1"), e);
auto out2 = ops::XlaClusterOutput(scope.WithOpName("Out2"), g);
auto out3 = ops::XlaClusterOutput(scope.WithOpName("Out3"), read_u);
auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0);
auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0);
auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0);
auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1);
auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2);
auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
}
std::unique_ptr<Graph> graph_copy(new Graph(&flib_def));
CopyGraph(*graph, graph_copy.get());
TF_ASSERT_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def));
std::unordered_map<string, Node*> index = graph->BuildNodeNameIndex();
string function = index.at("launch0")->type_string();
{
std::unique_ptr<Graph> outer = MakeOuterGraph(flib_def, function);
GraphDef expected_def;
outer->ToGraphDef(&expected_def);
GraphDef actual_def;
graph->ToGraphDef(&actual_def);
TF_EXPECT_GRAPH_EQ_INTERNAL(expected_def, actual_def);
}
{
std::unique_ptr<Graph> body = MakeBodyGraph();
GraphDef expected_body_def;
body->ToGraphDef(&expected_body_def);
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(function, flib_def, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_FLOAT, DT_INT32, DT_FLOAT,
DT_RESOURCE, DT_RESOURCE, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ((DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}),
result.ret_types);
TF_EXPECT_GRAPH_EQ(expected_body_def, result.gdef);
}
TF_ASSERT_OK(
EncapsulateXlaComputationsPass::Encapsulate(&graph_copy, &flib_def));
std::unordered_map<string, Node*> index_copy =
graph_copy->BuildNodeNameIndex();
string function_copy = index_copy.at("launch0")->type_string();
EXPECT_EQ(function, function_copy);
}
TEST(EncapsulateXlaComputations, BuildXlaLaunchOp) {
std::unique_ptr<Graph> body_graph = MakeBodyGraph();
FunctionDefLibrary flib;
TF_ASSERT_OK(GraphToFunctionDef(*body_graph, "launch0", flib.add_function()));
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
std::unique_ptr<Graph> graph = MakeOuterGraph(flib_def, "launch0");
TF_ASSERT_OK(EncapsulateXlaComputationsPass::BuildXlaLaunchOps(graph.get()));
Scope scope = Scope::DisabledShapeInferenceScope().ExitOnError();
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib));
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
NameAttrList function;
function.set_name("launch0");
auto launch = ops::XlaLaunch(
scope.WithOpName("launch0").WithDevice("/gpu:0"),
std::initializer_list<Input>{}, std::initializer_list<Input>{a, b, c, d},
std::initializer_list<Input>{u, v, w},
DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}, function);
auto consumer0_a =
ops::Identity(scope.WithOpName("consumer0_a"), launch.results[0]);
auto consumer0_b =
ops::Identity(scope.WithOpName("consumer0_b"), launch.results[0]);
auto consumer0_c =
ops::Identity(scope.WithOpName("consumer0_c"), launch.results[0]);
auto consumer1 =
ops::Identity(scope.WithOpName("consumer1"), launch.results[1]);
auto consumer2 =
ops::Identity(scope.WithOpName("consumer2"), launch.results[2]);
auto consumer3 =
ops::Identity(scope.WithOpName("consumer3"), launch.results[3]);
GraphDef expected_def;
TF_ASSERT_OK(scope.ToGraphDef(&expected_def));
GraphDef actual_def;
graph->ToGraphDef(&actual_def);
TF_EXPECT_GRAPH_EQ(expected_def, actual_def);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b066412e-e4c6-4b4d-93a2-74a41007ff22 | cpp | google/cel-cpp | regex_precompilation_optimization | eval/compiler/regex_precompilation_optimization.cc | eval/compiler/regex_precompilation_optimization_test.cc | #include "eval/compiler/regex_precompilation_optimization.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "base/builtins.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "common/value.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
#include "eval/eval/compiler_constant_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/regex_match_step.h"
#include "internal/casts.h"
#include "internal/status_macros.h"
#include "re2/re2.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Cast;
using ::cel::InstanceOf;
using ::cel::NativeTypeId;
using ::cel::StringValue;
using ::cel::Value;
using ::cel::ast_internal::AstImpl;
using ::cel::ast_internal::Call;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::Reference;
using ::cel::internal::down_cast;
using ReferenceMap = absl::flat_hash_map<int64_t, Reference>;
bool IsFunctionOverload(const Expr& expr, absl::string_view function,
absl::string_view overload, size_t arity,
const ReferenceMap& reference_map) {
if (!expr.has_call_expr()) {
return false;
}
const auto& call_expr = expr.call_expr();
if (call_expr.function() != function) {
return false;
}
if (call_expr.args().size() + (call_expr.has_target() ? 1 : 0) != arity) {
return false;
}
if (reference_map.empty()) {
return true;
}
auto reference = reference_map.find(expr.id());
if (reference != reference_map.end() &&
reference->second.overload_id().size() == 1 &&
reference->second.overload_id().front() == overload) {
return true;
}
return false;
}
class RegexProgramBuilder final {
public:
explicit RegexProgramBuilder(int max_program_size)
: max_program_size_(max_program_size) {}
absl::StatusOr<std::shared_ptr<const RE2>> BuildRegexProgram(
std::string pattern) {
auto existing = programs_.find(pattern);
if (existing != programs_.end()) {
if (auto program = existing->second.lock(); program) {
return program;
}
programs_.erase(existing);
}
auto program = std::make_shared<RE2>(pattern);
if (max_program_size_ > 0 && program->ProgramSize() > max_program_size_) {
return absl::InvalidArgumentError("exceeded RE2 max program size");
}
if (!program->ok()) {
return absl::InvalidArgumentError(
"invalid_argument unsupported RE2 pattern for matches");
}
programs_.insert({std::move(pattern), program});
return program;
}
private:
const int max_program_size_;
absl::flat_hash_map<std::string, std::weak_ptr<const RE2>> programs_;
};
class RegexPrecompilationOptimization : public ProgramOptimizer {
public:
explicit RegexPrecompilationOptimization(const ReferenceMap& reference_map,
int regex_max_program_size)
: reference_map_(reference_map),
regex_program_builder_(regex_max_program_size) {}
absl::Status OnPreVisit(PlannerContext& context, const Expr& node) override {
return absl::OkStatus();
}
absl::Status OnPostVisit(PlannerContext& context, const Expr& node) override {
if (!IsFunctionOverload(node, cel::builtin::kRegexMatch, "matches_string",
2, reference_map_)) {
return absl::OkStatus();
}
ProgramBuilder::Subexpression* subexpression =
context.program_builder().GetSubexpression(&node);
const Call& call_expr = node.call_expr();
const Expr& pattern_expr = call_expr.args().back();
absl::optional<std::string> pattern =
GetConstantString(context, subexpression, node, pattern_expr);
if (!pattern.has_value()) {
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(
std::shared_ptr<const RE2> regex_program,
regex_program_builder_.BuildRegexProgram(std::move(pattern).value()));
if (subexpression == nullptr || subexpression->IsFlattened()) {
return absl::OkStatus();
}
const Expr& subject_expr =
call_expr.has_target() ? call_expr.target() : call_expr.args().front();
return RewritePlan(context, subexpression, node, subject_expr,
std::move(regex_program));
}
private:
absl::optional<std::string> GetConstantString(
PlannerContext& context,
absl::Nullable<ProgramBuilder::Subexpression*> subexpression,
const cel::ast_internal::Expr& call_expr,
const cel::ast_internal::Expr& re_expr) const {
if (re_expr.has_const_expr() && re_expr.const_expr().has_string_value()) {
return re_expr.const_expr().string_value();
}
if (subexpression == nullptr || subexpression->IsFlattened()) {
return absl::nullopt;
}
absl::optional<Value> constant;
if (subexpression->IsRecursive()) {
const auto& program = subexpression->recursive_program();
auto deps = program.step->GetDependencies();
if (deps.has_value() && deps->size() == 2) {
const auto* re_plan =
TryDowncastDirectStep<DirectCompilerConstantStep>(deps->at(1));
if (re_plan != nullptr) {
constant = re_plan->value();
}
}
} else {
ExecutionPathView re_plan = context.GetSubplan(re_expr);
if (re_plan.size() == 1 &&
re_plan[0]->GetNativeTypeId() ==
NativeTypeId::For<CompilerConstantStep>()) {
constant =
down_cast<const CompilerConstantStep*>(re_plan[0].get())->value();
}
}
if (constant.has_value() && InstanceOf<StringValue>(*constant)) {
return Cast<StringValue>(*constant).ToString();
}
return absl::nullopt;
}
absl::Status RewritePlan(
PlannerContext& context,
absl::Nonnull<ProgramBuilder::Subexpression*> subexpression,
const Expr& call, const Expr& subject,
std::shared_ptr<const RE2> regex_program) {
if (subexpression->IsRecursive()) {
return RewriteRecursivePlan(subexpression, call, subject,
std::move(regex_program));
}
return RewriteStackMachinePlan(context, call, subject,
std::move(regex_program));
}
absl::Status RewriteRecursivePlan(
absl::Nonnull<ProgramBuilder::Subexpression*> subexpression,
const Expr& call, const Expr& subject,
std::shared_ptr<const RE2> regex_program) {
auto program = subexpression->ExtractRecursiveProgram();
auto deps = program.step->ExtractDependencies();
if (!deps.has_value() || deps->size() != 2) {
subexpression->set_recursive_program(std::move(program.step),
program.depth);
return absl::OkStatus();
}
subexpression->set_recursive_program(
CreateDirectRegexMatchStep(call.id(), std::move(deps->at(0)),
std::move(regex_program)),
program.depth);
return absl::OkStatus();
}
absl::Status RewriteStackMachinePlan(
PlannerContext& context, const Expr& call, const Expr& subject,
std::shared_ptr<const RE2> regex_program) {
if (context.GetSubplan(subject).empty()) {
return absl::OkStatus();
}
CEL_ASSIGN_OR_RETURN(ExecutionPath new_plan,
context.ExtractSubplan(subject));
CEL_ASSIGN_OR_RETURN(
new_plan.emplace_back(),
CreateRegexMatchStep(std::move(regex_program), call.id()));
return context.ReplaceSubplan(call, std::move(new_plan));
}
const ReferenceMap& reference_map_;
RegexProgramBuilder regex_program_builder_;
};
}
ProgramOptimizerFactory CreateRegexPrecompilationExtension(
int regex_max_program_size) {
return [=](PlannerContext& context, const AstImpl& ast) {
return std::make_unique<RegexPrecompilationOptimization>(
ast.reference_map(), regex_max_program_size);
};
}
} | #include "eval/compiler/regex_precompilation_optimization.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "base/ast_internal/ast_impl.h"
#include "common/memory.h"
#include "common/values/legacy_value_manager.h"
#include "eval/compiler/cel_expression_builder_flat_impl.h"
#include "eval/compiler/constant_folding.h"
#include "eval/compiler/flat_expr_builder.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
#include "eval/eval/evaluator_core.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/internal/issue_collector.h"
#include "runtime/runtime_issue.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::RuntimeIssue;
using ::cel::runtime_internal::IssueCollector;
using ::google::api::expr::parser::Parse;
using ::testing::ElementsAre;
namespace exprpb = google::api::expr::v1alpha1;
class RegexPrecompilationExtensionTest : public testing::TestWithParam<bool> {
public:
RegexPrecompilationExtensionTest()
: type_registry_(*builder_.GetTypeRegistry()),
function_registry_(*builder_.GetRegistry()),
value_factory_(cel::MemoryManagerRef::ReferenceCounting(),
type_registry_.GetTypeProvider()),
resolver_("", function_registry_.InternalGetRegistry(),
type_registry_.InternalGetModernRegistry(), value_factory_,
type_registry_.resolveable_enums()),
issue_collector_(RuntimeIssue::Severity::kError) {
if (EnableRecursivePlanning()) {
options_.max_recursion_depth = -1;
options_.enable_recursive_tracing = true;
}
options_.enable_regex = true;
options_.regex_max_program_size = 100;
options_.enable_regex_precompilation = true;
runtime_options_ = ConvertToRuntimeOptions(options_);
}
void SetUp() override {
ASSERT_OK(RegisterBuiltinFunctions(&function_registry_, options_));
}
bool EnableRecursivePlanning() { return GetParam(); }
protected:
CelEvaluationListener RecordStringValues() {
return [this](int64_t, const CelValue& value, google::protobuf::Arena*) {
if (value.IsString()) {
string_values_.push_back(std::string(value.StringOrDie().value()));
}
return absl::OkStatus();
};
}
CelExpressionBuilderFlatImpl builder_;
CelTypeRegistry& type_registry_;
CelFunctionRegistry& function_registry_;
InterpreterOptions options_;
cel::RuntimeOptions runtime_options_;
cel::common_internal::LegacyValueManager value_factory_;
Resolver resolver_;
IssueCollector issue_collector_;
std::vector<std::string> string_values_;
};
TEST_P(RegexPrecompilationExtensionTest, SmokeTest) {
ProgramOptimizerFactory factory =
CreateRegexPrecompilationExtension(options_.regex_max_program_size);
ExecutionPath path;
ProgramBuilder program_builder;
cel::ast_internal::AstImpl ast_impl;
ast_impl.set_is_checked(true);
PlannerContext context(resolver_, runtime_options_, value_factory_,
issue_collector_, program_builder);
ASSERT_OK_AND_ASSIGN(std::unique_ptr<ProgramOptimizer> optimizer,
factory(context, ast_impl));
}
TEST_P(RegexPrecompilationExtensionTest, OptimizeableExpression) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr parsed_expr,
Parse("input.matches(r'[a-zA-Z]+[0-9]*')"));
exprpb::CheckedExpr expr;
expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
(*expr.mutable_reference_map())[2].add_overload_id("matches_string");
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
ASSERT_OK(plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123"));
}
TEST_P(RegexPrecompilationExtensionTest, OptimizeParsedExpr) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr expr,
Parse("input.matches(r'[a-zA-Z]+[0-9]*')"));
ASSERT_OK_AND_ASSIGN(
std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr.expr(), &expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
ASSERT_OK(plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123"));
}
TEST_P(RegexPrecompilationExtensionTest, DoesNotOptimizeNonConstRegex) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr parsed_expr,
Parse("input.matches(input_re)"));
exprpb::CheckedExpr expr;
expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
(*expr.mutable_reference_map())[2].add_overload_id("matches_string");
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
activation.InsertValue("input_re", CelValue::CreateStringView("input_re"));
ASSERT_OK(plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123", "input_re"));
}
TEST_P(RegexPrecompilationExtensionTest, DoesNotOptimizeCompoundExpr) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr parsed_expr,
Parse("input.matches('abc' + 'def')"));
exprpb::CheckedExpr expr;
expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
(*expr.mutable_reference_map())[2].add_overload_id("matches_string");
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
ASSERT_OK(plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123", "abc", "def", "abcdef"));
}
class RegexConstFoldInteropTest : public RegexPrecompilationExtensionTest {
public:
RegexConstFoldInteropTest() : RegexPrecompilationExtensionTest() {
builder_.flat_expr_builder().AddProgramOptimizer(
cel::runtime_internal::CreateConstantFoldingOptimizer(
cel::MemoryManagerRef::ReferenceCounting()));
}
protected:
google::protobuf::Arena arena_;
};
TEST_P(RegexConstFoldInteropTest, StringConstantOptimizeable) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr parsed_expr,
Parse("input.matches('abc' + 'def')"));
exprpb::CheckedExpr expr;
expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
(*expr.mutable_reference_map())[2].add_overload_id("matches_string");
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
ASSERT_OK(plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123"));
}
TEST_P(RegexConstFoldInteropTest, WrongTypeNotOptimized) {
builder_.flat_expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(options_.regex_max_program_size));
ASSERT_OK_AND_ASSIGN(exprpb::ParsedExpr parsed_expr,
Parse("input.matches(123 + 456)"));
exprpb::CheckedExpr expr;
expr.mutable_expr()->Swap(parsed_expr.mutable_expr());
expr.mutable_source_info()->Swap(parsed_expr.mutable_source_info());
(*expr.mutable_reference_map())[2].add_overload_id("matches_string");
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> plan,
builder_.CreateExpression(&expr));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("input", CelValue::CreateStringView("input123"));
ASSERT_OK_AND_ASSIGN(CelValue result,
plan->Trace(activation, &arena, RecordStringValues()));
EXPECT_THAT(string_values_, ElementsAre("input123"));
EXPECT_TRUE(result.IsError());
EXPECT_TRUE(CheckNoMatchingOverloadError(result));
}
INSTANTIATE_TEST_SUITE_P(RegexPrecompilationExtensionTest,
RegexPrecompilationExtensionTest, testing::Bool());
INSTANTIATE_TEST_SUITE_P(RegexConstFoldInteropTest, RegexConstFoldInteropTest,
testing::Bool());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/regex_precompilation_optimization.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/regex_precompilation_optimization_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
51178850-f375-45ed-a8e6-f2e8892fe6b4 | cpp | tensorflow/tensorflow | multinomial | tensorflow/lite/kernels/multinomial.cc | tensorflow/lite/kernels/multinomial_test.cc | #include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <random>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace multinomial {
struct MultinomialParams {
std::default_random_engine rng;
};
template <typename FloatType, typename IntegralType>
TfLiteStatus MultinomialSample(std::default_random_engine& rng,
const FloatType* logits, int logits_size,
IntegralType* outputs, int output_size) {
std::vector<double> cumulative_odds;
cumulative_odds.reserve(logits_size);
double last_odds = 0.0;
FloatType max_logit = std::numeric_limits<FloatType>::lowest();
for (int i = 0; i < logits_size; i++) {
max_logit = std::max(max_logit, logits[i]);
}
for (int i = 0; i < logits_size; i++) {
FloatType odds = std::exp(logits[i] - max_logit) + last_odds;
cumulative_odds.push_back(odds);
last_odds = odds;
}
std::uniform_real_distribution<double> distribution{0.0,
cumulative_odds.back()};
for (int i = 0; i < output_size; i++) {
double sample = distribution(rng);
auto it = std::lower_bound(cumulative_odds.begin(), cumulative_odds.end(),
sample);
if (it == cumulative_odds.end()) {
return kTfLiteError;
}
*outputs++ = static_cast<IntegralType>(it - cumulative_odds.begin());
}
return kTfLiteOk;
}
template <typename FloatType>
TfLiteStatus MultinomialSample(TfLiteContext* context,
std::default_random_engine& rng,
const FloatType* logits, int logits_size,
TfLiteTensor* output, int outputs_offset,
int output_size) {
switch (output->type) {
case kTfLiteInt32:
return MultinomialSample<FloatType, int32_t>(
rng, logits, logits_size,
GetTensorData<int32_t>(output) + outputs_offset, output_size);
break;
case kTfLiteInt64:
return MultinomialSample<FloatType, int64_t>(
rng, logits, logits_size,
GetTensorData<int64_t>(output) + outputs_offset, output_size);
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported datatype for multinomial output: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
}
TfLiteStatus MultinomialSample(TfLiteContext* context,
std::default_random_engine& rng,
const TfLiteTensor* logits, int logits_offset,
int logits_size, TfLiteTensor* output,
int outputs_offset, int output_size) {
switch (logits->type) {
case kTfLiteFloat16:
TF_LITE_KERNEL_LOG(context, "TfLiteFloat16 is currently not supported.");
return kTfLiteError;
break;
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(
context,
MultinomialSample<float>(
context, rng, GetTensorData<float>(logits) + logits_offset,
logits_size, output, outputs_offset, output_size));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(
context,
MultinomialSample<double>(
context, rng, GetTensorData<double>(logits) + logits_offset,
logits_size, output, outputs_offset, output_size));
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported datatype for multinomial logit input: %s",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
return kTfLiteOk;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new MultinomialParams();
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<MultinomialParams*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* logits_input = tflite::GetInput(context, node, 0);
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(logits_input), 2);
int batch_size = tflite::SizeOfDimension(logits_input, 0);
const TfLiteTensor* num_samples_input = tflite::GetInput(context, node, 1);
TF_LITE_ENSURE_EQ(context, tflite::NumDimensions(num_samples_input), 0);
TF_LITE_ENSURE_EQ(context, num_samples_input->type, kTfLiteInt32);
TF_LITE_ENSURE(context, IsConstantTensor(num_samples_input));
int num_samples = *num_samples_input->data.i32;
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(2);
output_shape->data[0] = batch_size;
output_shape->data[1] = num_samples;
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
return context->ResizeTensor(context, output, output_shape);
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
MultinomialParams* params =
reinterpret_cast<MultinomialParams*>(node->user_data);
TF_LITE_ENSURE(context, params != nullptr);
const TfLiteTensor* logits = tflite::GetInput(context, node, 0);
int batch_size = tflite::SizeOfDimension(logits, 0);
int logits_size = tflite::SizeOfDimension(logits, 1);
const TfLiteTensor* num_samples_input = tflite::GetInput(context, node, 1);
int output_size = *num_samples_input->data.i32;
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
for (int batch = 0; batch < batch_size; ++batch) {
int logits_offset = logits_size * batch;
int output_offset = output_size * batch;
TF_LITE_ENSURE_OK(
context,
MultinomialSample(context, params->rng, logits, logits_offset,
logits_size, output, output_offset, output_size));
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_MULTINOMIAL() {
static TfLiteRegistration r = {multinomial::Init, multinomial::Free,
multinomial::Prepare, multinomial::Eval};
return &r;
}
}
}
} | #include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <random>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/custom_ops_register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
template <>
tflite::TensorType GetTTEnum<int>() {
return tflite::TensorType_INT32;
}
template <>
tflite::TensorType GetTTEnum<int64_t>() {
return tflite::TensorType_INT64;
}
class MultinomialOpModel : public tflite::SingleOpModel {
public:
MultinomialOpModel(tflite::TensorData logits, int num_samples,
tflite::TensorData output) {
logits_ = AddInput(logits);
num_samples_ = AddConstInput(tflite::TensorType_INT32, {num_samples}, {});
output_ = AddOutput(output);
SetCustomOp("Multinomial", {}, ops::custom::Register_MULTINOMIAL);
BuildInterpreter({GetShape(logits_), GetShape(num_samples_)});
}
int logits_;
int num_samples_;
int output_;
int logits() { return logits_; }
int num_samples() { return num_samples_; }
int output() { return output_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
};
}
}
template <typename Type1, typename Type2>
struct TypePair {
using T1 = Type1;
using T2 = Type2;
};
template <typename TypePair>
class MultinomialTest : public ::testing::Test {
public:
using FloatType = typename TypePair::T1;
using IntegralType = typename TypePair::T2;
};
using TestTypes =
::testing::Types<TypePair<float, int>, TypePair<double, int>,
TypePair<float, int64_t>, TypePair<double, int64_t> >;
TYPED_TEST_SUITE(MultinomialTest, TestTypes);
TYPED_TEST(MultinomialTest, TestMultiBatch) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {3, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(m.logits(),
std::vector<Float>(9, static_cast<Float>(0.0f)));
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples * 3);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0 + c1 + c2, 3 * kNumSamples);
EXPECT_GT(c0, 750);
EXPECT_GT(c1, 750);
EXPECT_GT(c2, 750);
EXPECT_LT(c0, 1250);
EXPECT_LT(c1, 1250);
EXPECT_LT(c2, 1250);
}
TYPED_TEST(MultinomialTest, TestSampleHighLogOdds) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(m.logits(),
{static_cast<Float>(0.0f), static_cast<Float>(1.0f),
static_cast<Float>(0.0f)});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0 + c1 + c2, kNumSamples);
EXPECT_GT(c1, c0);
EXPECT_GT(c1, c2);
}
TYPED_TEST(MultinomialTest, TestVeryLowLogOdds) {
const int kNumSamples = 1000;
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, 3}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
m.PopulateTensor<Float>(
m.logits(), {static_cast<Float>(-1000.0f), static_cast<Float>(-1000.0f),
static_cast<Float>(0.0f)});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
EXPECT_EQ(output.size(), kNumSamples);
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
int c2 = std::count(output.begin(), output.end(), 2);
EXPECT_EQ(c0, 0);
EXPECT_EQ(c1, 0);
EXPECT_EQ(c2, kNumSamples);
}
TYPED_TEST(MultinomialTest, TestSamplesDifferent) {
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
const int kNumSamples = 5;
const int kNumLogits = 1000;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, kNumLogits}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
std::vector<Float> logits(kNumLogits, static_cast<Float>(0.0f));
m.PopulateTensor<Float>(m.logits(), logits);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output1 = m.GetOutput<Int>();
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output2 = m.GetOutput<Int>();
bool successive_samples_are_different = false;
for (int i = 0; i < kNumSamples; ++i) {
if (output1[i] == output2[i]) continue;
successive_samples_are_different = true;
break;
}
EXPECT_TRUE(successive_samples_are_different);
}
TYPED_TEST(MultinomialTest, TestSamplesPrecise) {
using Float = typename TestFixture::FloatType;
using Int = typename TestFixture::IntegralType;
const int kNumSamples = 100000;
const int kNumLogits = 2;
tflite::MultinomialOpModel m({tflite::GetTTEnum<Float>(), {1, kNumLogits}},
kNumSamples, {tflite::GetTTEnum<Int>(), {}});
std::vector<Float> logits(
      {static_cast<Float>(1000.0), static_cast<Float>(1001.0)});
m.PopulateTensor<Float>(m.logits(), logits);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
auto output = m.GetOutput<Int>();
int c0 = std::count(output.begin(), output.end(), 0);
int c1 = std::count(output.begin(), output.end(), 1);
double p0 = static_cast<double>(c0) / (c0 + c1);
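  // P(class 0) = exp(1000) / (exp(1000) + exp(1001)) = 1 / (1 + e)
  // ≈ 0.26894142137, so the empirical fraction should land close to that.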
EXPECT_LT(std::abs(p0 - 0.26894142137), 0.01);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/multinomial.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/multinomial_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
84744b48-6c8e-41e3-bc3f-835b68c094da | cpp | google/tsl | abi | tsl/platform/abi.cc | tsl/platform/abi_test.cc | #include "tsl/platform/abi.h"
#include "tsl/platform/types.h"
#if defined(_MSC_VER)
#include <windows.h>
#include <cstring>
#else
#include <cxxabi.h>
#include <cstdlib>
#endif
#include <memory>
#include <string>
#if defined(_MSC_VER)
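// Forward declaration of the MSVC CRT routine that undecorates (demangles)
// MSVC-mangled symbol names; it is not exposed through a public header.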
extern "C" char* __unDName(char* output_string, const char* name,
int max_string_length, void* (*p_alloc)(std::size_t),
void (*p_free)(void*), unsigned short disable_flags);
#endif
namespace tsl {
namespace port {
string MaybeAbiDemangle(const char* name) {
#if defined(_MSC_VER)
  std::unique_ptr<char, void (*)(void*)> demangled{
      __unDName(nullptr, name, 0, std::malloc, std::free,
                static_cast<unsigned short>(0)),
      std::free};
return string(demangled.get() != nullptr ? demangled.get() : name);
#else
int status = 0;
std::unique_ptr<char, void (*)(void*)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status), std::free};
return (status == 0) ? res.get() : name;
#endif
}
}
} | #include "tsl/platform/abi.h"
#include <typeinfo>
#include "tsl/platform/test.h"
namespace tsl {
struct MyRandomPODType {};
TEST(AbiTest, AbiDemangleTest) {
EXPECT_EQ(port::MaybeAbiDemangle(typeid(int).name()), "int");
#ifdef PLATFORM_WINDOWS
const char pod_type_name[] = "struct tsl::MyRandomPODType";
#else
const char pod_type_name[] = "tsl::MyRandomPODType";
#endif
EXPECT_EQ(port::MaybeAbiDemangle(typeid(MyRandomPODType).name()),
pod_type_name);
EXPECT_EQ(
port::MaybeAbiDemangle("help! i'm caught in a C++ mangle factoryasdf"),
"help! i'm caught in a C++ mangle factoryasdf");
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/abi.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/abi_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
30f62b59-33de-4ae1-862a-1be8642281f9 | cpp | tensorflow/tensorflow | tpu_embedding_errors | tensorflow/core/tpu/tpu_embedding_errors.cc | tensorflow/core/tpu/tpu_embedding_errors_test.cc | #include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/tpu_embedding_configuration.pb.h"
namespace tensorflow::tpu {
Status AppendTpuEmbeddingErrorPayload(Status obj) {
if (obj.ok()) {
return absl::OkStatus();
} else {
const std::string error_message =
absl::StrCat(kTpuEmbeddingErrorMessage, ". ", obj.message());
Status status(obj.code(), error_message);
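    // The attached TPUEmbeddingError proto carries no fields; its presence
    // alone is what HasTpuEmbeddingErrorPayload checks for.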
TPUEmbeddingError error_payload;
status.SetPayload(kTpuEmbeddingErrorUrl,
absl::Cord(error_payload.SerializeAsString()));
return status;
}
}
bool HasTpuEmbeddingErrorPayload(const Status& status) {
return status.GetPayload(kTpuEmbeddingErrorUrl).has_value();
}
bool HasTpuEmbeddingErrorMessage(const Status& status) {
return absl::StrContains(status.message(), kTpuEmbeddingErrorMessage);
}
} | #include "tensorflow/core/tpu/tpu_embedding_errors.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow::tpu {
namespace {
using absl::Status;
using absl::StatusOr;
StatusOr<std::string> GenerateTFStatusOr(absl::StatusCode code,
absl::string_view value = "") {
if (code == absl::StatusCode::kOk) {
return std::string(value);
} else {
return absl::Status(code, value);
}
}
TEST(TpuEmbeddingErrors, StatusOk) {
constexpr absl::string_view kValue = "success";
{
const Status status = AppendTpuEmbeddingErrorPayload(absl::OkStatus());
TF_EXPECT_OK(status);
EXPECT_FALSE(HasTpuEmbeddingErrorPayload(status));
EXPECT_FALSE(HasTpuEmbeddingErrorMessage(status));
}
{
TF_ASSERT_OK_AND_ASSIGN(const std::string value,
AppendTpuEmbeddingErrorPayload(GenerateTFStatusOr(
absl::StatusCode::kOk, kValue)));
EXPECT_EQ(value, kValue);
}
}
TEST(TpuEmbeddingErrors, StatusFailed) {
{
const Status status =
AppendTpuEmbeddingErrorPayload(errors::InvalidArgument(""));
EXPECT_EQ(status.code(), error::Code::INVALID_ARGUMENT);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
{
StatusOr<std::string> status_or = AppendTpuEmbeddingErrorPayload(
GenerateTFStatusOr(absl::StatusCode::kResourceExhausted));
EXPECT_FALSE(status_or.ok());
const Status& status = status_or.status();
EXPECT_EQ(status.code(), error::Code::RESOURCE_EXHAUSTED);
EXPECT_TRUE(HasTpuEmbeddingErrorPayload(status));
EXPECT_TRUE(HasTpuEmbeddingErrorMessage(status));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_errors.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/tpu_embedding_errors_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b05ee199-442e-4b1e-aba6-4f082d0a2e56 | cpp | tensorflow/tensorflow | scatter_nd | tensorflow/lite/kernels/scatter_nd.cc | tensorflow/lite/kernels/scatter_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace scatter_nd {
constexpr int kIndices = 0;
constexpr int kUpdates = 1;
constexpr int kShape = 2;
constexpr int kOutputTensor = 0;
template <typename IndicesT>
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
const TfLiteTensor* shape,
TfLiteTensor* output) {
const int shape_rank = SizeOfDimension(shape, 0);
TfLiteIntArray* output_shape = TfLiteIntArrayCreate(shape_rank);
const auto* shape_data = GetTensorData<IndicesT>(shape);
for (int i = 0; i < shape_rank; i++) {
output_shape->data[i] = shape_data[i];
}
return context->ResizeTensor(context, output, output_shape);
}
template <typename IndicesT>
TfLiteStatus CheckShapes(TfLiteContext* context, const RuntimeShape& indices,
const RuntimeShape& updates,
const RuntimeShape& shape_shape,
const IndicesT* shape_data) {
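  // Shape contract: indices has shape [d_0, ..., d_{n-1}, ix], updates has
  // shape [d_0, ..., d_{n-1}] followed by output_shape[ix:], and shape_data
  // holds the full output shape of rank shape_shape.Dims(0).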
TF_LITE_ENSURE(context, (indices.DimensionsCount() >= 1) &&
(updates.DimensionsCount() >= 1) &&
(shape_shape.DimensionsCount() == 1));
const int outer_dims = indices.DimensionsCount() - 1;
for (int i = 0; i < outer_dims; ++i) {
TF_LITE_ENSURE_EQ(context, indices.Dims(i), updates.Dims(i));
}
const int ix = indices.Dims(outer_dims);
TF_LITE_ENSURE_EQ(context, updates.DimensionsCount() - outer_dims,
shape_shape.Dims(0) - ix);
for (int i = 0; i + outer_dims < updates.DimensionsCount(); ++i) {
TF_LITE_ENSURE_EQ(context, updates.Dims(i + outer_dims),
shape_data[ix + i]);
}
return kTfLiteOk;
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
switch (updates->type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteBool:
case kTfLiteInt8:
case kTfLiteInt64:
case kTfLiteInt32:
break;
default:
TF_LITE_KERNEL_LOG(
context, "Updates of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(updates->type));
return kTfLiteError;
}
if (indices->type != shape->type) {
TF_LITE_KERNEL_LOG(context, "Indices and shape must have the same type.");
return kTfLiteError;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = updates->type;
if (IsConstantOrPersistentTensor(shape)) {
switch (indices->type) {
case kTfLiteInt32:
TF_LITE_ENSURE_OK(
context,
CheckShapes<int32_t>(context, GetTensorShape(indices),
GetTensorShape(updates), GetTensorShape(shape),
GetTensorData<int32_t>(shape)));
return ResizeOutputTensor<int32_t>(context, shape, output);
default:
TF_LITE_KERNEL_LOG(
context, "Indices of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
} else {
SetTensorToDynamic(output);
return kTfLiteOk;
}
}
template <typename IndicesT, typename UpdatesT>
TfLiteStatus ScatterNd(const TfLiteTensor* indices, const TfLiteTensor* updates,
TfLiteTensor* output) {
return reference_ops::ScatterNd(
GetTensorShape(indices), GetTensorData<IndicesT>(indices),
GetTensorShape(updates), GetTensorData<UpdatesT>(updates),
GetTensorShape(output), GetTensorData<UpdatesT>(output));
}
template <typename IndicesT>
TfLiteStatus EvalScatterNd(TfLiteContext* context, const TfLiteTensor* indices,
const TfLiteTensor* updates,
const TfLiteTensor* shape, TfLiteTensor* output) {
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(
context, CheckShapes<IndicesT>(
context, GetTensorShape(indices), GetTensorShape(updates),
GetTensorShape(shape), GetTensorData<IndicesT>(shape)));
TF_LITE_ENSURE_OK(context,
ResizeOutputTensor<IndicesT>(context, shape, output));
}
TfLiteStatus status = kTfLiteError;
switch (updates->type) {
case kTfLiteFloat32:
status = ScatterNd<IndicesT, float>(indices, updates, output);
break;
case kTfLiteUInt8:
status = ScatterNd<IndicesT, uint8_t>(indices, updates, output);
break;
case kTfLiteBool:
status = ScatterNd<IndicesT, bool>(indices, updates, output);
break;
case kTfLiteInt8:
status = ScatterNd<IndicesT, int8_t>(indices, updates, output);
break;
case kTfLiteInt32:
status = ScatterNd<IndicesT, int32_t>(indices, updates, output);
break;
case kTfLiteInt64:
status = ScatterNd<IndicesT, int64_t>(indices, updates, output);
break;
default:
TF_LITE_KERNEL_LOG(
context, "Updates of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(updates->type));
return kTfLiteError;
}
if (status != kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "scatter_nd index out of bounds");
}
return status;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* indices;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kIndices, &indices));
const TfLiteTensor* updates;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kUpdates, &updates));
const TfLiteTensor* shape;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kShape, &shape));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
switch (indices->type) {
case kTfLiteInt32:
return EvalScatterNd<int32_t>(context, indices, updates, shape, output);
default:
TF_LITE_KERNEL_LOG(
context, "Indices of type '%s' are not supported by scatter_nd.",
TfLiteTypeGetName(indices->type));
return kTfLiteError;
}
}
}
TfLiteRegistration* Register_SCATTER_ND() {
  static TfLiteRegistration r = {/*init=*/nullptr, /*free=*/nullptr,
                                 scatter_nd::Prepare, scatter_nd::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class ScatterNdOpModel : public SingleOpModel {
public:
ScatterNdOpModel(const TensorData& indices, const TensorData& updates,
const TensorData& shape) {
indices_ = AddInput(indices);
updates_ = AddInput(updates);
shape_ = AddInput(shape);
output_ = AddOutput(updates.type);
SetBuiltinOp(BuiltinOperator_SCATTER_ND, BuiltinOptions_ScatterNdOptions,
CreateScatterNdOptions(builder_).Union());
BuildInterpreter(
{GetShape(indices_), GetShape(updates_), GetShape(shape_)});
}
template <typename T>
void SetIndices(std::initializer_list<T> data) {
PopulateTensor<T>(indices_, data);
}
template <typename T>
void SetUpdates(std::initializer_list<T> data) {
PopulateTensor<T>(updates_, data);
}
template <typename T>
void SetShape(std::initializer_list<T> data) {
PopulateTensor<T>(shape_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int indices_;
int updates_;
int shape_;
int output_;
};
TEST(ScatterNdOpTest, ScatterElementIntoVector) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<float>({9, 10, 11, 12});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({0, 11, 0, 10, 9, 0, 0, 12}));
}
TEST(ScatterNdOpTest, ScatterMatrixIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 1}},
{TensorType_FLOAT32, {2, 4, 4}}, {TensorType_INT32, {3}});
m.SetIndices<int32_t>({0, 2});
m.SetUpdates<float>({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8});
m.SetShape<int32_t>({4, 4, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 4, 4}));
EXPECT_THAT(
m.GetOutput<float>(),
ElementsAreArray({5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoMatrix) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_FLOAT32, {4, 4}},
{TensorType_INT32, {2}});
m.SetIndices<int32_t>({ 9, 8, 0, 1});
m.SetUpdates<float>({ 1, 2, 3, 4,
5, 6, 7, 8,
9, 10, 11, 12,
13, 14, 15, 16});
m.SetShape<int32_t>({10, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({10, 4}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 9, 10, 11, 12,
13, 14, 15, 16,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
5, 6, 7, 8,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterMatricesIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 2}},
{TensorType_FLOAT32, {2, 2, 2, 2}},
{TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 1, 1, 0, 1, 0, 0, 1, 0});
m.SetUpdates<float>({ 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16});
m.SetShape<int32_t>({2, 2, 2, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2, 2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray({ 9, 10, 11, 12,
5, 6, 7, 8,
13, 14, 15, 16,
1, 2, 3, 4}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank4Tensor) {
ScatterNdOpModel m({TensorType_INT32, {2, 2, 3}},
{TensorType_FLOAT32, {2, 2, 5}}, {TensorType_INT32, {4}});
m.SetIndices<int32_t>(
{ 2, 2, 2, 1, 0, 1, 0, 2, 0, 2, 2, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({3, 3, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 3, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5,
}));
}
TEST(ScatterNdOpTest, ScatterVectorIntoRank3Tensor) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, OverlappedIndicesSummed) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_FLOAT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 1, 0, 0, 2, 0, 2, 1, 0});
m.SetUpdates<float>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
17, 19, 21, 23, 25,
17, 19, 21, 23, 25,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0}));
}
TEST(ScatterNdOpTest, Int32IndicesUint8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_UINT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<uint8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<uint8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt8Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT8, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int8_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int8_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt32Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT32, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int32_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int32_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesInt64Updates) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
}
TEST(ScatterNdOpTest, Int32IndicesBoolUpdates) {
ScatterNdOpModel m({TensorType_INT32, {4, 1}}, {TensorType_BOOL, {4}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 7});
m.SetUpdates<bool>({true, false, true, false});
m.SetShape<int32_t>({8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8}));
EXPECT_THAT(
m.GetOutput<bool>(),
ElementsAreArray({false, true, false, false, true, false, false, false}));
}
TEST(ScatterNdOpTest, DynamicShape) {
ScatterNdOpModel m({TensorType_INT32, {4, 2}}, {TensorType_INT64, {4, 5}},
{TensorType_INT32, {3}});
m.SetIndices<int32_t>({ 0, 0, 1, 0, 0, 2, 1, 2});
m.SetUpdates<int64_t>(
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20});
m.SetShape<int32_t>({2, 3, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 1, 2, 3, 4, 5,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20}));
m.SetIndices<int32_t>({ 2, 3, 1, 0, 2, 0, 1, 2});
m.SetShape<int32_t>({3, 4, 5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3, 4, 5}));
EXPECT_THAT(m.GetOutput<int64_t>(),
ElementsAreArray({ 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
6, 7, 8, 9, 10,
0, 0, 0, 0, 0,
16, 17, 18, 19, 20,
0, 0, 0, 0, 0,
11, 12, 13, 14, 15,
0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
1, 2, 3, 4, 5}));
}
TEST(ScatterNdOpTest, ReadAndWriteArrayLimits) {
ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, 0, 2});
m.SetUpdates<int32_t>({1, 2, 3, 7, 9});
m.SetShape<int32_t>({5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({5}));
EXPECT_THAT(m.GetOutput<int32_t>(), ElementsAreArray({7, 3, 9, 2, 1}));
}
TEST(ScatterNdOpTest, OOBRead) {
ScatterNdOpModel m({TensorType_INT32, {1, 1}}, {TensorType_INT32, {1}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4});
m.SetUpdates<int32_t>({1});
m.SetShape<int32_t>({1});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
TEST(ScatterNdOpTest, OOBWrites) {
ScatterNdOpModel m({TensorType_INT32, {5, 1}}, {TensorType_INT32, {5}},
{TensorType_INT32, {1}});
m.SetIndices<int32_t>({4, 3, 1, -0x38, 0x38});
m.SetUpdates<int32_t>({1, 2, 3, 0x44444444, 0x55555555});
m.SetShape<int32_t>({1});
ASSERT_EQ(m.Invoke(), kTfLiteError);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/scatter_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/scatter_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3f4d455-cc3d-487e-b16e-d692bc492cec | cpp | tensorflow/tensorflow | xla_jit_compiled_cpu_function | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.cc | tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function_test.cc | #include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/tf2xla.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/cpu_function_runtime.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/service/cpu/buffer_info_util.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/platform_util.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/platform.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kHostPlatform[] = "Host";
absl::StatusOr<size_t> ComputeResultIndex(
const xla::BufferAssignment& buffer_assignment) {
TF_ASSIGN_OR_RETURN(const xla::BufferAllocation::Slice result_slice,
buffer_assignment.GetUniqueTopLevelOutputSlice());
return result_slice.index();
}
int CountResults(
absl::Span<const xla::cpu_function_runtime::BufferInfo> buffer_infos) {
int num_results = 0;
for (const auto& info : buffer_infos) {
if (info.is_result_parameter()) {
++num_results;
}
}
return num_results;
}
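// Copies the non-empty names in `entries` into `nonempty_names` and fills
// `name_ptrs` with a nullptr-terminated array of C-string pointers, using ""
// for entries without a name.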
template <typename T>
void CollectNames(const T& entries, std::vector<string>* nonempty_names,
std::vector<const char*>* name_ptrs) {
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
nonempty_names->push_back(name);
}
}
name_ptrs->reserve(entries.size() + 1);
size_t nonempty_index = 0;
for (const auto& entry : entries) {
const string& name = entry.name();
if (!name.empty()) {
name_ptrs->push_back(nonempty_names->at(nonempty_index).c_str());
++nonempty_index;
} else {
name_ptrs->push_back("");
}
}
name_ptrs->push_back(nullptr);
}
}
absl::StatusOr<std::unique_ptr<XlaJitCompiledCpuFunction>>
XlaJitCompiledCpuFunction::Compile(
const GraphDef& graph_def, const tf2xla::Config& config,
const xla::ExecutableBuildOptions& build_options) {
TF_ASSIGN_OR_RETURN(se::Platform * platform,
xla::PlatformUtil::GetPlatform(kHostPlatform));
TF_ASSIGN_OR_RETURN(xla::LocalClient * client,
xla::ClientLibrary::GetOrCreateLocalClient(platform));
xla::XlaComputation computation;
TF_RETURN_IF_ERROR(tensorflow::ConvertGraphDefToXla(graph_def, config, client,
&computation));
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::ProgramShape> program_shape,
client->GetComputationShape(computation));
if (program_shape->result().element_type() != xla::TUPLE) {
return errors::Internal(
"XlaJitCompiledCpuFunction requires the XLA result to be a tuple");
}
program_shape->clear_parameter_names();
std::vector<const xla::Shape*> arg_shapes;
arg_shapes.reserve(program_shape->parameters_size());
for (int i = 0; i < program_shape->parameters_size(); ++i) {
arg_shapes.push_back(&program_shape->parameters(i));
}
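  // Compile with a copy of the build options that explicitly disables the
  // XLA:CPU thunk runtime for this executable.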
xla::ExecutableBuildOptions build_options_copy = build_options;
build_options_copy.mutable_debug_options()->set_xla_cpu_use_thunk_runtime(
false);
TF_ASSIGN_OR_RETURN(auto executables, client->Compile(computation, arg_shapes,
build_options_copy));
TF_RET_CHECK(executables.size() == 1);
std::unique_ptr<xla::LocalExecutable> executable = std::move(executables[0]);
const xla::cpu::CpuExecutable* cpu_executable =
static_cast<xla::cpu::CpuExecutable*>(executable->executable());
XlaCompiledCpuFunction::RawFunction raw_function =
cpu_executable->compute_function();
const xla::BufferAssignment& buffer_assignment =
cpu_executable->buffer_assignment();
std::vector<xla::cpu_function_runtime::BufferInfo> buffer_infos =
xla::cpu::CreateBufferInfosFromBufferAssignment(cpu_executable->module(),
buffer_assignment);
std::vector<int32> arg_index_table =
xla::cpu::CreateArgIndexTableFromBufferInfos(buffer_infos);
TF_ASSIGN_OR_RETURN(size_t result_index,
ComputeResultIndex(buffer_assignment));
const int num_results = CountResults(buffer_infos);
std::unique_ptr<XlaJitCompiledCpuFunction> jit_unique_ptr(
new XlaJitCompiledCpuFunction);
XlaJitCompiledCpuFunction* jit = jit_unique_ptr.get();
jit->executable_ = std::move(executable);
jit->buffer_infos_ = std::move(buffer_infos);
jit->arg_index_table_ = std::move(arg_index_table);
jit->program_shape_ =
std::make_unique<xla::ProgramShapeProto>(program_shape->ToProto());
XlaCompiledCpuFunction::set_static_data_raw_function(&jit->static_data_,
raw_function);
XlaCompiledCpuFunction::set_static_data_buffer_infos(
&jit->static_data_, jit->buffer_infos_.data());
XlaCompiledCpuFunction::set_static_data_num_buffers(
&jit->static_data_, jit->buffer_infos_.size());
XlaCompiledCpuFunction::set_static_data_arg_index_table(
&jit->static_data_, jit->arg_index_table_.data());
XlaCompiledCpuFunction::set_static_data_num_args(
&jit->static_data_, jit->arg_index_table_.size());
XlaCompiledCpuFunction::set_static_data_num_variables(&jit->static_data_,
config.variable_size());
XlaCompiledCpuFunction::set_static_data_num_results(&jit->static_data_,
num_results);
XlaCompiledCpuFunction::set_static_data_result_index(&jit->static_data_,
result_index);
CollectNames(config.feed(), &jit->nonempty_arg_names_, &jit->arg_names_);
auto variable_copy = config.variable();
for (auto& var : variable_copy) {
if (var.name().empty()) {
var.set_name(var.node_name());
}
}
CollectNames(variable_copy, &jit->nonempty_variable_names_,
&jit->variable_names_);
CollectNames(config.fetch(), &jit->nonempty_result_names_,
&jit->result_names_);
XlaCompiledCpuFunction::set_static_data_arg_names(&jit->static_data_,
jit->arg_names_.data());
XlaCompiledCpuFunction::set_static_data_variable_names(
&jit->static_data_, jit->variable_names_.data());
XlaCompiledCpuFunction::set_static_data_result_names(
&jit->static_data_, jit->result_names_.data());
XlaCompiledCpuFunction::set_static_data_program_shape(
&jit->static_data_, jit->program_shape_.get());
if (cpu_executable->hlo_profiling_enabled()) {
XlaCompiledCpuFunction::set_static_data_hlo_profile_printer_data(
&jit->static_data_, &cpu_executable->hlo_profile_printer_data());
XlaCompiledCpuFunction::set_static_data_profile_counters_size(
&jit->static_data_,
cpu_executable->hlo_profile_printer_data().profile_counters_size());
}
return std::move(jit_unique_ptr);
}
} | #include "tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.h"
#include <memory>
#include <string>
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "tensorflow/compiler/tf2xla/xla_compiled_cpu_function.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/service/compiler.h"
#include "xla/service/platform_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/test.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::HasSubstr;
PLATFORM_DEFINE_ID(kFakePlatformId);
AttrValue TypeAttrValue(DataType type) {
AttrValue attr_value;
SetAttrValue(type, &attr_value);
return attr_value;
}
GraphDef SumGraph() {
GraphDef graph_def;
NodeDef* x = graph_def.add_node();
x->set_name("x");
x->set_op("Placeholder");
(*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* y = graph_def.add_node();
y->set_name("y");
y->set_op("Placeholder");
(*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
NodeDef* sum = graph_def.add_node();
sum->set_name("sum");
sum->set_op("Add");
sum->add_input("x");
sum->add_input("y");
(*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
return graph_def;
}
tf2xla::Config SumConfig() {
tf2xla::Config config;
tf2xla::Feed* x = config.add_feed();
x->mutable_id()->set_node_name("x");
x->set_name("x_name");
tf2xla::Feed* y = config.add_feed();
y->mutable_id()->set_node_name("y");
y->set_name("y_name");
tf2xla::Fetch* sum = config.add_fetch();
sum->mutable_id()->set_node_name("sum");
sum->set_name("sum_name");
return config;
}
GraphDef SumGraphVariable() {
constexpr char text_proto[] = R"pb(
node {
name: "x"
op: "VarHandleOp"
attr {
key: "dtype"
value { type: DT_INT32 }
}
attr {
key: "shared_name"
value { s: "myvar" }
}
attr {
key: "shape"
value { shape { dim { size: 1 } } }
}
}
node {
name: "read"
op: "ReadVariableOp"
input: "x"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "y"
op: "Placeholder"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
node {
name: "sum"
op: "Add"
input: "read"
input: "y"
attr {
key: "T"
value { type: DT_INT32 }
}
}
node {
name: "assign"
op: "AssignVariableOp"
input: "x"
input: "sum"
attr {
key: "dtype"
value { type: DT_INT32 }
}
}
# We use this identity op to make sure assign doesn't get pruned away.
node {
name: "out"
op: "Identity"
input: "y"
input: "^assign"
attr {
key: "T"
value { type: DT_INT32 }
}
})pb";
GraphDef graph;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &graph));
return graph;
}
tf2xla::Config SumConfigVariable() {
constexpr char text_proto[] = R"pb(feed { id { node_name: "y" } }
variable {
node_name: "myvar"
shape { dim { size: 1 } }
type: DT_INT32
}
fetch { id { node_name: "out" } })pb";
tf2xla::Config config;
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &config));
return config;
}
TEST(XlaJitCompiledCpuFunction, CheckThunkDisabled) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
ASSERT_TRUE(jit->LocalExecutable().build_options().has_debug_options());
ASSERT_FALSE(jit->LocalExecutable()
.build_options()
.debug_options()
.xla_cpu_use_thunk_runtime());
}
TEST(XlaJitCompiledCpuFunction, Sum) {
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 1);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(function.LookupArgIndex("x_name"), 0);
EXPECT_EQ(function.LookupArgIndex("y_name"), 1);
EXPECT_EQ(function.LookupArgIndex(""), -1);
EXPECT_EQ(function.LookupArgIndex("x"), -1);
EXPECT_EQ(function.LookupArgIndex("y"), -1);
EXPECT_EQ(function.LookupArgIndex("sum"), -1);
EXPECT_EQ(function.LookupArgIndex("sum_name"), -1);
EXPECT_EQ(function.LookupResultIndex("sum_name"), 0);
EXPECT_EQ(function.LookupResultIndex(""), -1);
EXPECT_EQ(function.LookupResultIndex("x"), -1);
EXPECT_EQ(function.LookupResultIndex("y"), -1);
EXPECT_EQ(function.LookupResultIndex("sum"), -1);
EXPECT_EQ(function.LookupResultIndex("x_name"), -1);
EXPECT_EQ(function.LookupResultIndex("y_name"), -1);
EXPECT_EQ(0, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("x"), -1);
for (int i = 0; i < function.num_args(); ++i) {
const char* name = function.GetArgName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupArgIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
for (int i = 0; i < function.num_results(); ++i) {
const char* name = function.GetResultName(i);
ASSERT_NE(name, nullptr);
const int roundtrip_i = function.LookupResultIndex(name);
EXPECT_EQ(roundtrip_i, i) << " name= " << name;
}
EXPECT_EQ(function.GetArgName(-1), nullptr);
EXPECT_EQ(function.GetArgName(function.num_args()), nullptr);
EXPECT_EQ(function.GetResultName(-1), nullptr);
EXPECT_EQ(function.GetResultName(function.num_results()), nullptr);
EXPECT_EQ(function.GetVariableName(0), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, SumVariable) {
GraphDef graph_def = SumGraphVariable();
tf2xla::Config config = SumConfigVariable();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
XlaCompiledCpuFunction function(jit->StaticData());
ASSERT_EQ(function.num_args(), 2);
ASSERT_EQ(function.num_results(), 2);
*static_cast<int32*>(function.arg_data(0)) = 10;
*static_cast<int32*>(function.arg_data(1)) = 32;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 10);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 42);
*static_cast<int32*>(function.arg_data(0)) = 100;
*static_cast<int32*>(function.arg_data(1)) = 320;
EXPECT_TRUE(function.Run());
EXPECT_EQ(function.error_msg(), "");
EXPECT_EQ(*static_cast<int32*>(function.result_data(0)), 100);
EXPECT_EQ(*static_cast<int32*>(function.result_data(1)), 420);
EXPECT_TRUE(function.HasNameIndices());
EXPECT_EQ(2, function.num_args());
EXPECT_EQ(1, function.num_variables());
EXPECT_EQ(function.LookupVariableIndex("myvar"), 1);
const char* name = function.GetVariableName(0);
EXPECT_EQ(std::string(name), "myvar");
EXPECT_EQ(function.GetVariableName(1), nullptr);
EXPECT_EQ(function.GetVariableName(-1), nullptr);
using xla::ShapeUtil;
const xla::Shape s32 = ShapeUtil::MakeShape(xla::S32, {});
const xla::Shape s32_1 = ShapeUtil::MakeShape(xla::S32, {1});
ASSERT_TRUE(function.ProgramShape() != nullptr);
const xla::ProgramShape program_shape(*function.ProgramShape());
ASSERT_EQ(program_shape.parameters_size(), 2);
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(0), s32));
EXPECT_TRUE(ShapeUtil::Compatible(program_shape.parameters(1), s32_1));
const xla::Shape& result = program_shape.result();
ASSERT_EQ(result.element_type(), xla::TUPLE);
ASSERT_EQ(ShapeUtil::TupleElementCount(result), 2);
const xla::Shape& result0 = ShapeUtil::GetTupleElementShape(result, 0);
EXPECT_TRUE(ShapeUtil::Compatible(result0, s32));
}
TEST(XlaJitCompiledCpuFunction, CanCompileWithAdditionalPlatform) {
class FakePlatform : public se::Platform {
public:
FakePlatform() : name_("FakePlatform") {}
~FakePlatform() override {}
se::Platform::Id id() const override { return kFakePlatformId; }
int VisibleDeviceCount() const override { return 0; }
const string& Name() const override { return name_; }
absl::StatusOr<std::unique_ptr<se::DeviceDescription>> DescriptionForDevice(
int ordinal) const override {
return std::unique_ptr<se::DeviceDescription>(nullptr);
}
absl::StatusOr<se::StreamExecutor*> ExecutorForDevice(
int ordinal) override {
return nullptr;
}
private:
string name_;
};
TF_EXPECT_OK(
se::PlatformManager::RegisterPlatform(std::make_unique<FakePlatform>()));
xla::Compiler::RegisterCompilerFactory(kFakePlatformId, []() {
return std::unique_ptr<xla::Compiler>(nullptr);
});
EXPECT_THAT(xla::PlatformUtil::GetDefaultPlatform().status().message(),
HasSubstr("FakePlatform"));
GraphDef graph_def = SumGraph();
tf2xla::Config config = SumConfig();
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<XlaJitCompiledCpuFunction> jit,
XlaJitCompiledCpuFunction::Compile(graph_def, config,
xla::ExecutableBuildOptions()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/xla_jit_compiled_cpu_function_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ace438f3-1171-4bf3-b5af-b4186736073d | cpp | tensorflow/tensorflow | mkl_eager_op_rewrite | tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite.cc | tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite_test.cc | #ifdef INTEL_MKL
#include <string>
#include <unordered_map>
#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
class MklEagerOpRewrite : public EagerOpRewrite {
public:
MklEagerOpRewrite(string name, string file, string line);
struct MklEagerOp {
string op_name;
std::function<bool(EagerOperation*)> RewriteRule;
std::function<Status(EagerOperation*, std::unique_ptr<EagerOperation>*)>
CreateMklOp;
};
private:
std::unordered_map<std::string, MklEagerOp> mkl_eager_ops_;
Status Run(EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op);
static Status SetupNewOp(EagerOperation* orig_op, const string mkl_op_name,
std::unique_ptr<EagerOperation>* new_mkl_op);
static Status CreateGenericMklOp(EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* mkl_op);
static bool RewriteConv2D(EagerOperation* op);
static bool RewriteSparseMatrixMatMul(EagerOperation* op);
static bool RewriteFusedBatchNormV3(EagerOperation* op);
Status RewriteToMklOp(EagerOperation* orig_op,
std::unique_ptr<EagerOperation>* mkl_op);
bool ShouldRewriteOp(EagerOperation* op);
static bool AlwaysRewrite(EagerOperation* op) { return true; }
bool IsKernelRegistered(string op_name, DataType dt);
void InsertMKLEagerOps(MklEagerOp op);
};
REGISTER_REWRITE(EagerOpRewriteRegistry::POST_PLACEMENT, 10000,
MklEagerOpRewrite);
MklEagerOpRewrite::MklEagerOpRewrite(string name, string file, string line)
: EagerOpRewrite(name, file, line) {
InsertMKLEagerOps({"AvgPool", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPoolGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPool3D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"AvgPool3DGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"BatchMatMul", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"BatchMatMulV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"Conv2D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv2DBackpropFilter", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps({"Conv2DBackpropInput", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps({"Conv3D", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv3DBackpropFilterV2", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps(
{"Conv3DBackpropInputV2", RewriteConv2D, CreateGenericMklOp});
InsertMKLEagerOps(
{"DepthwiseConv2dNative", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"DepthwiseConv2dNativeBackpropFilter", RewriteConv2D,
CreateGenericMklOp});
InsertMKLEagerOps({"DepthwiseConv2dNativeBackpropInput", RewriteConv2D,
CreateGenericMklOp});
InsertMKLEagerOps({"Einsum", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNorm", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNormGrad", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormGradV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormGradV3", RewriteFusedBatchNormV3, CreateGenericMklOp});
InsertMKLEagerOps({"FusedBatchNormV2", AlwaysRewrite, CreateGenericMklOp});
InsertMKLEagerOps(
{"FusedBatchNormV3", RewriteFusedBatchNormV3, CreateGenericMklOp});
InsertMKLEagerOps({"MatMul", AlwaysRewrite, CreateGenericMklOp});
#ifdef ENABLE_ONEDNN_V3
InsertMKLEagerOps(
{"SparseMatrixMatMul", RewriteSparseMatrixMatMul, CreateGenericMklOp});
#endif
};
void MklEagerOpRewrite::InsertMKLEagerOps(MklEagerOp op) {
mkl_eager_ops_.insert(std::make_pair(op.op_name, op));
}
Status MklEagerOpRewrite::Run(
EagerOperation* orig_op,
std::unique_ptr<tensorflow::EagerOperation>* out_op) {
if (ShouldRewriteOp(orig_op)) {
TF_CHECK_OK(RewriteToMklOp(orig_op, out_op));
}
return OkStatus();
}
Status MklEagerOpRewrite::SetupNewOp(
EagerOperation* orig_op, const string mkl_op_name,
std::unique_ptr<EagerOperation>* new_mkl_op) {
bool is_remote = false;
new_mkl_op->reset(new tensorflow::EagerOperation(&orig_op->EagerContext()));
TF_RETURN_IF_ERROR(new_mkl_op->get()->Reset(mkl_op_name.c_str(), nullptr,
is_remote, nullptr));
int num_inputs = orig_op->Inputs().size();
for (int i = 0; i < num_inputs; ++i) {
TF_RETURN_IF_ERROR((*new_mkl_op)->AddInput(orig_op->Inputs()[i]));
}
const NodeDef& orig_ndef = orig_op->MutableAttrs()->BuildNodeDef();
AttrSlice attr_list(orig_ndef);
for (const auto& attr : attr_list) {
(*new_mkl_op)->MutableAttrs()->Set(attr.first, attr.second);
}
if (!orig_op->EagerContext().RunEagerOpAsFunction()) {
(*new_mkl_op)
->MutableAttrs()
->Set("_kernel", mkl_op_registry::kMklNameChangeOpLabel);
}
string device_name = orig_op->DeviceName();
return (*new_mkl_op)->SetDeviceName(device_name.c_str());
}
Status MklEagerOpRewrite::CreateGenericMklOp(
EagerOperation* orig_op, std::unique_ptr<EagerOperation>* mkl_op) {
const string mkl_op_name =
mkl_op_registry::GetMklNativeOpName(orig_op->Name());
TF_CHECK_OK(SetupNewOp(orig_op, mkl_op_name, mkl_op));
return OkStatus();
}
bool MklEagerOpRewrite::ShouldRewriteOp(EagerOperation* op) {
if (!IsMKLEnabled()) {
return false;
}
DataType data_type;
if (op->Attrs().Get("T", &data_type) != OkStatus()) {
return false;
}
if (op->GetDeviceParsedName().type != "CPU") {
return false;
}
bool kernel_found = IsKernelRegistered(op->Name(), data_type);
if (!kernel_found) {
return false;
}
auto it = mkl_eager_ops_.find(op->Name());
if (it != mkl_eager_ops_.end()) {
if (it->second.RewriteRule(op)) {
return true;
}
}
return false;
}
bool MklEagerOpRewrite::IsKernelRegistered(string op_name, DataType dt) {
auto element = mkl_eager_ops_.find(op_name);
if (element != mkl_eager_ops_.end()) {
return (mkl_op_registry::IsMklOp(
mkl_op_registry::GetMklNativeOpName(op_name), dt, true) ||
mkl_op_registry::IsMklOp(mkl_op_registry::GetMklOpName(op_name), dt,
true));
} else {
return false;
}
}
Status MklEagerOpRewrite::RewriteToMklOp(
EagerOperation* orig_op, std::unique_ptr<EagerOperation>* mkl_op) {
TF_RETURN_IF_ERROR(
mkl_eager_ops_[orig_op->Name()].CreateMklOp(orig_op, mkl_op));
return OkStatus();
}
bool MklEagerOpRewrite::RewriteConv2D(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
string padding;
TF_CHECK_OK(GetNodeAttr(ndef, "padding", &padding));
return (padding != "EXPLICIT");
}
bool MklEagerOpRewrite::RewriteSparseMatrixMatMul(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
DataType T;
Tensor tensor;
bool adjoint_a, adjoint_b, transpose_a, transpose_b, transpose_out;
TF_CHECK_OK(GetNodeAttr(ndef, "T", &T));
if (T != DT_FLOAT) {
VLOG(1) << "_MklSparseMatrixMatMul only supports DT_FLOAT";
return false;
}
TF_CHECK_OK(GetNodeAttr(ndef, "adjoint_a", &adjoint_a));
TF_CHECK_OK(GetNodeAttr(ndef, "adjoint_b", &adjoint_b));
if (adjoint_a || adjoint_b) {
VLOG(1)
<< "_MklNativeSparseMatrixMatMul doesn't support adjointing matrices";
return false;
}
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_a", &transpose_a));
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_b", &transpose_b));
TF_CHECK_OK(GetNodeAttr(ndef, "transpose_output", &transpose_out));
if (transpose_a || transpose_b || transpose_out) {
VLOG(1)
<< "_MklNativeSparseMatrixMatMul doesn't support transposing matrices";
return false;
}
return true;
}
bool MklEagerOpRewrite::RewriteFusedBatchNormV3(EagerOperation* op) {
const NodeDef& ndef = op->MutableAttrs()->BuildNodeDef();
if (Check5DFormat(ndef)) {
VLOG(1) << "Eager Op Rewrite: FusedBatchNorm(Grad)V3 op currently does not "
<< "support 5D tensors.";
return false;
}
return true;
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_MKL)
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/eager/eager_op_rewrite_registry.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/mkl_util.h"
namespace tensorflow {
class EagerOpRewriteTest : public ::testing::Test {
public:
EagerOpRewriteTest() : eager_ctx_(nullptr) {}
~EagerOpRewriteTest() {
if (eager_ctx_) {
eager_ctx_->Unref();
}
}
std::unique_ptr<tensorflow::EagerOperation> CreateOp(const string op_name) {
std::unique_ptr<DeviceMgr> device_mgr =
std::make_unique<StaticDeviceMgr>(DeviceFactory::NewDevice(
"CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
bool async = false;
auto rendezvous =
tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
eager_ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
async, device_mgr.get(), false, std::move(rendezvous), nullptr, nullptr,
true);
EagerExecutor executor_(false);
std::unique_ptr<tensorflow::EagerOperation> op(
new tensorflow::EagerOperation(eager_ctx_));
EXPECT_EQ(OkStatus(),
op.get()->Reset(op_name.c_str(), nullptr, false, &executor_));
EXPECT_EQ(OkStatus(), op.get()->SetDeviceName(
"/job:localhost/replica:0/task:0/device:CPU:0"));
return op;
}
void CheckRewrite(EagerOperation* orig_op, string expected_op_name) {
std::unique_ptr<tensorflow::EagerOperation> out_op;
EXPECT_EQ(OkStatus(),
EagerOpRewriteRegistry::Global()->RunRewrite(
EagerOpRewriteRegistry::POST_PLACEMENT, orig_op, &out_op));
string actual_op_name = orig_op->Name();
if (out_op) {
actual_op_name = out_op->Name();
}
EXPECT_EQ(actual_op_name, expected_op_name);
}
protected:
tensorflow::EagerContext* eager_ctx_;
};
#define CONV_FORWARD_OPS "Conv2D", "Conv3D", "DepthwiseConv2dNative"
#define CONV_BACKWARD_OPS \
"Conv2DBackpropInput", "Conv2DBackpropFilter", "Conv3DBackpropFilterV2", \
"Conv3DBackpropInputV2", "DepthwiseConv2dNativeBackpropFilter", \
"DepthwiseConv2dNativeBackpropInput"
#define CONV_OPS CONV_FORWARD_OPS, CONV_BACKWARD_OPS
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "VALID"); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(conv_ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOps_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_FORWARD_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "EXPLICIT"); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(conv_ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOpsExplicitPadding_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> conv_ops = {CONV_BACKWARD_OPS}; \
for (int i = 0; i < conv_ops.size(); ++i) { \
auto orig_op = CreateOp(conv_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("padding", "EXPLICIT"); \
CheckRewrite(orig_op.get(), conv_ops[i]); \
} \
}
REGISTER_TEST_ALL_TYPES(ConvOpsExplicitPadding_Negative);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> ops = {"AvgPool", \
"AvgPoolGrad", \
"AvgPool3D", \
"AvgPool3DGrad", \
"BatchMatMul", \
"Einsum", \
"FusedBatchNorm", \
"FusedBatchNormV2", \
"FusedBatchNormV3", \
"FusedBatchNormGrad", \
"FusedBatchNormGradV2", \
"FusedBatchNormGradV3", \
"MatMul"}; \
for (int i = 0; i < ops.size(); ++i) { \
auto orig_op = CreateOp(ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
CheckRewrite(orig_op.get(), \
mkl_op_registry::GetMklNativeOpName(ops[i])); \
} \
}
REGISTER_TEST_ALL_TYPES(MostOps_Positive);
#undef REGISTER_TEST
#define REGISTER_TEST(NAME, T, INPUT) \
TEST_F(EagerOpRewriteTest, NAME##_##T) { \
std::vector<string> Fused_BN_ops = {"FusedBatchNormV3", \
"FusedBatchNormGradV3"}; \
for (int i = 0; i < Fused_BN_ops.size(); ++i) { \
auto orig_op = CreateOp(Fused_BN_ops[i]); \
orig_op->MutableAttrs()->Set("T", T); \
orig_op->MutableAttrs()->Set("data_format", "" DATA_FORMAT ""); \
CheckRewrite(orig_op.get(), Fused_BN_ops[i]); \
} \
}
#define DATA_FORMAT "NCDHW"
REGISTER_TEST_ALL_TYPES(FusedBatchNormV3_5D_Negative_1);
#undef DATA_FORMAT
#define DATA_FORMAT "NDHWC"
REGISTER_TEST_ALL_TYPES(FusedBatchNormV3_5D_Negative_2);
#undef DATA_FORMAT
#undef REGISTER_TEST
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/mkl_eager_op_rewrite_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b0c4542-33f1-461f-bcc9-aba9c6c10683 | cpp | abseil/abseil-cpp | endian | absl/base/internal/endian.h | absl/base/internal/endian_test.cc | #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
#define ABSL_BASE_INTERNAL_ENDIAN_H_
#include <cstdint>
#include <cstdlib>
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/nullability.h"
#include "absl/base/port.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
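// Byte-order swap helpers: prefer compiler builtins where available and fall
// back to a portable shift-and-mask implementation.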
inline uint64_t gbswap_64(uint64_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
return __builtin_bswap64(host_int);
#elif defined(_MSC_VER)
return _byteswap_uint64(host_int);
#else
return (((host_int & uint64_t{0xFF}) << 56) |
((host_int & uint64_t{0xFF00}) << 40) |
((host_int & uint64_t{0xFF0000}) << 24) |
((host_int & uint64_t{0xFF000000}) << 8) |
((host_int & uint64_t{0xFF00000000}) >> 8) |
((host_int & uint64_t{0xFF0000000000}) >> 24) |
((host_int & uint64_t{0xFF000000000000}) >> 40) |
((host_int & uint64_t{0xFF00000000000000}) >> 56));
#endif
}
inline uint32_t gbswap_32(uint32_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
return __builtin_bswap32(host_int);
#elif defined(_MSC_VER)
return _byteswap_ulong(host_int);
#else
return (((host_int & uint32_t{0xFF}) << 24) |
((host_int & uint32_t{0xFF00}) << 8) |
((host_int & uint32_t{0xFF0000}) >> 8) |
((host_int & uint32_t{0xFF000000}) >> 24));
#endif
}
inline uint16_t gbswap_16(uint16_t host_int) {
#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
return __builtin_bswap16(host_int);
#elif defined(_MSC_VER)
return _byteswap_ushort(host_int);
#else
return (((host_int & uint16_t{0xFF}) << 8) |
((host_int & uint16_t{0xFF00}) >> 8));
#endif
}
#ifdef ABSL_IS_LITTLE_ENDIAN
inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
#elif defined ABSL_IS_BIG_ENDIAN
inline uint16_t ghtons(uint16_t x) { return x; }
inline uint32_t ghtonl(uint32_t x) { return x; }
inline uint64_t ghtonll(uint64_t x) { return x; }
#else
#error \
"Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
"ABSL_IS_LITTLE_ENDIAN must be defined"
#endif
inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
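// Conversion between host byte order and little-endian order, plus unaligned
// loads and stores of little-endian data.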
namespace little_endian {
#ifdef ABSL_IS_LITTLE_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }
inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }
inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }
inline constexpr bool IsLittleEndian() { return true; }
#elif defined ABSL_IS_BIG_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
inline constexpr bool IsLittleEndian() { return false; }
#endif
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
inline uint16_t Load16(absl::Nonnull<const void *> p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
inline uint32_t Load32(absl::Nonnull<const void *> p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
inline uint64_t Load64(absl::Nonnull<const void *> p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
}
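// Same interface as little_endian above, but for big-endian (network) order.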
namespace big_endian {
#ifdef ABSL_IS_LITTLE_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
inline constexpr bool IsLittleEndian() { return true; }
#elif defined ABSL_IS_BIG_ENDIAN
inline uint16_t FromHost16(uint16_t x) { return x; }
inline uint16_t ToHost16(uint16_t x) { return x; }
inline uint32_t FromHost32(uint32_t x) { return x; }
inline uint32_t ToHost32(uint32_t x) { return x; }
inline uint64_t FromHost64(uint64_t x) { return x; }
inline uint64_t ToHost64(uint64_t x) { return x; }
inline constexpr bool IsLittleEndian() { return false; }
#endif
inline uint8_t FromHost(uint8_t x) { return x; }
inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
inline uint8_t ToHost(uint8_t x) { return x; }
inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
inline int8_t FromHost(int8_t x) { return x; }
inline int16_t FromHost(int16_t x) {
return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
}
inline int32_t FromHost(int32_t x) {
return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
}
inline int64_t FromHost(int64_t x) {
return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
}
inline int8_t ToHost(int8_t x) { return x; }
inline int16_t ToHost(int16_t x) {
return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
}
inline int32_t ToHost(int32_t x) {
return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
}
inline int64_t ToHost(int64_t x) {
return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
}
inline uint16_t Load16(absl::Nonnull<const void *> p) {
return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
}
inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
}
inline uint32_t Load32(absl::Nonnull<const void *> p) {
return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
}
inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
}
inline uint64_t Load64(absl::Nonnull<const void *> p) {
return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
}
inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/internal/endian.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
const uint64_t kInitialNumber{0x0123456789abcdef};
const uint64_t k64Value{kInitialNumber};
const uint32_t k32Value{0x01234567};
const uint16_t k16Value{0x0123};
const int kNumValuesToTest = 1000000;
const int kRandomSeed = 12345;
#if defined(ABSL_IS_BIG_ENDIAN)
const uint64_t kInitialInNetworkOrder{kInitialNumber};
const uint64_t k64ValueLE{0xefcdab8967452301};
const uint32_t k32ValueLE{0x67452301};
const uint16_t k16ValueLE{0x2301};
const uint64_t k64ValueBE{kInitialNumber};
const uint32_t k32ValueBE{k32Value};
const uint16_t k16ValueBE{k16Value};
#elif defined(ABSL_IS_LITTLE_ENDIAN)
const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
const uint64_t k64ValueLE{kInitialNumber};
const uint32_t k32ValueLE{k32Value};
const uint16_t k16ValueLE{k16Value};
const uint64_t k64ValueBE{0xefcdab8967452301};
const uint32_t k32ValueBE{0x67452301};
const uint16_t k16ValueBE{0x2301};
#endif
std::vector<uint16_t> GenerateAllUint16Values() {
std::vector<uint16_t> result;
result.reserve(size_t{1} << (sizeof(uint16_t) * 8));
for (uint32_t i = std::numeric_limits<uint16_t>::min();
i <= std::numeric_limits<uint16_t>::max(); ++i) {
result.push_back(static_cast<uint16_t>(i));
}
return result;
}
template<typename T>
std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) {
std::vector<T> result;
result.reserve(num_values_to_test);
std::mt19937_64 rng(kRandomSeed);
for (size_t i = 0; i < num_values_to_test; ++i) {
result.push_back(rng());
}
return result;
}
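// Reference byte swap used to validate the gbswap_* helpers: reverses the
// bytes of `bytes` in place.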
void ManualByteSwap(char* bytes, int length) {
if (length == 1)
return;
EXPECT_EQ(0, length % 2);
for (int i = 0; i < length / 2; ++i) {
int j = (length - 1) - i;
using std::swap;
swap(bytes[i], bytes[j]);
}
}
template<typename T>
inline T UnalignedLoad(const char* p) {
static_assert(
sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
"Unexpected type size");
switch (sizeof(T)) {
case 1: return *reinterpret_cast<const T*>(p);
case 2:
return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
case 4:
return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
case 8:
return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
default:
return {};
}
}
template <typename T, typename ByteSwapper>
static void GBSwapHelper(const std::vector<T>& host_values_to_test,
const ByteSwapper& byte_swapper) {
for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
it != host_values_to_test.end(); ++it) {
T host_value = *it;
char actual_value[sizeof(host_value)];
memcpy(actual_value, &host_value, sizeof(host_value));
byte_swapper(actual_value);
char expected_value[sizeof(host_value)];
memcpy(expected_value, &host_value, sizeof(host_value));
ManualByteSwap(expected_value, sizeof(host_value));
ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
<< "Swap output for 0x" << std::hex << host_value << " does not match. "
<< "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
<< "actual: 0x" << UnalignedLoad<T>(actual_value);
}
}
void Swap16(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE16(
bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
}
void Swap32(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE32(
bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
}
void Swap64(char* bytes) {
ABSL_INTERNAL_UNALIGNED_STORE64(
bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
}
TEST(EndianessTest, Uint16) {
GBSwapHelper(GenerateAllUint16Values(), &Swap16);
}
TEST(EndianessTest, Uint32) {
GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
}
TEST(EndianessTest, Uint64) {
GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
}
TEST(EndianessTest, ghtonll_gntohll) {
uint32_t test = 0x01234567;
EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
uint64_t comp = absl::ghtonll(kInitialNumber);
EXPECT_EQ(comp, kInitialInNetworkOrder);
comp = absl::gntohll(kInitialInNetworkOrder);
EXPECT_EQ(comp, kInitialNumber);
uint64_t value = 1;
for (int i = 0; i < 100; ++i) {
comp = absl::ghtonll(absl::gntohll(value));
EXPECT_EQ(value, comp);
comp = absl::gntohll(absl::ghtonll(value));
EXPECT_EQ(value, comp);
value *= 37;
}
}
TEST(EndianessTest, little_endian) {
uint64_t comp = little_endian::FromHost16(k16Value);
EXPECT_EQ(comp, k16ValueLE);
comp = little_endian::ToHost16(k16ValueLE);
EXPECT_EQ(comp, k16Value);
comp = little_endian::FromHost32(k32Value);
EXPECT_EQ(comp, k32ValueLE);
comp = little_endian::ToHost32(k32ValueLE);
EXPECT_EQ(comp, k32Value);
comp = little_endian::FromHost64(k64Value);
EXPECT_EQ(comp, k64ValueLE);
comp = little_endian::ToHost64(k64ValueLE);
EXPECT_EQ(comp, k64Value);
uint16_t u16Buf;
uint32_t u32Buf;
uint64_t u64Buf;
little_endian::Store16(&u16Buf, k16Value);
EXPECT_EQ(u16Buf, k16ValueLE);
comp = little_endian::Load16(&u16Buf);
EXPECT_EQ(comp, k16Value);
little_endian::Store32(&u32Buf, k32Value);
EXPECT_EQ(u32Buf, k32ValueLE);
comp = little_endian::Load32(&u32Buf);
EXPECT_EQ(comp, k32Value);
little_endian::Store64(&u64Buf, k64Value);
EXPECT_EQ(u64Buf, k64ValueLE);
comp = little_endian::Load64(&u64Buf);
EXPECT_EQ(comp, k64Value);
}
TEST(EndianessTest, big_endian) {
uint16_t u16Buf;
uint32_t u32Buf;
uint64_t u64Buf;
unsigned char buffer[10];
big_endian::Store16(&u16Buf, k16Value);
EXPECT_EQ(u16Buf, k16ValueBE);
uint64_t comp = big_endian::Load16(&u16Buf);
EXPECT_EQ(comp, k16Value);
big_endian::Store32(&u32Buf, k32Value);
EXPECT_EQ(u32Buf, k32ValueBE);
comp = big_endian::Load32(&u32Buf);
EXPECT_EQ(comp, k32Value);
big_endian::Store64(&u64Buf, k64Value);
EXPECT_EQ(u64Buf, k64ValueBE);
comp = big_endian::Load64(&u64Buf);
EXPECT_EQ(comp, k64Value);
big_endian::Store16(buffer + 1, k16Value);
EXPECT_EQ(u16Buf, k16ValueBE);
comp = big_endian::Load16(buffer + 1);
EXPECT_EQ(comp, k16Value);
big_endian::Store32(buffer + 1, k32Value);
EXPECT_EQ(u32Buf, k32ValueBE);
comp = big_endian::Load32(buffer + 1);
EXPECT_EQ(comp, k32Value);
big_endian::Store64(buffer + 1, k64Value);
EXPECT_EQ(u64Buf, k64ValueBE);
comp = big_endian::Load64(buffer + 1);
EXPECT_EQ(comp, k64Value);
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/endian.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/endian_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
faa2d16e-aafb-4563-bd36-573f59beadb8 | cpp | tensorflow/tensorflow | compilation_timer | tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h | tensorflow/compiler/mlir/tf2xla/internal/compilation_timer_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_COMPILATION_TIMER_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_COMPILATION_TIMER_H_
#include <chrono>
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
struct CompilationTimer {
uint64_t start_cycles =
tensorflow::profile_utils::CpuUtils::GetCurrentClockCycle();
uint64_t ElapsedCycles() {
return tensorflow::profile_utils::CpuUtils::GetCurrentClockCycle() -
start_cycles;
}
int64_t ElapsedCyclesInMilliseconds() {
std::chrono::duration<double> duration =
tensorflow::profile_utils::CpuUtils::ConvertClockCycleToTime(
ElapsedCycles());
return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
.count();
}
};
#endif | #include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
namespace {
TEST(CompilationTimer, MeasuresElapsedTime) {
uint64_t timer_result_in_milliseconds;
{
CompilationTimer timer;
absl::SleepFor(absl::Milliseconds(100));
timer_result_in_milliseconds = timer.ElapsedCyclesInMilliseconds();
}
ASSERT_THAT(timer_result_in_milliseconds, testing::Ne(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/compilation_timer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df40c77a-7062-4e92-a882-a8f6fe66ce8f | cpp | tensorflow/tensorflow | lockable | third_party/xla/xla/service/lockable.h | third_party/xla/xla/service/lockable_test.cc | #ifndef XLA_SERVICE_LOCKABLE_H_
#define XLA_SERVICE_LOCKABLE_H_
#include <string>
#include "absl/base/thread_annotations.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "tsl/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
template <typename T>
struct LockableName {
static std::string ToString(const T& value) {
return absl::StrFormat("lockable %p", &value);
}
};
template <typename T, typename LockableName = LockableName<T>>
class Lockable {
public:
class Lock {
public:
Lock() = default;
Lock(Lock&& other) {
lockable_ = other.lockable_;
other.lockable_ = nullptr;
}
Lock& operator=(Lock&& other) {
lockable_ = other.lockable_;
other.lockable_ = nullptr;
return *this;
}
~Lock() {
if (lockable_) lockable_->Release();
}
T& operator*() const { return lockable_->value_; }
T* operator->() const { return &lockable_->value_; }
operator bool() const { return lockable_ != nullptr; }
std::string ToString() const {
return lockable_ ? lockable_->ToString() : "<empty lock>";
}
private:
friend class Lockable;
explicit Lock(Lockable* lockable) : lockable_(lockable) {}
Lockable* lockable_ = nullptr;
};
Lockable() = default;
explicit Lockable(T value) : value_(std::move(value)) {
VLOG(2) << "Constructed " << LockableName::ToString(value_);
}
template <typename... Args>
explicit Lockable(Args&&... args) : value_(std::forward<Args>(args)...) {
VLOG(2) << "Constructed " << LockableName::ToString(value_);
}
Lockable(const Lockable&) = delete;
Lockable& operator=(const Lockable&) = delete;
~Lockable() {
VLOG(2) << "Destroy " << LockableName::ToString(value_);
absl::MutexLock lock(&mutex_);
CHECK_EQ(is_unlocked_, true);
}
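  // Blocks until no other Lock holds the value, then acquires it; the returned
  // RAII Lock releases the value when destroyed.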
Lock Acquire() {
tsl::profiler::TraceMe trace([&] {
return tsl::profiler::TraceMeEncode("Lockable::Lock::Acquire",
{{"lockable", ToString()}});
});
absl::MutexLock lock(&mutex_);
mutex_.Await(absl::Condition(&is_unlocked_));
VLOG(2) << "Acquired " << LockableName::ToString(value_);
is_unlocked_ = false;
return Lock(this);
}
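  // Non-blocking variant of Acquire(): returns an empty Lock if the value is
  // currently held.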
Lock TryAcquire() {
absl::MutexLock lock(&mutex_);
if (is_unlocked_ == false) {
VLOG(2) << "Failed to acquire " << LockableName::ToString(value_);
return Lock();
}
VLOG(2) << "Acquired " << LockableName::ToString(value_);
is_unlocked_ = false;
return Lock(this);
}
std::string ToString() const { return LockableName::ToString(value_); }
protected:
const T& value() const { return value_; }
private:
friend class Lock;
void Release() {
absl::MutexLock lock(&mutex_);
VLOG(2) << "Released " << LockableName::ToString(value_);
CHECK(!is_unlocked_);
is_unlocked_ = true;
}
T value_;
absl::Mutex mutex_;
bool is_unlocked_ ABSL_GUARDED_BY(mutex_) = true;
};
}
#endif | #include "xla/service/lockable.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/synchronization/blocking_counter.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace xla {
tsl::thread::ThreadPool CreateThreadPool(int32_t size) {
return tsl::thread::ThreadPool(tsl::Env::Default(), "lockable_test", size);
}
template <>
struct LockableName<std::string> {
static std::string ToString(const std::string& str) {
return "lockable string " + str;
}
};
class LockableString : public Lockable<std::string> {
using Lockable::Lockable;
};
TEST(LockableTest, LockProperties) {
LockableString::Lock lock0;
EXPECT_FALSE(lock0);
LockableString str("foo");
LockableString::Lock lock1 = str.Acquire();
EXPECT_TRUE(lock1);
LockableString::Lock lock2 = std::move(lock1);
EXPECT_FALSE(lock1);
EXPECT_TRUE(lock2);
LockableString::Lock lock3 = str.TryAcquire();
EXPECT_FALSE(lock3);
EXPECT_EQ(lock1.ToString(), "<empty lock>");
EXPECT_EQ(lock2.ToString(), "lockable string foo");
EXPECT_EQ(str.ToString(), "lockable string foo");
auto sink = [](LockableString::Lock) {};
sink(std::move(lock2));
LockableString::Lock lock4 = str.TryAcquire();
EXPECT_TRUE(lock4);
}
TEST(LockableTest, ExclusiveAccess) {
absl::BlockingCounter counter(100);
auto thread_pool = CreateThreadPool(10);
LockableString str("foo");
for (size_t i = 0; i < 100; ++i) {
thread_pool.Schedule([&] {
{
auto exclusive_str = str.Acquire();
ASSERT_EQ(*exclusive_str, "foo");
}
counter.DecrementCount();
});
}
counter.Wait();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/lockable.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/lockable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37cc81f3-453c-4552-97e5-0cd53e708bdd | cpp | google/tensorstore | proto_util | tensorstore/proto/proto_util.cc | tensorstore/proto/proto_util_test.cc | #include "tensorstore/proto/proto_util.h"
#include <stddef.h>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "google/protobuf/io/tokenizer.h"
#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
namespace tensorstore {
namespace {
class ErrorCollector : public google::protobuf::io::ErrorCollector {
public:
ErrorCollector() = default;
~ErrorCollector() override = default;
void RecordError(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
void RecordWarning(int line, google::protobuf::io::ColumnNumber column,
absl::string_view message) override {
errors.emplace_back(absl::StrCat("Line: ", std::max(1, line + 1),
", col: ", column + 1, ": ", message));
}
std::vector<std::string> errors;
};
class ConcisePrinter : public google::protobuf::TextFormat::FastFieldValuePrinter {
public:
void PrintString(
const std::string& val,
google::protobuf::TextFormat::BaseTextGenerator* generator) const override {
if (val.size() <= 80) {
FastFieldValuePrinter::PrintString(val, generator);
return;
}
std::string output = absl::StrFormat("<%d bytes: ", val.size());
for (size_t i = 0; i < 8; i++) {
absl::StrAppendFormat(&output, "\\x%02x", val[i]);
}
absl::StrAppend(&output, "...>");
generator->PrintString(output);
}
};
}
bool TryParseTextProto(absl::string_view asciipb, google::protobuf::Message* msg,
std::vector<std::string>* errors,
bool allow_partial_messages,
bool allow_unknown_extensions) {
google::protobuf::TextFormat::Parser parser;
parser.AllowPartialMessage(allow_partial_messages);
parser.AllowUnknownExtension(allow_unknown_extensions);
ErrorCollector error_collector;
parser.RecordErrorsTo(&error_collector);
google::protobuf::io::ArrayInputStream asciipb_istream(asciipb.data(), asciipb.size());
if (parser.Parse(&asciipb_istream, msg)) {
return true;
}
msg->Clear();
if (errors) {
*errors = std::move(error_collector.errors);
}
return false;
}
std::string ConciseDebugString(const google::protobuf::Message& message) {
google::protobuf::TextFormat::Printer printer;
printer.SetDefaultFieldValuePrinter(new ConcisePrinter());
printer.SetSingleLineMode(true);
printer.SetExpandAny(true);
std::string debugstring;
printer.PrintToString(message, &debugstring);
if (!debugstring.empty() && debugstring.back() == ' ') {
debugstring.pop_back();
}
return debugstring;
}
} | #include "tensorstore/proto/proto_util.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/proto/array.pb.h"
#include "tensorstore/proto/protobuf_matchers.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::ConciseDebugString;
using ::tensorstore::TryParseTextProto;
TEST(ProtoUtilTest, Basic) {
constexpr const char kProto[] = R"pb(
dtype: "int64"
shape: [ 1, 2, 4 ]
int_data: [ 1, 0, 2, 2, 4, 5, 6, 7 ]
)pb";
::tensorstore::proto::Array proto;
EXPECT_TRUE(TryParseTextProto(kProto, &proto));
EXPECT_THAT(proto, EqualsProto(kProto));
std::vector<std::string> errors;
EXPECT_FALSE(TryParseTextProto("a: 'foo'", &proto, &errors));
EXPECT_FALSE(errors.empty());
}
TEST(ProtoUtilTest, ConciseDebugString) {
::tensorstore::proto::Array proto;
proto.set_dtype("int64");
proto.set_void_data(
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}"
"{01234567890123456789012345678901234567890123456789012345678901}");
EXPECT_EQ(
"dtype: \"int64\" "
"void_data: <256 bytes: \\x7b\\x30\\x31\\x32\\x33\\x34\\x35\\x36...>",
ConciseDebugString(proto));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_util.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/proto_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
992ee0d8-2b56-4259-a01e-6a37f75f1b53 | cpp | tensorflow/tensorflow | philox_random | tensorflow/core/lib/random/philox_random.h | third_party/xla/xla/tsl/lib/random/philox_random_test.cc | #ifndef TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_
#define TENSORFLOW_CORE_LIB_RANDOM_PHILOX_RANDOM_H_
#include "xla/tsl/lib/random/philox_random.h"
namespace tensorflow {
namespace random {
using tsl::random::Array;
using tsl::random::PhiloxRandom;
}
}
#endif | #include "xla/tsl/lib/random/philox_random.h"
#include <math.h>
#include <algorithm>
#include <functional>
#include <unordered_map>
#include <vector>
#include "xla/tsl/lib/random/philox_random_test_utils.h"
#include "xla/tsl/lib/random/random_distributions.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/random.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace random {
namespace {
class TrivialPhiloxDistribution {
public:
static constexpr int kResultElementCount = PhiloxRandom::kResultElementCount;
typedef PhiloxRandom::ResultType ResultType;
typedef PhiloxRandom::ResultElementType ResultElementType;
PHILOX_DEVICE_INLINE
ResultType operator()(PhiloxRandom* gen) { return (*gen)(); }
};
TEST(PhiloxRandomTest, SkipMatchTest) {
constexpr int count = 1024;
constexpr int skip_count = 2048;
uint64 test_seed = GetTestSeed();
std::vector<uint32> v1(count);
{
PhiloxRandom gen(test_seed);
gen.Skip(skip_count / 4);
FillRandoms<TrivialPhiloxDistribution>(gen, &v1[0], v1.size());
}
std::vector<uint32> v2(count + skip_count);
{
PhiloxRandom gen(test_seed);
FillRandoms<TrivialPhiloxDistribution>(gen, &v2[0], v2.size());
}
for (int i = 0; i < count; ++i) {
ASSERT_EQ(v1[i], v2[i + skip_count]);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/random/philox_random.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/philox_random_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
85cb7bf6-0fd4-4357-9ecf-51f39358593e | cpp | tensorflow/tensorflow | scoped_activate_context | third_party/xla/xla/stream_executor/gpu/scoped_activate_context.cc | third_party/xla/xla/stream_executor/gpu/scoped_activate_context_test.cc | #include "xla/stream_executor/gpu/scoped_activate_context.h"
#include "absl/log/check.h"
#include "xla/stream_executor/gpu/context.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/logging.h"
namespace stream_executor::gpu {
namespace {
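// Per-thread record of the currently active context, its device ordinal, and
// the ScopedActivateContext nesting depth.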
thread_local struct ThreadLocalData {
Context* context;
int device_ordinal;
int depth;
} tls_data = {};
}
ScopedActivateContext::ScopedActivateContext(GpuExecutor* gpu_executor)
: ScopedActivateContext(gpu_executor->gpu_context()) {}
ScopedActivateContext::ScopedActivateContext(StreamExecutor* executor)
: ScopedActivateContext(ExtractGpuExecutor(executor)) {}
ScopedActivateContext::ScopedActivateContext(gpu::Context* gpu_context) {
auto* tls = &tls_data;
if (tls->depth == 0) {
VLOG(3) << "ScopedActivateContext switching to "
<< gpu_context->device_ordinal();
gpu_context->SetActive();
tls->depth = 1;
tls->device_ordinal = gpu_context->device_ordinal();
tls->context = gpu_context;
to_restore_ = nullptr;
return;
}
tls->depth++;
if (tls->device_ordinal == gpu_context->device_ordinal()) {
DCHECK(gpu_context->IsActive());
return;
}
VLOG(3) << "ScopedActivateContext switching context from "
<< tls->device_ordinal << " to " << gpu_context->device_ordinal();
to_restore_ = tls->context;
gpu_context->SetActive();
tls->device_ordinal = gpu_context->device_ordinal();
tls->context = gpu_context;
}
ScopedActivateContext::~ScopedActivateContext() {
auto* tls = &tls_data;
tls->depth--;
DCHECK_GE(tls->depth, 0);
if (to_restore_ == nullptr) {
return;
}
to_restore_->SetActive();
tls->device_ordinal = to_restore_->device_ordinal();
tls->context = to_restore_;
}
} | #include "xla/stream_executor/gpu/scoped_activate_context.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/stream_executor/gpu/mock_context.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
using testing::Return;
namespace stream_executor::gpu {
namespace {
TEST(ScopedActivateContextTest, SetsActiveOnceForSameContextWorks) {
MockContext context;
EXPECT_CALL(context, SetActive).Times(1);
EXPECT_CALL(context, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context, IsActive).WillRepeatedly(Return(true));
{
ScopedActivateContext scoped_activate_context1(&context);
{ ScopedActivateContext scoped_activate_context2(&context); }
}
}
TEST(ScopedActivateContextTest, TwoDifferentContextsWorks) {
MockContext context1;
EXPECT_CALL(context1, SetActive).Times(2);
EXPECT_CALL(context1, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context1, IsActive).WillRepeatedly(Return(true));
MockContext context2;
EXPECT_CALL(context2, SetActive).Times(1);
EXPECT_CALL(context2, device_ordinal).WillRepeatedly(Return(2));
EXPECT_CALL(context2, IsActive).WillRepeatedly(Return(true));
{
ScopedActivateContext scoped_activate_context1(&context1);
{ ScopedActivateContext scoped_activate_context2(&context2); }
}
}
TEST(ScopedActivateContextTest, TwoThreadsBothSetActiveButDontRestore) {
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "test", 2);
thread_pool.Schedule([&]() {
MockContext context1;
EXPECT_CALL(context1, SetActive).Times(1);
EXPECT_CALL(context1, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context1, IsActive).Times(0);
ScopedActivateContext scoped_activate_context1(&context1);
});
thread_pool.Schedule([&]() {
MockContext context2;
EXPECT_CALL(context2, SetActive).Times(1);
EXPECT_CALL(context2, device_ordinal).WillRepeatedly(Return(1));
EXPECT_CALL(context2, IsActive).Times(0);
ScopedActivateContext scoped_activate_context2(&context2);
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/scoped_activate_context.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/gpu/scoped_activate_context_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50716b0d-9733-485e-b753-395edeb19808 | cpp | tensorflow/tensorflow | convert_nodes | tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc | tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc | #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <bitset>
#include <cmath>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/algorithm_selector.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/quantization_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/ops/slice_ops.h"
#include "tensorflow/compiler/tf2tensorrt/convert/timing_cache.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_experimental_features.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_shape_optimization_profiles.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/strided_slice_op.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
#include "third_party/tensorrt/NvInferPlugin.h"
#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
#define TFTRT_CHECK_INPUT_SIZE(size, exp_size, node_def) \
if ((size) != (exp_size)) { \
TFTRT_ERROR(errors::InvalidArgument, node_def.op(), " got ", (size), \
" inputs but expected ", (exp_size)); \
}
#define MAX_KERNEL_DIMS_PRODUCT(x) (int64_t(std::pow(100000.0F, (x) * 0.5F)))
namespace tensorflow {
namespace tensorrt {
namespace convert {
using absl::StrAppend;
using absl::StrCat;
namespace {
#define ADD_LAYER(layer_name) \
case nvinfer1::LayerType::k##layer_name: \
return #layer_name;
const char* LayerTypeToString(nvinfer1::LayerType layer_type) {
switch (layer_type) {
ADD_LAYER(CONVOLUTION)
ADD_LAYER(FULLY_CONNECTED)
ADD_LAYER(ACTIVATION)
ADD_LAYER(POOLING)
ADD_LAYER(LRN)
ADD_LAYER(SCALE)
ADD_LAYER(SOFTMAX)
ADD_LAYER(DECONVOLUTION)
ADD_LAYER(CONCATENATION)
ADD_LAYER(ELEMENTWISE)
ADD_LAYER(PLUGIN)
ADD_LAYER(UNARY)
ADD_LAYER(PADDING)
ADD_LAYER(SHUFFLE)
ADD_LAYER(REDUCE)
ADD_LAYER(TOPK)
ADD_LAYER(GATHER)
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(GRID_SAMPLE)
#endif
ADD_LAYER(MATRIX_MULTIPLY)
ADD_LAYER(RAGGED_SOFTMAX)
ADD_LAYER(CONSTANT)
ADD_LAYER(RNN_V2)
ADD_LAYER(IDENTITY)
ADD_LAYER(PLUGIN_V2)
ADD_LAYER(SLICE)
ADD_LAYER(SHAPE)
ADD_LAYER(PARAMETRIC_RELU)
ADD_LAYER(RESIZE)
ADD_LAYER(TRIP_LIMIT)
ADD_LAYER(RECURRENCE)
ADD_LAYER(ITERATOR)
ADD_LAYER(LOOP_OUTPUT)
ADD_LAYER(SELECT)
ADD_LAYER(FILL)
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(QUANTIZE)
ADD_LAYER(DEQUANTIZE)
#endif
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_LAYER(CONDITION)
ADD_LAYER(CONDITIONAL_INPUT)
ADD_LAYER(CONDITIONAL_OUTPUT)
ADD_LAYER(SCATTER)
ADD_LAYER(EINSUM)
ADD_LAYER(ASSERTION)
#endif
#if IS_TRT_VERSION_GE(8, 5, 0, 0)
ADD_LAYER(ONE_HOT)
ADD_LAYER(NON_ZERO)
ADD_LAYER(NMS)
#endif
#if IS_TRT_VERSION_GE(8, 6, 0, 0)
ADD_LAYER(REVERSE_SEQUENCE)
#endif
#if !IS_TRT_VERSION_GE(8, 0, 0, 0)
ADD_LAYER(RNN)
#endif
default:
return "UNKNOWN_LAYER";
}
}
#undef ADD_LAYER
void SetLayerNameHelper(nvinfer1::ILayer* layer, absl::string_view engine_name,
absl::string_view tf_name) {
const char* trt_name = LayerTypeToString(layer->getType());
layer->setName(
absl::StrCat(engine_name, "/", tf_name, ":", trt_name).c_str());
}
std::string GetLayerNameSuffix(absl::string_view sub_op_name,
std::optional<int> sub_op_instance) {
std::string op_suffix(sub_op_name);
if (sub_op_instance.has_value()) {
op_suffix =
absl::StrCat(op_suffix, "_", std::to_string(sub_op_instance.value()));
}
return op_suffix;
}
}
bool IsEngineInput(absl::string_view name) {
return absl::StartsWith(name, IONamePrefixes::kInputPHName);
}
bool IsEngineOutput(absl::string_view name) {
return absl::StartsWith(name, IONamePrefixes::kOutputPHName);
}
void GetOutputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int out_port,
PartialTensorShape* shape, DataType* dtype) {
if (graph_properties.HasOutputProperties(node->name())) {
auto output_params = graph_properties.GetOutputProperties(node->name());
auto out_shape = output_params.at(out_port);
*dtype = out_shape.dtype();
*shape = out_shape.shape();
} else {
LOG(INFO) << "Unknown output shape at node: " << node->name();
*dtype = node->output_type(out_port);
}
}
void GetInputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int in_port,
PartialTensorShape* shape, DataType* dtype) {
if (graph_properties.HasInputProperties(node->name())) {
auto input_params = graph_properties.GetInputProperties(node->name());
auto in_shape = input_params.at(in_port);
*dtype = in_shape.dtype();
*shape = in_shape.shape();
} else {
*dtype = node->input_type(in_port);
}
}
Status ValidateTensorProperties(const string& producer_node_type,
const DataType dtype,
const PartialTensorShape& shape,
const bool use_implicit_batch,
bool validation_only,
nvinfer1::DataType* trt_dtype,
nvinfer1::Dims* trt_dims, int* batch_size) {
TF_RETURN_IF_ERROR(TfTypeToTrtType(dtype, trt_dtype));
if (shape.dims() < 0) {
return errors::InvalidArgument("Input tensor rank is unknown.");
}
const int max_rank = nvinfer1::Dims::MAX_DIMS + (use_implicit_batch ? 1 : 0);
if (shape.dims() > max_rank) {
return errors::OutOfRange("Input tensor rank is greater than ", max_rank);
}
if (use_implicit_batch && (producer_node_type != "Const") &&
(shape.dims() < 1)) {
return errors::InvalidArgument(
"Scalar input tensor is not supported since the first dimension "
"is treated as batch dimension by TRT");
}
StatusOr<DimsAdapter> dims = DimsAdapter::Create(shape, use_implicit_batch);
TRT_ENSURE_OK(dims);
*trt_dims = dims->AsTrtDims();
if (use_implicit_batch) {
*batch_size = shape.dim_size(0);
}
const int first_trt_dim = use_implicit_batch ? 1 : 0;
for (int d = first_trt_dim; d < shape.dims(); ++d) {
if (shape.dim_size(d) == 0) {
return errors::Unimplemented(
"Input tensor with shape ", shape.DebugString(),
" is an empty tensor, which is not supported by TRT");
}
}
if (validation_only) return OkStatus();
if (use_implicit_batch) {
for (int d = first_trt_dim; d < shape.dims(); ++d) {
if (shape.dim_size(d) < 0) {
return errors::InvalidArgument(
"Input tensor with shape ", shape.DebugString(),
" has an unknown non-batch dimension at dim ", d);
}
}
}
return OkStatus();
}
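// Computes right-aligned, 1-padded TensorRT dimensions for both operands of a
// binary op so they can be broadcast, optionally checking that the resulting
// shapes are broadcast-compatible.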
Status GetTrtBroadcastShape(const TRT_TensorOrWeights& operand_l,
const TRT_TensorOrWeights& operand_r,
const bool check_feasibility,
const bool use_implicit_batch,
nvinfer1::Dims* operand_l_new_dims,
nvinfer1::Dims* operand_r_new_dims) {
if (!operand_l.is_tensor() && !operand_r.is_tensor()) {
return errors::InvalidArgument(
"Broadcasting requires at least one of the operands be tensors");
}
constexpr int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
auto compute_output_dims =
[use_implicit_batch](const TRT_TensorOrWeights& input,
int broadcast_num_dims,
std::array<int32_t, max_nb_dims>* output_dims_array,
nvinfer1::Dims* output_dims) -> Status {
const nvinfer1::Dims input_dims = input.GetTrtDims();
absl::c_fill(*output_dims_array, 1);
absl::c_copy(
DimsAdapter(input_dims),
output_dims_array->begin() + broadcast_num_dims - input_dims.nbDims);
if (use_implicit_batch && input.is_tensor()) {
const int true_input_dims = input_dims.nbDims + 1;
if (true_input_dims < broadcast_num_dims) {
return errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported ",
"(tensor #dims ", true_input_dims, " vs broadcast #dims ",
broadcast_num_dims, ")");
}
(*output_dims_array)[0] = -1;
}
auto offt = use_implicit_batch ? 1 : 0;
output_dims->nbDims = broadcast_num_dims - offt;
absl::c_copy(
absl::MakeSpan(*output_dims_array).subspan(offt, broadcast_num_dims),
output_dims->d);
return OkStatus();
};
const int broadcast_num_dims =
std::max(operand_l.GetTrtDims().nbDims +
(use_implicit_batch && operand_l.is_tensor()),
operand_r.GetTrtDims().nbDims +
(use_implicit_batch && operand_r.is_tensor()));
std::array<int32_t, max_nb_dims> output_l, output_r;
TF_RETURN_IF_ERROR(compute_output_dims(operand_l, broadcast_num_dims,
&output_l, operand_l_new_dims));
TF_RETURN_IF_ERROR(compute_output_dims(operand_r, broadcast_num_dims,
&output_r, operand_r_new_dims));
if (check_feasibility) {
for (int i = 0; i < broadcast_num_dims; ++i) {
if (!use_implicit_batch && (output_l[i] == -1 || output_r[i] == -1)) {
continue;
}
if ((output_l[i] != output_r[i]) && (output_l[i] != 1) &&
(output_r[i] != 1)) {
return errors::InvalidArgument("Infeasible broadcast scheme (",
"batch_dim: ", output_l[0], ", ",
DebugString(*operand_l_new_dims), " vs ",
"batch_dim: ", output_r[0], ", ",
DebugString(*operand_r_new_dims), ")");
}
}
}
return OkStatus();
}
Status DynamicBroadcast(ITensorProxyPtr operand,
const OpConverterParams* params,
ITensorProxyPtr* output, int broadcasted_nbDims,
std::optional<int> op_instance) {
int operand_nbDims = operand->getDimensions().nbDims;
if (broadcasted_nbDims > operand_nbDims) {
if (params->validation_only) return OkStatus();
int n_extra_dims = broadcasted_nbDims - operand_nbDims;
VLOG(2) << "Dynamic broadcast adding " << n_extra_dims << " leading 1s";
TF_RETURN_IF_ERROR(params->converter->DynamicReshape(
operand,
{std::make_pair(0, operand_nbDims)},
params,
output,
{n_extra_dims},
op_instance));
} else {
*output = operand;
}
return OkStatus();
}
Status BroadcastWeights(std::unique_ptr<TRT_TensorOrWeights>& p,
const DimsAdapter& broadcasted_dims) {
if (!p->is_weights()) return errors::Internal("Weight input expected");
if (p->GetTrtDims().nbDims != broadcasted_dims.NumDims()) {
TRT_ShapedWeights weights(p->weights());
TF_RETURN_IF_ERROR(weights.SetShape(broadcasted_dims));
p = std::make_unique<TRT_TensorOrWeights>(weights);
}
return OkStatus();
}
Status ApplyBroadcast(std::unique_ptr<TRT_TensorOrWeights>& operand,
const DimsAdapter& broadcasted_dims,
const OpConverterParams* params,
std::optional<int> op_instance) {
if (operand->is_weights()) {
TF_RETURN_IF_ERROR(BroadcastWeights(operand, broadcasted_dims));
} else {
ITensorProxyPtr tensor = nullptr;
auto is_static_shuffle_compatible = [](const auto& dims) {
return absl::c_count(dims, -1) <= 1;
};
if (is_static_shuffle_compatible(broadcasted_dims)) {
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, *operand, broadcasted_dims,
params->validation_only, &tensor, params->node_def));
} else {
TF_RETURN_IF_ERROR(DynamicBroadcast(
operand->tensor(),
params,
&tensor,
broadcasted_dims.NumDims(),
op_instance));
}
operand = std::make_unique<TRT_TensorOrWeights>(tensor);
}
return OkStatus();
}
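// Broadcasts both operands of a binary op to a common rank: weights are
// reshaped statically, tensors via a shuffle or dynamic reshape as needed.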
Status BroadcastTensors(std::unique_ptr<TRT_TensorOrWeights>& operand_l,
std::unique_ptr<TRT_TensorOrWeights>& operand_r,
bool check_feasibility,
const OpConverterParams* params) {
nvinfer1::Dims broadcasted_dims_l, broadcasted_dims_r;
TF_RETURN_IF_ERROR(GetTrtBroadcastShape(
*operand_l, *operand_r, check_feasibility, params->use_implicit_batch,
&broadcasted_dims_l, &broadcasted_dims_r));
if (params->validation_only) return OkStatus();
TF_RETURN_IF_ERROR(ApplyBroadcast(
operand_l,
broadcasted_dims_l,
params,
0));
TF_RETURN_IF_ERROR(ApplyBroadcast(
operand_r,
broadcasted_dims_r,
params,
1));
return OkStatus();
}
ITensorProxyPtr Converter::CreateConstantLayer(const TRT_ShapedWeights& weights,
const nvinfer1::Dims& dims) {
nvinfer1::Weights trt_weights = weights.GetTrtWeights();
nvinfer1::IConstantLayer* layer = network()->addConstant(dims, trt_weights);
if (!layer) return nullptr;
SetLayerName(layer, "_tftrt_constant_",
std::to_string(next_constant_layer_id_));
next_constant_layer_id_++;
ITensorProxyPtr trt_tensor = layer->getOutput(0);
return trt_tensor;
}
template <typename T>
Status CreateScalarConstant(
const OpConverterParams* params, T value, ITensorProxyPtr* tensor,
nvinfer1::DataType trt_type = nvinfer1::DataType::kINT32,
const nvinfer1::Dims& dims = {1, {1}}) {
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(trt_type, dims);
TRT_ENSURE_OK(weights);
TF_RETURN_IF_ERROR(weights->SetValues(value));
*tensor = params->converter->CreateConstantLayer(*weights, dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(*tensor, params->node_def.name());
return OkStatus();
}
Status CreateBroadcastableScalarConstant(const OpConverterParams* params,
float value,
const nvinfer1::Dims& dims,
ITensorProxyPtr* tensor,
const char* dtype_attr_name = "T") {
nvinfer1::DataType trt_type = nvinfer1::DataType::kFLOAT;
AttrSlice attrs(params->node_def);
if (attrs.FindByString(dtype_attr_name) != nullptr) {
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, dtype_attr_name, &dtype));
TF_RETURN_IF_ERROR(TfTypeToTrtType(dtype, &trt_type));
}
nvinfer1::Dims broadcastable_dims(dims);
for (int i = 0; i < broadcastable_dims.nbDims; i++) {
broadcastable_dims.d[i] = 1;
}
return CreateScalarConstant(params, value, tensor, trt_type,
broadcastable_dims);
}
StatusOr<ITensorProxyPtr> ConcatenateTensors(
const OpConverterParams* params,
const std::vector<ITensorProxyPtr> input_tensors,
std::optional<int> op_instance = std::nullopt) {
std::vector<nvinfer1::ITensor*> trt_input_tensors;
for (const auto& t : input_tensors) {
trt_input_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_input_tensors.data()),
input_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, params->node_def.op());
params->converter->SetLayerName(layer, params->node_def.name(),
"concat_shapes", op_instance);
layer->setAxis(0);
return ITensorProxyPtr(layer->getOutput(0));
}
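// Converts a (possibly negative) TensorFlow axis to the corresponding TensorRT
// axis, rejecting out-of-range values and, in implicit batch mode, the batch
// dimension.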
Status ConvertAxis(int tf_axis, int trt_nb_dims, absl::string_view node_name,
bool use_implicit_batch, int* trt_axis) {
const int tf_nb_dims = trt_nb_dims + (use_implicit_batch ? 1 : 0);
if (tf_axis < -tf_nb_dims || tf_axis >= tf_nb_dims) {
return errors::InvalidArgument(
"Axis value of ", tf_axis, " is out of bounds, must be in range [",
-tf_nb_dims, ", ", tf_nb_dims, "), at ", node_name);
}
if (tf_axis < 0) tf_axis += tf_nb_dims;
if (use_implicit_batch && tf_axis == 0) {
return errors::Unimplemented(
"TensorRT does not allow manipulation of the batch dimension");
}
*trt_axis = use_implicit_batch ? tf_axis - 1 : tf_axis;
return OkStatus();
}
bool AllLengthsEqual(const std::vector<std::vector<int>>& inputs) {
if (inputs.size() == 0) return true;
int length = inputs.at(0).size();
for (int i = 1; i < inputs.size(); i++) {
if (inputs.at(i).size() != length) return false;
}
return true;
}
bool DimsHaveSameSize(const DimsAdapter& lhs, const DimsAdapter& rhs) {
return lhs.Volume() == rhs.Volume();
}
bool AreDimsStaticWithSameSize(const DimsAdapter& lhs, const DimsAdapter& rhs) {
if (!lhs.IsStatic() || !rhs.IsStatic()) return false;
return DimsHaveSameSize(lhs, rhs);
}
bool AreDimsStaticWithDifferentSize(const DimsAdapter& lhs,
const DimsAdapter& rhs) {
if (!lhs.IsStatic() || !rhs.IsStatic()) return false;
return !DimsHaveSameSize(lhs, rhs);
}
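// Computes the (pre, post) padding per spatial dimension needed to emulate
// TensorFlow's "SAME" padding for the given strides, kernel size, and input
// dimensions.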
static std::vector<std::pair<int, int>> CreateSamePadding(
const nvinfer1::Dims& stride, const nvinfer1::Dims& kernel,
const std::vector<int64_t>& input_dims) {
std::vector<std::pair<int, int>> padding(input_dims.size());
CHECK_EQ(stride.nbDims, input_dims.size());
for (size_t i = 0; i < input_dims.size(); ++i) {
int p = ((input_dims[i] - 1) / stride.d[i]) * stride.d[i] + kernel.d[i] -
input_dims[i];
p = (p > 0) ? p : 0;
int left = p / 2;
int right = p - left;
VLOG(2) << "PADDING_" << i << " pre: " << left << ", post: " << right
<< "paras: " << input_dims[i] << ", " << stride.d[i] << ", "
<< "kernel: " << kernel.d[i];
padding[i] = {left, right};
}
return padding;
}
string GetCommonNameScope(const string& op_name_a, const string& op_name_b) {
size_t last_scope_separator = 0;
const size_t min_size = std::min(op_name_a.size(), op_name_b.size());
for (size_t i = 0; i < min_size; ++i) {
if (op_name_a[i] != op_name_b[i]) break;
if (op_name_a[i] == '/') last_scope_separator = i + 1;
}
return op_name_a.substr(0, last_scope_separator);
}
Status VerifyShapesMatch(absl::Span<const TRT_TensorOrWeights> inputs,
int masked_dim, absl::string_view node_name) {
size_t num_inputs = inputs.size();
if (num_inputs <= 1) return OkStatus();
const nvinfer1::Dims dims_0 = inputs.at(0).GetTrtDims();
for (size_t i = 1; i < num_inputs; ++i) {
const nvinfer1::Dims dim_i = inputs.at(i).GetTrtDims();
if (dim_i.nbDims != dims_0.nbDims) {
return errors::InvalidArgument(
"Received inputs with inconsistent rank, at ", node_name);
}
for (size_t j = 0; j < dims_0.nbDims; ++j) {
if (dim_i.d[j] == -1 || dims_0.d[j] == -1) continue;
if (dim_i.d[j] != dims_0.d[j] && j != masked_dim) {
return errors::InvalidArgument(
"Received inputs with inconsistent shape, at ", node_name);
}
}
}
return OkStatus();
}
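// Copies a 5-D weight block between two strided memory layouts; used by
// ReorderDRSCKToKCDRS below.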
template <typename T>
void Reorder5(const nvinfer1::Dims& shape, const T* idata,
const nvinfer1::Dims& istrides, T* odata,
const nvinfer1::Dims& ostrides) {
for (int k = 0; k < shape.d[0]; ++k) {
for (int c = 0; c < shape.d[1]; ++c) {
for (int d = 0; d < shape.d[2]; ++d) {
for (int r = 0; r < shape.d[3]; ++r) {
for (int s = 0; s < shape.d[4]; ++s) {
odata[k * ostrides.d[0] + c * ostrides.d[1] + d * ostrides.d[2] +
r * ostrides.d[3] + s * ostrides.d[4]] =
idata[k * istrides.d[0] + c * istrides.d[1] +
d * istrides.d[2] + r * istrides.d[3] +
s * istrides.d[4]];
}
}
}
}
}
}
template <typename T>
void Reorder4(const nvinfer1::Dims4& shape, const T* idata,
const nvinfer1::Dims4& istrides, T* odata,
const nvinfer1::Dims4& ostrides) {
for (int n = 0; n < shape.d[0]; ++n) {
for (int c = 0; c < shape.d[1]; ++c) {
for (int h = 0; h < shape.d[2]; ++h) {
for (int w = 0; w < shape.d[3]; ++w) {
odata[n * ostrides.d[0] + c * ostrides.d[1] + h * ostrides.d[2] +
w * ostrides.d[3]] =
idata[n * istrides.d[0] + c * istrides.d[1] + h * istrides.d[2] +
w * istrides.d[3]];
}
}
}
}
}
template <typename T>
void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
const nvinfer1::DimsHW& istrides, T* odata,
const nvinfer1::DimsHW& ostrides) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[h * ostrides.h() + w * ostrides.w()] =
idata[h * istrides.h() + w * istrides.w()];
}
}
}
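// Transposes 2-D weights from CK to KC order (swaps the two dimensions and
// reorders the data accordingly).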
void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights) {
const int c = iweights.Shape().dim(0);
const int k = iweights.Shape().dim(1);
oweights->Shape().dim(0) = k;
oweights->Shape().dim(1) = c;
const nvinfer1::DimsHW istrides = {1, k};
const nvinfer1::DimsHW ostrides = {c, 1};
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder2({k, c}, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder2({k, c}, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type in reorder expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
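// Reorders 2-D convolution weights from TensorFlow's RSCK layout
// (height, width, in-channels, out-channels) to TensorRT's KCRS layout,
// handling grouped convolutions.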
void ReorderRSCKToKCRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, const int num_groups) {
CHECK(iweights.TrtDType() == oweights->TrtDType());
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
const int r = iweights.Shape().dim(0);
const int s = iweights.Shape().dim(1);
const int c = iweights.Shape().dim(2) / num_groups;
const int k = iweights.Shape().dim(3) * num_groups;
VLOG(2) << "num_groups: " << num_groups << "c" << iweights.Shape().dim(2)
<< " then " << c << "k" << iweights.Shape().dim(3) << " then " << k
<< "r" << iweights.Shape().dim(0) << " then " << r << "s"
<< iweights.Shape().dim(1) << " then " << s;
oweights->Shape().dim(0) = k / num_groups;
oweights->Shape().dim(1) = c * num_groups;
oweights->Shape().dim(2) = r;
oweights->Shape().dim(3) = s;
const nvinfer1::Dims4 istrides = {1, k, s * k * c, c * k};
const nvinfer1::Dims4 ostrides = {c * r * s, r * s, s, 1};
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder4({k, c, r, s}, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder4({k, c, r, s}, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type, expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
nvinfer1::Dims InitDimsN(std::initializer_list<int> list) {
nvinfer1::Dims dim;
dim.nbDims = list.size();
std::copy(list.begin(), list.end(), dim.d);
return dim;
}
void ReorderDRSCKToKCDRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, const int num_groups) {
DCHECK(iweights.TrtDType() == oweights->TrtDType());
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
const int d = iweights.Shape().dim(0);
const int r = iweights.Shape().dim(1);
const int s = iweights.Shape().dim(2);
const int c = iweights.Shape().dim(3) / num_groups;
const int k = iweights.Shape().dim(4) * num_groups;
VLOG(2) << "num_groups: " << num_groups << ", c: " << iweights.Shape().dim(3)
<< " becomes " << c << ", k: " << iweights.Shape().dim(4)
<< " becomes " << k << ", d: " << d << ", r: " << r << ", s: " << s;
oweights->Shape().dim(0) = iweights.Shape().dim(4);
oweights->Shape().dim(1) = iweights.Shape().dim(3);
oweights->Shape().dim(2) = d;
oweights->Shape().dim(3) = r;
oweights->Shape().dim(4) = s;
nvinfer1::Dims shape =
InitDimsN({k, c, d, r, s});
nvinfer1::Dims ostrides =
InitDimsN({c * d * r * s, d * r * s, r * s, s,
1});
nvinfer1::Dims istrides =
InitDimsN({1, k, r * s * c * k, s * c * k,
c * k});
switch (iweights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
Reorder5(shape, iweights.GetPointer<float>(), istrides,
oweights->GetPointer<float>(), ostrides);
break;
}
case nvinfer1::DataType::kHALF: {
Reorder5(shape, iweights.GetPointer<Eigen::half>(), istrides,
oweights->GetPointer<Eigen::half>(), ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type, expected fp32 or fp16 but got "
<< DebugString(iweights.TrtDType());
}
}
OpConverterParams::OpConverterParams(
const NodeDef& node_def, const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs, TrtWeightStore* weight_store,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision)
: node_def(node_def),
inputs(inputs),
outputs(outputs),
validation_only(true),
weight_store(weight_store),
precision_mode(precision_mode),
use_calibration(use_calibration),
use_implicit_batch(use_implicit_batch),
use_explicit_precision(use_explicit_precision) {}
OpConverterParams::OpConverterParams(
Converter* converter, const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs, TrtWeightStore* weight_store)
: converter(converter),
node_def(node_def),
inputs(inputs),
outputs(outputs),
validation_only(false),
weight_store(weight_store),
precision_mode(converter->precision_mode()),
use_calibration(converter->use_calibration()),
use_implicit_batch(converter->use_implicit_batch()),
use_explicit_precision(converter->UseExplicitPrecision()) {}
TrtNodeValidator::TrtNodeValidator(
const grappler::GraphProperties& graph_properties,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision)
: graph_properties_(graph_properties),
precision_mode_(precision_mode),
use_calibration_(use_calibration),
use_implicit_batch_(use_implicit_batch),
use_explicit_precision_(use_explicit_precision) {}
StatusOr<OpConverter> TrtNodeValidator::GetValidator(const std::string& op) {
return GetOpConverterRegistry()->LookUp(op);
}
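// Converts output `output_port` of `node_def` into a TRT_TensorOrWeights that
// can be fed to a downstream converter for validation. Constants become real
// weights; other nodes only contribute shape and dtype information taken from
// the grappler graph properties.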
Status TrtNodeValidator::ConvertToTensorOrWeights(
const NodeDef& node_def, int output_port,
TRT_TensorOrWeights* tensor_or_weights) {
if (node_def.op() == "VarHandleOp" || node_def.op() == "Placeholder") {
AttrSlice attrs(node_def);
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dtype", &dtype));
if (dtype == DataType::DT_RESOURCE) {
ResourceHandle fake_resource;
*tensor_or_weights = TRT_TensorOrWeights(fake_resource);
return OkStatus();
}
}
if (node_def.op() == "Const" || node_def.op() == "VariableV2") {
if (output_port != 0) {
return errors::InvalidArgument(node_def.op(),
" node should only have one output.");
}
std::vector<TRT_TensorOrWeights> inputs;
return ConvertConstToWeights(node_def, inputs, tensor_or_weights);
}
if (node_def.op() == "ReadVariableOp") {
const std::vector<TRT_TensorOrWeights> inputs{
TRT_TensorOrWeights(ResourceHandle())};
return ConvertConstToWeights(node_def, inputs, tensor_or_weights);
}
if (!graph_properties_.HasOutputProperties(node_def.name())) {
return errors::InvalidArgument("Shape and data type are unknown");
}
const auto& output_params =
graph_properties_.GetOutputProperties(node_def.name());
const auto& tensor_properties = output_params.at(output_port);
const DataType dtype = tensor_properties.dtype();
const PartialTensorShape shape = tensor_properties.shape();
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
TF_RETURN_IF_ERROR(ValidateTensorProperties(
node_def.op(), dtype, shape, use_implicit_batch_,
true, &trt_dtype, &trt_dims, &batch_size));
*tensor_or_weights = TRT_TensorOrWeights(trt_dtype, trt_dims, batch_size);
return OkStatus();
}
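// Returns OK if `node` can be handled by TF-TRT: its op must have a registered
// converter (or be a quantization op in INT8 mode), every input must be
// convertible to a TRT_TensorOrWeights, and the converter itself must accept
// the node in validation mode.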
Status TrtNodeValidator::IsTensorRTCandidate(const Node* node) {
const string& op = node->def().op();
bool is_supported_op = false;
if (absl::c_find(kQuantizationOpNames, op) != kQuantizationOpNames.end()) {
is_supported_op = (precision_mode_ == TrtPrecisionMode::INT8);
} else {
is_supported_op = GetValidator(op).ok();
}
if (!is_supported_op) {
return errors::Unimplemented("Op type ", op, " is not supported.");
}
std::vector<TRT_TensorOrWeights> inputs;
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(node->input_edges(&input_edges));
for (const Edge* edge : input_edges) {
Node* src_node = edge->src();
while (src_node->def().op() == "Identity") {
std::vector<const Edge*> input_edges_temp;
TF_RETURN_IF_ERROR(src_node->input_edges(&input_edges_temp));
src_node = input_edges_temp[0]->src();
}
const NodeDef& src_def = src_node->def();
TRT_TensorOrWeights tensor_or_weights;
Status status = ConvertToTensorOrWeights(src_def, edge->src_output(),
&tensor_or_weights);
if (!status.ok()) {
VLOG(2) << "Failed to convert input `" << src_def.name() << "` to a "
<< "TRT_TensorOrWeights: " << status.message();
return errors::Internal(
"Failed to convert at least one input to a TRT_TensorOrWeights: ",
status.message());
}
inputs.push_back(tensor_or_weights);
}
auto validator = GetValidator(op);
TF_RETURN_IF_ERROR(validator.status());
OpConverterParams params(node->def(), inputs, nullptr,
&weight_store_, precision_mode_, use_calibration_,
use_implicit_batch_, use_explicit_precision_);
return (*validator)(¶ms);
}
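// Runs the registered converter for a constant-producing node in validation
// mode and returns the resulting weights in `output`.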
Status TrtNodeValidator::ConvertConstToWeights(
const NodeDef& const_node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
TRT_TensorOrWeights* output) {
std::vector<TRT_TensorOrWeights> outputs;
OpConverterParams params(const_node_def, inputs, &outputs, &weight_store_,
precision_mode_, use_calibration_,
use_implicit_batch_, use_explicit_precision_);
auto const_val = GetValidator(const_node_def.op());
TF_RETURN_IF_ERROR(const_val.status());
Status status = (*const_val)(¶ms);
if (status.ok() && (output != nullptr)) {
*output = outputs[0];
}
return status;
}
StatusOr<std::unique_ptr<Converter>> Converter::Create(
TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger, const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision,
OpKernelContext* ctx) {
std::unique_ptr<Converter> converter = absl::WrapUnique(new Converter(
precision_mode, use_calibration, trt_logger, use_implicit_batch,
engine_name, use_explicit_precision, ctx));
TF_RETURN_IF_ERROR(converter->Init(trt_logger));
return converter;
}
Converter::Converter(TrtPrecisionMode precision_mode, bool use_calibration,
nvinfer1::ILogger* trt_logger,
const bool use_implicit_batch,
absl::string_view engine_name, bool use_explicit_precision,
OpKernelContext* ctx)
: ctx_(ctx),
precision_mode_(precision_mode),
use_calibration_(use_calibration),
use_implicit_batch_(use_implicit_batch),
engine_name_(engine_name),
use_explicit_precision_(use_explicit_precision) {
MaybeInitializeTrtPlugins(trt_logger);
}
Status Converter::Init(nvinfer1::ILogger* trt_logger) {
VLOG(1) << "Creating TensorRT builder";
trt_builder_.reset(nvinfer1::createInferBuilder(*trt_logger));
VLOG(1) << "Creating TensorRT network";
uint32_t flags =
use_implicit_batch_
? 0U
: (1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
if (use_explicit_precision_) {
flags |=
(1U << static_cast<int>(
nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_PRECISION));
}
trt_network_.reset(trt_builder_->createNetworkV2(flags));
if (!trt_network_) {
return errors::Internal("Failed to create TensorRT network object");
}
return OkStatus();
}
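// Converts a single node: gathers its TRT inputs, invokes the registered op
// converter, and registers the produced outputs under their TF tensor names.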
Status Converter::ConvertNode(const NodeDef& node_def) {
std::vector<TRT_TensorOrWeights> inputs;
std::vector<TRT_TensorOrWeights> outputs;
TF_RETURN_IF_ERROR(this->GetInputs(node_def, &inputs));
OpConverterParams params(this, node_def, inputs, &outputs, &weight_store_);
const string& op = node_def.op();
auto op_converter = GetOpConverterRegistry()->LookUp(op);
TF_RETURN_IF_ERROR(op_converter.status());
TF_RETURN_IF_ERROR((*op_converter)(¶ms));
for (size_t i = 0; i < outputs.size(); ++i) {
TRT_TensorOrWeights& output = outputs[i];
string output_name = node_def.name();
if (i != 0) {
StrAppend(&output_name, ":", i);
}
if (output.is_tensor()) {
const char* tensor_name = output.tensor()->getName();
if (!IsEngineInput(tensor_name)) {
output.tensor()->setName(output_name.c_str());
}
}
VLOG(2) << "Adding out tensor " << output_name << ": "
<< output.DebugString();
Status status = AddTensorOrWeights(output_name, output);
if (!status.ok()) {
return errors::Create(static_cast<absl::StatusCode>(status.code()),
StrCat("Failed to add output for node: ",
node_def.name(), ": ", status.message()),
errors::GetPayloads(status));
}
}
return OkStatus();
}
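// Registers a network input tensor under `name`. In implicit batch mode all
// inputs must agree on the batch size.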
Status Converter::AddInputTensor(const string& name, nvinfer1::DataType dtype,
const nvinfer1::Dims& dims, int batch_size) {
Status status;
if (use_implicit_batch_) {
status = MaybeUpdateBatchSize(batch_size);
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status, batch_size_error(name, status.message()));
}
}
ITensorProxyPtr tensor = network()->addInput(name.c_str(), dtype, dims);
if (*tensor == nullptr) {
return errors::InvalidArgument("Failed to create Input layer tensor ", name,
" rank=", dims.nbDims);
}
status = AddTensorOrWeights(name, TRT_TensorOrWeights(tensor));
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status,
StrCat("Failed to add input tensor ", name, ": ", status.message()));
}
return OkStatus();
}
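// Registers a resource-handle input (e.g. a variable) under `name` so that
// ops reading the resource can look it up.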
Status Converter::AddInputResource(const string& name,
const ResourceHandle& resource) {
Status status = AddTensorOrWeights(name, TRT_TensorOrWeights(resource));
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status,
StrCat("Failed to add input resource ", name, ": ", status.message()));
}
return OkStatus();
}
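// Marks the requested tensors as engine outputs. A tensor that is already an
// engine input or output is first copied through an identity IShuffleLayer so
// it can be renamed safely, and the requested output data type is enforced.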
Status Converter::RenameAndMarkOutputTensors(
const std::vector<Converter::EngineOutputInfo>& output_tensors) {
int output_index = 0;
for (const auto& output : output_tensors) {
TRT_TensorOrWeights tensor_or_weights;
TF_RETURN_IF_ERROR(
GetTensorOrWeights(output.source_tensor_name, &tensor_or_weights));
if (!tensor_or_weights.is_tensor()) {
return errors::InvalidArgument("Output ", output.source_tensor_name,
" is weights not tensor");
}
ITensorProxyPtr tensor = tensor_or_weights.tensor();
if (*tensor == nullptr) {
return errors::NotFound("Output tensor not found: ",
output.source_tensor_name);
}
if (IsEngineInput(tensor->getName()) || IsEngineOutput(tensor->getName())) {
nvinfer1::IShuffleLayer* layer =
network()->addShuffle(*tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(
layer, StrCat("Output Copy for ", tensor->getName()));
SetLayerName(layer, tensor->getName(), "shuffle", output_index);
tensor = layer->getOutput(0);
}
tensor->setName(output.dest_node_name.c_str());
network()->markOutput(*tensor->trt_tensor());
tensor->setType(output.trt_dtype);
output_index++;
VLOG(1) << "Marking output TRT tensor " << output.source_tensor_name
<< " with data type " << DebugString(output.trt_dtype)
<< ", which feeds TF node " << output.dest_node_name;
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Created TensorRT network with the following layers:";
for (int i = 0; i < network()->getNbLayers(); i++) {
auto layer = network()->getLayer(i);
VLOG(2) << " " << layer->getName() << " ("
<< "type: " << static_cast<int>(layer->getType())
<< ", precision: " << static_cast<int>(layer->getPrecision())
<< ")";
}
}
return OkStatus();
}
bool AbortCudaEngineBuild() {
bool value;
Status status = ReadBoolFromEnvVar("TF_TRT_ABORT_CUDA_ENGINE_BUILD",
false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}
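// Configures the TensorRT builder (precision flags, workspace size, optional
// sparsity, optimization profiles, algorithm selector, timing cache) and
// builds the CUDA engine for the converted network.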
Status Converter::BuildCudaEngine(
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, int max_batch_size,
size_t max_workspace_size_bytes, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator, TrtShapeOptimizationProfile* profiles) {
tensorflow::profiler::AnnotatedTraceMe activity(
[&]() {
return tensorflow::profiler::TraceMeOpOverride("TRTEngineOp",
"BuildEngine");
},
tensorflow::profiler::TraceMeLevel::kInfo);
if (AbortCudaEngineBuild()) {
return errors::Aborted(
"Engine creation aborted by TF_TRT_ABORT_CUDA_ENGINE_BUILD variable");
}
VLOG(1) << "Configuring TensorRT builder";
trt_builder_->setMaxBatchSize(max_batch_size);
trt_builder_->setGpuAllocator(allocator);
TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config(
trt_builder_->createBuilderConfig());
builder_config->setMaxWorkspaceSize(max_workspace_size_bytes);
std::unique_ptr<nvinfer1::IAlgorithmSelector> trt_algorithm_selector{nullptr};
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
if (!use_calibration_ || precision_mode_ != TrtPrecisionMode::INT8) {
trt_algorithm_selector = MaybeCreateAlgorithmSelector();
}
} else {
trt_algorithm_selector = MaybeCreateAlgorithmSelector();
}
if (trt_algorithm_selector != nullptr) {
builder_config->setAlgorithmSelector(trt_algorithm_selector.get());
}
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
enum class SparseComputeMode { DISABLED, ENABLED, SIMULATED };
static SparseComputeMode sparse_compute_mode = []() {
SparseComputeMode _sparse_compute_mode;
int64 _sparse_mode;
TF_CHECK_OK(tensorflow::ReadInt64FromEnvVar("TF_TRT_SPARSE_MODE",
1,
&_sparse_mode));
string sparse_log_msg = "[TF-TRT] Sparse compute capability: ";
if (_sparse_mode == 1) {
sparse_log_msg = StrCat(sparse_log_msg, "enabled.");
_sparse_compute_mode = SparseComputeMode::ENABLED;
} else if (_sparse_mode < 1) {
sparse_log_msg = StrCat(sparse_log_msg, "disabled.");
_sparse_compute_mode = SparseComputeMode::DISABLED;
} else {
sparse_log_msg = StrCat(
sparse_log_msg, "simulated.",
"It shall only be used for sparse computing benchmark and debug.");
_sparse_compute_mode = SparseComputeMode::SIMULATED;
}
LOG(INFO) << sparse_log_msg;
return _sparse_compute_mode;
}();
if (sparse_compute_mode == SparseComputeMode::ENABLED ||
sparse_compute_mode == SparseComputeMode::SIMULATED) {
builder_config->setFlag(nvinfer1::BuilderFlag::kSPARSE_WEIGHTS);
}
#endif
if (tensorflow::tensor_float_32_execution_enabled()) {
builder_config->setFlag(nvinfer1::BuilderFlag::kTF32);
} else {
builder_config->clearFlag(nvinfer1::BuilderFlag::kTF32);
}
if (precision_mode_ == TrtPrecisionMode::FP16) {
builder_config->setFlag(nvinfer1::BuilderFlag::kFP16);
} else if (precision_mode_ == TrtPrecisionMode::INT8) {
if (IS_TRT_VERSION_GE(8, 0, 0, 0) || !use_explicit_precision_) {
builder_config->setFlag(nvinfer1::BuilderFlag::kFP16);
} else {
LOG_WARNING_WITH_PREFIX << "With explicit precision mode, FP16 is not "
"allowed before TensorRT 8. TRT will consider "
"INT8 and FP32 tactics.";
}
builder_config->setFlag(nvinfer1::BuilderFlag::kINT8);
}
if (!use_implicit_batch_ && profiles) {
TF_RETURN_IF_ERROR(profiles->ConfigureBuilder(
trt_builder_.get(), builder_config.get(), network()));
}
if (precision_mode_ == TrtPrecisionMode::INT8) {
builder_config->setInt8Calibrator(use_calibration_ ? calibrator : nullptr);
}
std::unique_ptr<TimingCacheRegistry::TimingCache> timing_cache = nullptr;
if (trt_algorithm_selector == nullptr) {
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
TimingCacheRegistry* registry = GetTimingCacheRegistry();
auto cache = registry->LookUp("default_cache", builder_config.get());
if (!cache.ok()) {
LOG(WARNING) << "failed to create a timing cache: "
<< cache.status().message();
} else {
timing_cache = std::move(*cache);
builder_config->setTimingCache(*timing_cache, false);
}
#endif
} else {
builder_config->setFlag(nvinfer1::BuilderFlag::kDISABLE_TIMING_CACHE);
}
string precision_mode_str;
TF_RETURN_IF_ERROR(
TrtPrecisionModeToName(precision_mode_, &precision_mode_str));
string trt_network_name = StrCat(
"TF:", TF_VERSION_STRING, ", ",
"TRT:", absl::StrJoin(GetLoadedTensorRTVersion(), "."), "-",
"Precision:", precision_mode_str, ", ", "Calibration:", use_calibration_,
", ", "Max-Batch-Size:", max_batch_size, ", ",
"Max-Workspace-Size:", max_workspace_size_bytes);
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
trt_network_name = StrCat(trt_network_name, ", Sparse Compute: ");
switch (sparse_compute_mode) {
case SparseComputeMode::SIMULATED:
trt_network_name = StrCat(trt_network_name, "Simulated");
break;
case SparseComputeMode::ENABLED:
trt_network_name = StrCat(trt_network_name, "Enabled");
break;
case SparseComputeMode::DISABLED:
trt_network_name = StrCat(trt_network_name, "Disabled");
break;
}
#endif
VLOG(1) << "Setting TensorRT network name to " << trt_network_name;
network()->setName(trt_network_name.c_str());
VLOG(1) << "Building TensorRT engine";
if (VLOG_IS_ON(2)) {
VLOG(2) << "Network inputs";
int n_inputs = network()->getNbInputs();
for (int i = 0; i < n_inputs; i++) {
const ITensorProxyPtr input = network()->getInput(i);
if (*input) {
VLOG(2) << " " << i << " " << input->getName();
} else {
VLOG(2) << "Could not find input " << i;
}
}
}
engine->reset(
trt_builder_->buildEngineWithConfig(*network(), *builder_config));
if (engine->get() == nullptr) {
return errors::Internal("Failed to build TensorRT engine");
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "TRT engine created";
int nbBindings = (*engine)->getNbBindings();
VLOG(2) << "Number of engine bindings: " << nbBindings;
for (int i = 0; i < nbBindings; i++) {
auto get_location_string = [&engine](int i) {
if ((*engine)->getLocation(i) == nvinfer1::TensorLocation::kDEVICE)
return " on device";
else
return " on host";
};
VLOG(2) << "Binding " << i << " name: " << (*engine)->getBindingName(i)
<< get_location_string(i);
}
}
if (timing_cache) {
GetTimingCacheRegistry()->Upsert("default_cache", timing_cache.get());
}
return OkStatus();
}
Status Converter::MaybeUpdateBatchSize(int batch_size) {
if (this->batch_size_ < 0 || batch_size < 0 ||
this->batch_size_ == batch_size) {
if (this->batch_size_ < 0 && batch_size >= 0) {
this->batch_size_ = batch_size;
}
return OkStatus();
}
return errors::InvalidArgument(
"Provided batch size does not match converter batch size: ", batch_size,
" vs ", batch_size_);
}
Status Converter::AddTensorOrWeights(const string& name,
TRT_TensorOrWeights input) {
if (use_implicit_batch_ && input.is_tensor()) {
input.set_batch_size(batch_size_);
}
if (trt_tensors_.insert({name, std::move(input)}).second) return OkStatus();
return errors::AlreadyExists("tensor/weights ", name, " already exist.");
}
Status Converter::GetTensorOrWeights(const string& name,
TRT_TensorOrWeights* output) {
if (!trt_tensors_.count(name)) {
return errors::NotFound("Tensor or weights with name ", name,
" could not be found.");
}
*output = trt_tensors_.at(name);
return OkStatus();
}
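// Transposes `input_tensor` with `order_with_batch_dim` (a permutation that
// includes the batch dimension) via an IShuffleLayer. Permuting the batch
// dimension itself is not supported in implicit batch mode.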
Status Converter::TransposeTensor(ITensorProxyPtr input_tensor,
const std::vector<int>& order_with_batch_dim,
ITensorProxyPtr* output_tensor,
const NodeDef& node_def,
absl::string_view sub_op_name) {
const auto dims = input_tensor->getDimensions();
const int order_size = use_implicit_batch_ ? order_with_batch_dim.size() - 1
: order_with_batch_dim.size();
if (order_size != size_t(dims.nbDims)) {
return errors::InvalidArgument(
"Rank of perm for transpose does not match with that of the input.");
}
if (use_implicit_batch_ && order_with_batch_dim[0] != 0) {
return errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
nvinfer1::IShuffleLayer* layer =
this->network()->addShuffle(*input_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Transpose");
SetLayerName(layer, node_def, sub_op_name);
nvinfer1::Permutation permutation;
if (use_implicit_batch_) {
for (int32_t i = 0; i < dims.nbDims; ++i) {
permutation.order[i] = order_with_batch_dim[i + 1] - 1;
}
} else {
std::copy(order_with_batch_dim.begin(), order_with_batch_dim.end(),
permutation.order);
}
VLOG(1) << "TransposeTensor permutation: "
<< DebugString(permutation, dims.nbDims);
layer->setFirstTranspose(permutation);
nvinfer1::Dims reshape_dims;
reshape_dims.nbDims = dims.nbDims;
for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
reshape_dims.d[i] = 0;
}
layer->setReshapeDimensions(reshape_dims);
*output_tensor = layer->getOutput(0);
return OkStatus();
}
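// Computes the minimum and maximum values stored in `weights`.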
Status Converter::GetWeightRange(const TRT_ShapedWeights& weights,
float* out_min, float* out_max) const {
switch (weights.TrtDType()) {
case nvinfer1::DataType::kFLOAT: {
auto inp = weights.GetPointer<float>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = *result.first;
*out_max = *result.second;
break;
}
case nvinfer1::DataType::kHALF: {
auto inp = weights.GetPointer<Eigen::half>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = static_cast<float>(*result.first);
*out_max = static_cast<float>(*result.second);
break;
}
case nvinfer1::DataType::kINT32: {
auto inp = weights.GetPointer<int>();
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = static_cast<float>(*result.first);
*out_max = static_cast<float>(*result.second);
break;
}
default:
return errors::Unimplemented(
"Data type not supported for GetWeightRange: ",
DebugString(weights.TrtDType()));
}
return OkStatus();
}
void Converter::SetLayerName(nvinfer1::ILayer* layer, const NodeDef& node_def,
absl::string_view sub_op_name,
std::optional<int> sub_op_instance,
std::optional<std::string> origin_node_name) {
std::string sub_op_suffix = GetLayerNameSuffix(sub_op_name, sub_op_instance);
if (sub_op_suffix.empty()) {
SetLayerNameHelper(layer, engine_name_, node_def.name());
} else if (origin_node_name.has_value()) {
auto layer_name = absl::StrCat(node_def.name(), "-",
absl::string_view(origin_node_name.value()),
"-", sub_op_suffix);
SetLayerNameHelper(layer, engine_name_, layer_name);
} else {
SetLayerNameHelper(layer, engine_name_,
absl::StrCat(node_def.name(), "-", sub_op_suffix));
}
}
void Converter::SetLayerName(nvinfer1::ILayer* layer,
absl::string_view main_op_name,
absl::string_view sub_op_name,
std::optional<int> sub_op_instance) {
std::string layer_name_suffix =
GetLayerNameSuffix(sub_op_name, sub_op_instance);
SetLayerNameHelper(layer, engine_name_,
absl::StrCat(main_op_name, "-", layer_name_suffix));
}
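// Reshapes `input` to `dims`: tensors go through an IShuffleLayer, weights are
// materialized as a constant layer. With validation_only=true only the shape
// compatibility checks run and no layers are created.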
Status PrepareTensorForShape(Converter* converter,
const TRT_TensorOrWeights& input,
const DimsAdapter& dims,
const bool validation_only,
ITensorProxyPtr* tensor, const NodeDef& node_def,
std::optional<int> op_instance,
std::optional<std::string> origin_node_name) {
DimsAdapter input_dims(input.GetTrtDims());
if (dims.Volume() > 0 && AreDimsStaticWithDifferentSize(input_dims, dims)) {
return errors::InvalidArgument(
"Incompatible shapes: ", input_dims.DebugString(), " vs. ",
dims.DebugString());
}
if (input.is_weights() && !dims.IsStatic()) {
return errors::InvalidArgument("Shape is not fully defined: ",
dims.DebugString());
}
if (validation_only) {
*tensor = nullptr;
return OkStatus();
}
TFTRT_RETURN_ERROR_IF_NULLPTR(converter, "converter is nullptr");
if (input.is_tensor()) {
if (input_dims == dims) {
*tensor = input.tensor();
} else {
nvinfer1::IShuffleLayer* layer =
converter->network()->addShuffle(*input.tensor()->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Reshape");
converter->SetLayerName(layer, node_def, "shuffle", op_instance,
origin_node_name);
layer->setReshapeDimensions(dims.AsTrtDims());
*tensor = layer->getOutput(0);
}
} else {
*tensor = converter->CreateConstantLayer(input.weights(), dims.AsTrtDims());
TFTRT_RETURN_ERROR_IF_NULLPTR(*tensor, "TF-TRT Internal Reshape");
}
return OkStatus();
}
void Converter::ProvideQuantizationRange(ITensorProxyPtr* tensor,
float min_range, float max_range) {
float symmetric_range = std::max(std::abs(min_range), std::abs(max_range));
if ((*tensor)->is_trt_tensor()) {
quantization_ranges_[(*tensor)->trt_tensor()] = symmetric_range;
} else if ((*tensor)->is_simple_tensor()) {
quantization_ranges_proxy_[tensor] = symmetric_range;
}
}
void Converter::MaybeApplyQuantizationRanges() {
if (precision_mode() != TrtPrecisionMode::INT8) return;
for (auto pair : quantization_ranges_) {
nvinfer1::ITensor* tensor = pair.first;
const float range = pair.second;
VLOG(1) << "Setting range for: " << tensor->getName() << ": " << range;
tensor->setDynamicRange(-range, range);
}
for (auto pair : quantization_ranges_proxy_) {
ITensorProxyPtr tensor = *pair.first;
const float range = pair.second;
VLOG(1) << "Setting range for: " << tensor->getName() << ": " << range;
tensor->setDynamicRange(-range, range);
}
}
Status Converter::GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
for (auto const& input_name : node_def.input()) {
if (input_name[0] == '^') continue;
string name = input_name;
auto last = name.find_last_of(':');
if (last != string::npos && last + 2 == name.size() &&
name[last + 1] == '0') {
name.erase(last);
}
if (trt_tensors_.count(name)) {
TRT_TensorOrWeights input = trt_tensors_.at(name);
inputs->push_back(input);
VLOG(2) << "Retrieved input " << name << ": " << input.DebugString();
} else {
string msg("Node ");
StrAppend(&msg, node_def.name(), " should have an input named '", name,
"' but it is not available");
LOG(ERROR) << msg;
return errors::InvalidArgument(msg);
}
}
return OkStatus();
}
Status CheckInputsWeights(
const OpConverterParams& params,
const std::vector<std::pair<string, TrtInputArg>>& expected_inputs) {
const auto& inputs = params.inputs;
const auto& node_def = params.node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), expected_inputs.size(), node_def);
for (int i = 0; i < inputs.size(); i++) {
if (expected_inputs[i].second == TrtInputArg::kWeight &&
!inputs.at(i).is_weights()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a constant");
}
if (expected_inputs[i].second == TrtInputArg::kTensor &&
!inputs.at(i).is_tensor()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a tensor");
}
if (expected_inputs[i].second == TrtInputArg::kResource &&
!inputs.at(i).is_resource()) {
return errors::Unimplemented("The input \"", expected_inputs[i].first,
"\" for ", node_def.op(),
" must be a resource handle");
}
}
return OkStatus();
}
Status CheckInputsWeights(
const OpConverterParams& params,
const std::vector<std::pair<string, bool>>& inputs_is_weight) {
std::vector<std::pair<string, TrtInputArg>> expected_inputs;
expected_inputs.reserve(inputs_is_weight.size());
std::transform(
inputs_is_weight.begin(), inputs_is_weight.end(),
std::back_inserter(expected_inputs), [](std::pair<string, bool> x) {
return std::make_pair(
x.first, x.second ? TrtInputArg::kWeight : TrtInputArg::kTensor);
});
return CheckInputsWeights(params, expected_inputs);
}
Status GetNodeDefTfType(const NodeDef& node_def, DataType* tf_type,
const string type_attr_name_in = "") {
string type_attr_name;
if (type_attr_name_in.empty()) {
if (node_def.op() == "ReadVariableOp" ||
node_def.op() == "ResourceGather") {
type_attr_name = "dtype";
} else {
type_attr_name = "T";
}
} else {
type_attr_name = type_attr_name_in;
}
AttrSlice attrs(node_def);
if (attrs.FindByString(type_attr_name) == nullptr) {
return errors::InvalidArgument("Attribute with name ", type_attr_name,
" not found.");
}
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, type_attr_name, tf_type));
return OkStatus();
}
Status GetInputTfType(const OpConverterParams& params, DataType* tf_type,
int pos) {
const std::vector<TRT_TensorOrWeights>& inputs = params.inputs;
if (inputs.size() <= pos) {
return errors::Internal("Invalid input position");
}
return inputs[pos].GetTfType(tf_type);
}
Status GetOutputTfType(const OpConverterParams& params, DataType* tf_type) {
return GetNodeDefTfType(params.node_def, tf_type);
}
Status AllowDataTypes(const OpConverterParams& params,
const std::set<DataType>& allowed_types,
const char* type_attr_name = "") {
const auto& node_def = params.node_def;
DataType tf_type;
TF_RETURN_IF_ERROR(GetNodeDefTfType(node_def, &tf_type, type_attr_name));
if (!allowed_types.count(tf_type)) {
const auto error =
convert_not_supported_dtype_msg(allowed_types, tf_type, node_def);
return errors::Unimplemented(error);
}
return OkStatus();
}
namespace {
std::vector<int64_t> GetSpatialDimsFromOutputSizes(
const TRT_TensorOrWeights& output_sizes, const int h_index,
const int w_index) {
const TRT_ShapedWeights& weights = output_sizes.weights();
const int output_sizes_length = weights.count();
auto output_sizes_values = weights.GetPointer<int>();
return {output_sizes_values[output_sizes_length == 4 ? h_index : 0],
output_sizes_values[output_sizes_length == 4 ? w_index : 1]};
}
}
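// Shared implementation for Conv2D, DepthwiseConv2dNative and
// Conv2DBackpropInput. `group` is the number of convolution groups (0 means
// depthwise, i.e. one group per input channel) and `is_conv2d_backprop_input`
// selects a deconvolution layer instead of a convolution.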
Status ConvertConv2DHelper(const OpConverterParams* params, int group,
bool is_conv2d_backprop_input) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TRT_TensorOrWeights backprop_output_size;
ITensorProxyPtr tensor = nullptr;
if (is_conv2d_backprop_input) {
if (!params->use_explicit_precision) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input_sizes", true}, {"filter", true}, {"out_backprop", false}}));
}
backprop_output_size = inputs.at(0);
tensor = inputs.at(2).tensor();
bool has_dynamic_hw_shape{false};
int start_idx{0};
auto dims = tensor->getDimensions();
if (params->use_implicit_batch) {
if (dims.nbDims != 3) {
return errors::Internal(
"In implicit batch mode, input nbDims should be 3");
}
start_idx = 1;
} else {
if (dims.nbDims != 4) {
return errors::Internal(
"In explicit batch mode, input nbDims should be 4");
}
start_idx = 2;
}
for (int i = start_idx; i < dims.nbDims; ++i) {
if (dims.d[i] < 0) {
has_dynamic_hw_shape = true;
}
}
if (has_dynamic_hw_shape) {
return errors::Unimplemented(
"Conv2dBackpropInput does not support input with unknown spatial "
"shape");
}
} else {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", false}, {"filter", !params->use_explicit_precision}}));
tensor = inputs.at(0).tensor();
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (inputs.at(1).GetTrtDims().nbDims != 4) {
return errors::InvalidArgument("Conv2D expects kernel of dimension 4");
}
string data_format, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
int c_index = (data_format == "NHWC") ? 3 : 1;
int h_index = (data_format == "NHWC") ? 1 : 2;
int w_index = (data_format == "NHWC") ? 2 : 3;
if (tf_dilations.size() != 4) {
return errors::InvalidArgument(
"Convolution dilations field must specify 4 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW dilation(tf_dilations[h_index], tf_dilations[w_index]);
if (is_conv2d_backprop_input && (dilation.d[0] != 1 || dilation.d[1] != 1)) {
return errors::Unimplemented(
"Dilation with Conv2DBackpropInput (conv2d_transpose) is not"
" supported");
}
if (tf_stride.size() != 4) {
return errors::InvalidArgument(
"Convolution strides field must specify 4 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
if (!params->use_implicit_batch && tensor->getDimensions().d[c_index] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
if (padding_type != "SAME" && padding_type != "VALID") {
return errors::Unimplemented(padding_type +
" padding type not implemented, "
"only VALID and SAME are supported");
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
if (params->validation_only) return OkStatus();
const bool need_transpose = (data_format == "NHWC");
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
const auto tensor_dim = tensor->getDimensions();
const int c_dim_size = tensor_dim.d[params->use_implicit_batch ? 0 : 1];
const int num_groups = (group == 0) ? c_dim_size : group;
const int output_axis = is_conv2d_backprop_input ? 2 : 3;
auto weights_shape = inputs.at(1).GetTrtDims();
const int noutput = weights_shape.d[output_axis] * num_groups;
nvinfer1::DimsHW kernel_size;
kernel_size.h() = weights_shape.d[0];
kernel_size.w() = weights_shape.d[1];
TRT_ShapedWeights weights_rsck;
if (inputs.at(1).is_weights()) {
weights_rsck = inputs.at(1).weights();
} else {
StatusOr<TRT_ShapedWeights> tmp = params->weight_store->GetTempWeights(
nvinfer1::DataType::kFLOAT, weights_shape);
TRT_ENSURE_OK(tmp);
weights_rsck = std::move(tmp).value();
}
if (!inputs.at(1).is_weights()) {
TRT_ENSURE(params->use_explicit_precision);
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
auto dequant_layer =
builder->FindProducerOf(inputs.at(1).tensor()->trt_tensor());
TRT_ENSURE_PTR_OK(dequant_layer);
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
TRT_ENSURE((*dequant_layer)->getType() == nvinfer1::LayerType::kSCALE);
}
auto quant_layer = builder->UniqueParentOf(*dequant_layer, 0);
TRT_ENSURE_PTR_OK(quant_layer);
if (!IS_TRT_VERSION_GE(8, 0, 0, 0)) {
TRT_ENSURE((*quant_layer)->getType() == nvinfer1::LayerType::kSCALE);
}
auto weights_layer = builder->UniqueParentOf(*quant_layer, 0);
TRT_ENSURE_PTR_OK(weights_layer);
TRT_ENSURE((*weights_layer)->getType() == nvinfer1::LayerType::kCONSTANT);
auto const_weights_rsck =
reinterpret_cast<nvinfer1::IConstantLayer*>(*weights_layer)
->getWeights();
    TRT_ENSURE(weights_rsck.count() == const_weights_rsck.count);
const auto* weights_ptr =
static_cast<const float*>(const_weights_rsck.values);
std::copy_n(weights_ptr, const_weights_rsck.count,
weights_rsck.GetPointer<float>());
}
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_rsck);
TRT_ENSURE_OK(weights);
StatusOr<TRT_ShapedWeights> biases = params->weight_store->GetTempWeights(
nvinfer1::DataType::kFLOAT, nvinfer1::Dims{1, {noutput}});
TRT_ENSURE_OK(biases);
std::fill_n(biases->GetPointer<float>(), noutput, 0.0f);
ReorderRSCKToKCRS(weights_rsck, &*weights, num_groups);
nvinfer1::ILayer* conv_layer = nullptr;
if (is_conv2d_backprop_input) {
nvinfer1::IDeconvolutionLayer* layer =
params->converter->network()->addDeconvolution(
*tensor->trt_tensor(), noutput, kernel_size,
weights->GetTrtWeights(), biases->GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
conv_layer = layer;
} else {
const nvinfer1::Weights empty_weights{nvinfer1::DataType::kFLOAT, nullptr,
0};
nvinfer1::IConvolutionLayer* layer =
params->converter->network()->addConvolution(
*tensor->trt_tensor(), noutput, kernel_size,
params->use_explicit_precision ? empty_weights
: weights->GetTrtWeights(),
empty_weights);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
layer->setDilation(dilation);
conv_layer = layer;
}
if (params->use_explicit_precision) {
TRT_ENSURE(inputs.at(1).is_tensor());
nvinfer1::IShuffleLayer* layer = params->converter->network()->addShuffle(
*inputs.at(1).tensor()->trt_tensor());
layer->setFirstTranspose({3, 2, 0, 1});
layer->setReshapeDimensions({4, {0, 0, 0, 0}});
conv_layer->setInput(1, *layer->getOutput(0));
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (is_conv2d_backprop_input) {
std::vector<int64_t> output_spatial_dims =
GetSpatialDimsFromOutputSizes(backprop_output_size, h_index, w_index);
const int output_height = output_spatial_dims[0];
const int output_width = output_spatial_dims[1];
nvinfer1::Dims trt_output_shape = output_tensor->getDimensions();
int out_h_idx = params->use_implicit_batch ? 1 : 2;
int out_w_idx = params->use_implicit_batch ? 2 : 3;
const int height_diff = output_height - trt_output_shape.d[out_h_idx];
const int width_diff = output_width - trt_output_shape.d[out_w_idx];
if ((height_diff < 0) || (width_diff < 0)) {
return errors::InvalidArgument(
"input_sizes argument of Conv2DBackprop (i.e. output_shape argument "
"of conv2d_transpose) ",
"is too small for the given out_backprop argument of Conv2DBackprop "
"(i.e. input argument of conv2d_transpose). Expect: ",
"(", output_height, ", ", output_width, ") >= ", "(",
trt_output_shape.d[out_h_idx], ", ", trt_output_shape.d[out_w_idx],
")");
}
if ((height_diff > 0) || (width_diff > 0)) {
nvinfer1::DimsHW pre_padding(0, 0);
nvinfer1::DimsHW post_padding(height_diff, width_diff);
nvinfer1::IPaddingLayer* padding_layer =
params->converter->network()->addPadding(*output_tensor->trt_tensor(),
pre_padding, post_padding);
output_tensor = padding_layer->getOutput(0);
params->converter->SetLayerName(padding_layer, node_def, "pad");
}
}
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
bool AllowInefficientTranspose() {
static bool result = [] {
bool value;
Status status =
ReadBoolFromEnvVar("TF_DEBUG_TRT_ALLOW_INEFFICIENT_TRANSPOSE",
false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}();
return result;
}
Status ConvertTranspose(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"x", false}, {"perm", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
TRT_ShapedWeights weights = inputs.at(1).weights();
const int* weights_ptr = weights.GetPointer<int>();
std::vector<int> perm(weights_ptr, weights_ptr + weights.count());
ITensorProxyPtr input_tensor = inputs.at(0).tensor();
const int perm_size =
params->use_implicit_batch ? perm.size() - 1 : perm.size();
if (perm_size != size_t(input_tensor->getDimensions().nbDims)) {
return errors::InvalidArgument(
"Rank of perm for transpose does not match with that of the input.");
}
if (params->use_implicit_batch && perm[0] != 0) {
return errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
if (!IS_TRT_VERSION_GE(7, 1, 3, 4)) {
constexpr int64_t kMaxEfficientTranspose = 2500000;
int64_t tensor_size = DimsAdapter(input_tensor->getDimensions()).Volume();
if (!AllowInefficientTranspose() && tensor_size > kMaxEfficientTranspose) {
return errors::Unimplemented(StrCat("Transpose too large:", tensor_size));
}
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
input_tensor, perm, &output_tensor, params->node_def));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
Status ConvertShape(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", TrtInputArg::kBoth}}));
if (params->use_implicit_batch) {
return errors::Unimplemented(
"Shape is only supported for explicit batch mode.");
}
DimsAdapter input_dims(inputs.at(0).GetTrtDims());
if (params->validation_only) return OkStatus();
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
if (input_dims.IsStatic()) {
StatusOr<nvinfer1::IConstantLayer*> const_layer =
builder->ConstantShape(input_dims);
TRT_ENSURE_PTR_OK(const_layer);
params->outputs->push_back(
TRT_TensorOrWeights((*const_layer)->getOutput(0)));
return OkStatus();
}
StatusOr<nvinfer1::IShapeLayer*> shape_layer =
builder->Shape(inputs.at(0).tensor()->trt_tensor());
TRT_ENSURE_PTR_OK(shape_layer);
params->converter->SetLayerName(*shape_layer, params->node_def, "shape");
params->outputs->push_back(TRT_TensorOrWeights((*shape_layer)->getOutput(0)));
return OkStatus();
}
Status ExpectShapeTensor(const TRT_TensorOrWeights& tensor) {
if (tensor.tensor()->getType() != nvinfer1::DataType::kINT32) {
return errors::InvalidArgument("Expected a shape tensor with INT32 type");
}
if (tensor.GetTrtDims().nbDims > 1) {
return errors::InvalidArgument("Expected a 0D or 1D shape tensor");
}
return OkStatus();
}
Status ConvertDynamicReshape(const OpConverterParams* params) {
if (params->use_implicit_batch) {
return errors::InvalidArgument(
"The input \"shape\" for Reshape must be a constant in implicit batch"
" mode.");
}
if (!IS_TRT_VERSION_GE(7, 1, 3, 0)) {
return errors::InvalidArgument(
"Non constant shape input tensor for Reshape requires minimum TRT "
"7.1.3");
}
const auto& inputs = params->inputs;
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
TF_RETURN_IF_ERROR(ExpectShapeTensor(inputs.at(1)));
if (inputs.at(1).tensor()->getDimensions().nbDims == 0) {
return errors::Unimplemented(
"Reshape with dynamic input requires 1D input tensor");
}
if (params->validation_only) return OkStatus();
nvinfer1::IShuffleLayer* layer = params->converter->network()->addShuffle(
*input_tensor.tensor()->trt_tensor());
VLOG(2) << "ConvertReshape setInput (1) "
<< DebugString(inputs.at(1).tensor()->getDimensions());
layer->setInput(1, *inputs.at(1).tensor()->trt_tensor());
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
Status ConvertStaticReshapeForExplicitBatchMode(
const OpConverterParams* params, DimsAdapter output_dims,
ITensorProxyPtr* output_tensor) {
return PrepareTensorForShape(params->converter, params->inputs.at(0),
output_dims, params->validation_only,
output_tensor, params->node_def);
}
Status ConvertStaticReshapeForImplicitBatchMode(
const OpConverterParams* params, DimsAdapter output_dims,
ITensorProxyPtr* output_tensor) {
const auto& inputs = params->inputs;
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const int input_batch_dim = input_tensor.batch_size();
const int64_t output_batch_dim = output_dims.dim(0);
DimsAdapter input_nonbatch_dims(input_tensor.GetTrtDims());
DimsAdapter output_nonbatch_dims(output_dims);
TF_RETURN_IF_ERROR(output_nonbatch_dims.RemoveBatchDimension());
VLOG(1) << "input_batch_dim=" << input_batch_dim
<< ", input_nonbatch_dims=" << input_nonbatch_dims.DebugString()
<< "\nresult_batch_dim=" << output_batch_dim
<< ", result_nonbatch_dims=" << output_nonbatch_dims.DebugString();
bool reshape_may_change_batch_dim = false;
if (input_batch_dim != -1 && output_batch_dim != -1) {
reshape_may_change_batch_dim = (input_batch_dim != output_batch_dim);
} else {
reshape_may_change_batch_dim =
!AreDimsStaticWithSameSize(input_nonbatch_dims, output_nonbatch_dims);
}
if (reshape_may_change_batch_dim) {
return errors::Unimplemented("Reshape on batch dimension is not supported");
}
return PrepareTensorForShape(params->converter, input_tensor,
output_nonbatch_dims, params->validation_only,
output_tensor, params->node_def);
}
Status ConvertReshape(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"tensor", TrtInputArg::kTensor}, {"shape", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (inputs.at(1).is_tensor()) {
return ConvertDynamicReshape(params);
}
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.count() == 0 && params->use_implicit_batch) {
return errors::Unimplemented("Reshape to shape=[] is not supported");
}
DimsAdapter output_shape_dims(
absl::MakeSpan(weights.GetPointer<int>(), weights.count()));
ITensorProxyPtr output_tensor = nullptr;
if (!params->use_implicit_batch) {
TF_RETURN_IF_ERROR(ConvertStaticReshapeForExplicitBatchMode(
params, output_shape_dims, &output_tensor));
} else {
TF_RETURN_IF_ERROR(ConvertStaticReshapeForImplicitBatchMode(
params, output_shape_dims, &output_tensor));
}
if (params->validation_only) return OkStatus();
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
Status ConvertExpandDims(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"axis", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
auto axis = inputs.at(1).weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("ExpandDims axis must be a scalar");
}
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(axis[0], dims.nbDims + 1, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
if (!params->use_implicit_batch && !HasStaticShape(input_dims)) {
TF_RETURN_IF_ERROR(params->converter->DynamicExpandDims(
        /*input=*/input_tensor.tensor(), /*dims=*/dims, /*axis=*/trt_axis,
        /*params=*/params, /*output=*/&output_tensor));
} else {
input_dims.insert(input_dims.begin() + trt_axis, 1);
DimsAdapter dims(input_dims);
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, input_tensor, dims,
        /*validation_only=*/false, &output_tensor, params->node_def));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
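// Performs a reshape whose target shape is only known at runtime: slices of
// the input's shape tensor are concatenated with optional constant dimensions
// and fed to an IShuffleLayer as its dynamic shape input.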
Status Converter::DynamicReshape(ITensorProxyPtr input,
std::vector<std::pair<int, int>> slices,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::vector<int> size_for_added_dims,
std::optional<int> op_instance) {
*output = nullptr;
if (params->validation_only) {
return errors::Internal(
"DynamicReshape should not be used during validation");
}
ITensorProxyPtr shape =
network()->addShape(*input->trt_tensor())->getOutput(0);
std::vector<ITensorProxyPtr> concat_inputs;
int max_num_slices = std::max(slices.size(), size_for_added_dims.size());
int op_instance_value = op_instance.has_value() ? op_instance.value() : 0;
for (int i = 0; i < max_num_slices; i++) {
ITensorProxyPtr tensor;
if (i < size_for_added_dims.size() && size_for_added_dims[i] >= 0) {
nvinfer1::Dims dims{1, {1}};
if (size_for_added_dims[i] > 0) {
dims.d[0] = size_for_added_dims[i];
}
TF_RETURN_IF_ERROR(
CreateScalarConstant(params, std::min(size_for_added_dims[i], 1),
&tensor, nvinfer1::DataType::kINT32, dims));
concat_inputs.push_back(tensor);
}
if (i < slices.size()) {
nvinfer1::ISliceLayer* slice_layer = network()->addSlice(
*shape->trt_tensor(), {1, {slices[i].first}},
{1, {slices[i].second - slices[i].first}}, {1, {1}});
concat_inputs.push_back(slice_layer->getOutput(0));
string slice_name = StrCat("slice_", op_instance_value);
      SetLayerName(slice_layer, params->node_def, slice_name,
                   /*sub_op_instance=*/i);
}
}
std::vector<nvinfer1::ITensor*> trt_concat_inputs;
for (const auto& t : concat_inputs) {
trt_concat_inputs.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* concat_layer = network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_concat_inputs.data()),
concat_inputs.size());
SetLayerName(concat_layer, params->node_def, "concat", op_instance);
concat_layer->setAxis(0);
ITensorProxyPtr new_shape = concat_layer->getOutput(0);
nvinfer1::IShuffleLayer* shuffle =
network()->addShuffle(*input->trt_tensor());
SetLayerName(shuffle, params->node_def, "shuffle", op_instance);
shuffle->setInput(1, *new_shape->trt_tensor());
*output = shuffle->getOutput(0);
return OkStatus();
}
Status Converter::DynamicExpandDims(ITensorProxyPtr input,
const nvinfer1::Dims& dims, int axis,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::optional<int> op_instance) {
if (params->validation_only) {
*output = nullptr;
return errors::Internal(
"DynamicExpandDims should not be used during validation");
}
std::vector<std::pair<int, int>> slices;
std::vector<int> extra_dims;
if (axis != 0) {
slices.push_back(std::pair<int, int>{0, axis});
extra_dims.push_back(-1);
}
extra_dims.push_back(1);
if (axis != dims.nbDims) {
slices.push_back(std::pair<int, int>{axis, dims.nbDims});
}
  return DynamicReshape(/*input=*/input, /*slices=*/slices, /*params=*/params,
                        /*output=*/output,
                        /*size_for_added_dims=*/extra_dims, op_instance);
}
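// Removes the dimensions marked with 0 in `input_dims`. Dynamic shapes in
// explicit batch mode go through DynamicReshape; otherwise a static reshape
// is sufficient.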
Status Converter::SqueezeTensor(ITensorProxyPtr input,
std::vector<int>* input_dims,
const OpConverterParams* params,
ITensorProxyPtr* output,
std::optional<int> op_instance) {
if (!params->use_implicit_batch && !HasStaticShape(*input_dims)) {
std::vector<std::pair<int, int>> slices;
for (int i = 0; i < input_dims->size(); i++) {
if (input_dims->at(i) != 0) {
slices.push_back(std::pair<int, int>(i, i + 1));
}
}
    return DynamicReshape(/*input=*/input, /*slices=*/slices,
                          /*params=*/params, /*output=*/output,
                          /*size_for_added_dims=*/{}, op_instance);
}
input_dims->erase(std::remove(input_dims->begin(), input_dims->end(), 0),
input_dims->end());
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(input), DimsAdapter(*input_dims),
      /*validation_only=*/false, output, params->node_def, op_instance));
return OkStatus();
}
Status ConvertSqueeze(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_TensorOrWeights& input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
std::vector<int64_t> squeeze_dims;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "squeeze_dims", &squeeze_dims));
if (squeeze_dims.empty()) {
if (params->use_implicit_batch || !HasStaticShape(dims)) {
return errors::Unimplemented(
"Squeeze is not implemented for empty squeeze_dims");
} else {
for (int& dim : input_dims) {
if (dim == 1) {
dim = 0;
}
}
}
} else {
std::vector<int> trt_axes;
trt_axes.reserve(squeeze_dims.size());
for (int tf_axis : squeeze_dims) {
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (input_dims[trt_axis] != -1 && input_dims[trt_axis] != 1) {
return errors::InvalidArgument(
"Dimension ", tf_axis, " with size ", input_dims[trt_axis],
" cannot be squeezed because it must be size 1");
}
trt_axes.push_back(trt_axis);
}
for (int axis : trt_axes) {
input_dims[axis] = 0;
}
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
      /*input=*/input_tensor.tensor(), /*input_dims=*/&input_dims,
      /*params=*/params, /*output=*/&output_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
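// Converts tf.slice by translating its begin/size arguments into
// strided-slice style begin/end/stride vectors (stride 1 everywhere) and
// delegating to ConvertStridedSliceHelper.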
Status ConvertSlice(const OpConverterParams* params) {
const auto& inputs = params->inputs;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"input", false}, {"begin", true}, {"size", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
const TRT_ShapedWeights& begin_weights = inputs.at(1).weights();
const TRT_ShapedWeights& size_weights = inputs.at(2).weights();
if (absl::c_any_of(begin_weights.GetSpan<int32>(),
[](const int32 val) { return val < 0; })) {
return errors::InvalidArgument("\"begin\" in Slice is out of range");
}
if (absl::c_any_of(size_weights.GetSpan<int32>(),
[](const int32 val) { return val < -1; })) {
return errors::InvalidArgument("\"size\" in Slice is out of range");
}
PartialTensorShape input_shape;
TF_RETURN_IF_ERROR(
DimsAdapter(inputs.at(0).GetTrtDims())
.PartialTensorShape(
&input_shape, params->use_implicit_batch
? std::optional<int>(inputs.at(0).batch_size())
: std::nullopt));
if (static_cast<int64>(input_shape.dims()) !=
begin_weights.GetTensor().NumElements() ||
static_cast<int64>(input_shape.dims()) !=
size_weights.GetTensor().NumElements()) {
return errors::InvalidArgument(
"Length of begin and size arguments must equal rank of input for "
"Slice");
}
if (params->use_implicit_batch) {
auto begin_v = begin_weights.GetSpan<int32>();
auto size_v = size_weights.GetSpan<int32>();
if (begin_v[0] != 0 ||
(size_v[0] != -1 && size_v[0] != input_shape.dim_size(0))) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension in "
"implicit batch mode");
}
}
PartialTensorShape processing_shape;
PartialTensorShape final_shape;
bool is_identity;
bool is_simple_slice;
bool slice_dim0;
absl::InlinedVector<int64, 4> begin;
absl::InlinedVector<int64, 4> end;
absl::InlinedVector<int64, 4> strides;
StridedSliceShapeSpec strided_slice_spec;
std::bitset<32> begin_mask(0);
std::bitset<32> end_mask(0);
std::bitset<32> ellipsis_mask(0);
std::bitset<32> new_axis_mask(0);
std::bitset<32> shrink_axis_mask(0);
Tensor strides_tensor = tensor::DeepCopy(begin_weights.GetTensor());
Tensor end_tensor = tensor::DeepCopy(size_weights.GetTensor());
Tensor size_tensor = tensor::DeepCopy(size_weights.GetTensor());
auto strides_vec = strides_tensor.flat<int32>();
auto end_vec = end_tensor.flat<int32>();
auto size_vec = size_tensor.flat<int32>();
auto begin_vec = begin_weights.GetTensor().flat<int32>();
for (int i = 0; i < input_shape.dims(); i++) {
strides_vec(i) = 1;
begin_mask[i] = false;
if (size_vec(i) == -1) {
end_mask[i] = true;
end_vec(i) = 0;
size_vec(i) = 0;
} else {
end_mask[i] = false;
end_vec(i) = begin_vec(i) + size_vec(i);
if (end_vec(i) > input_shape.dim_size(i) && input_shape.dim_size(i) > 0) {
return errors::InvalidArgument("\"begin\" + \"size\" for dimension ", i,
" in Slice is out of range");
}
}
}
auto bitset_to_int32 = [](const std::bitset<32>& bs) {
return static_cast<int32_t>(bs.to_ulong());
};
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
&begin_weights.GetTensor(), &end_tensor, strides_tensor, input_shape,
bitset_to_int32(begin_mask), bitset_to_int32(end_mask),
bitset_to_int32(ellipsis_mask), bitset_to_int32(new_axis_mask),
bitset_to_int32(shrink_axis_mask), &processing_shape, &final_shape,
&is_identity, &is_simple_slice, &slice_dim0, &begin, &end, &strides,
&strided_slice_spec));
VLOG(2) << "ConvertSlice: " << "\n input_shape: " << input_shape
<< "\n processing_shape: " << processing_shape
<< "\n final_shape: " << final_shape
<< "\n begin: " << DebugString(begin)
<< "\n stride: " << DebugString(strides)
<< "\n end: " << DebugString(end)
<< "\n is identity: " << is_identity
<< "\n is simple_slice: " << is_simple_slice
<< "\n slice dim0: " << slice_dim0
<< " StridedSliceShapeSpec:" << "\n begin_dense_mask: "
<< std::bitset<32>(strided_slice_spec.begin_dense_mask)
<< "\n end_dense_mask: "
<< std::bitset<32>(strided_slice_spec.end_dense_mask)
<< "\n shrink_dense_mask: "
<< std::bitset<32>(strided_slice_spec.shrink_axis_dense_mask);
return ConvertStridedSliceHelper(params, inputs.at(0), input_shape, begin,
strides, end, std::nullopt, std::nullopt,
strided_slice_spec);
}
Status ConvertStridedSlice(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", false}, {"begin", true}, {"end", true}, {"strides", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
int32 begin_mask, end_mask, ellipsis_mask, shrink_axis_mask, new_axis_mask;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "begin_mask", &begin_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "end_mask", &end_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ellipsis_mask", &ellipsis_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "shrink_axis_mask", &shrink_axis_mask));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "new_axis_mask", &new_axis_mask));
if (new_axis_mask != 0) {
return errors::Unimplemented(
"new_axis_mask is not supported for StridedSlice");
}
if (params->use_implicit_batch && shrink_axis_mask & 1) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension");
}
PartialTensorShape input_shape;
TF_RETURN_IF_ERROR(
DimsAdapter(inputs.at(0).GetTrtDims())
.PartialTensorShape(
&input_shape, params->use_implicit_batch
? std::optional<int>(inputs.at(0).batch_size())
: std::nullopt));
const TRT_ShapedWeights& begin_weights = inputs.at(1).weights();
const TRT_ShapedWeights& end_weights = inputs.at(2).weights();
const TRT_ShapedWeights& stride_weights = inputs.at(3).weights();
if (!AllLengthsEqual({begin_weights.ToVector<int>(),
end_weights.ToVector<int>(),
stride_weights.ToVector<int>()})) {
return errors::InvalidArgument(
"Length of begin, end, and stride must be equal");
}
PartialTensorShape processing_shape;
PartialTensorShape final_shape;
bool is_identity;
bool is_simple_slice;
bool slice_dim0;
absl::InlinedVector<int64, 4> begin;
absl::InlinedVector<int64, 4> end;
absl::InlinedVector<int64, 4> strides;
StridedSliceShapeSpec strided_slice_spec;
TF_RETURN_IF_ERROR(ValidateStridedSliceOp(
&begin_weights.GetTensor(), &end_weights.GetTensor(),
stride_weights.GetTensor(), input_shape, begin_mask, end_mask,
ellipsis_mask, new_axis_mask, shrink_axis_mask, &processing_shape,
&final_shape, &is_identity, &is_simple_slice, &slice_dim0, &begin, &end,
&strides, &strided_slice_spec));
if (!params->validation_only) {
VLOG(2) << "After ValidateStridedSliceOp:" << "\n input_shape: "
<< input_shape << "\n processing_shape: " << processing_shape
<< "\n final_shape: " << final_shape
<< "\n begin: " << DebugString(begin)
<< "\n stride: " << DebugString(strides)
<< "\n end: " << DebugString(end)
<< " is identity: " << is_identity
<< "\n is simple_slice: " << is_simple_slice
<< "\n slice dim0: " << slice_dim0
<< " StridedSliceShapeSpec:" << "\n begin_dense_mask: "
<< std::bitset<32>(strided_slice_spec.begin_dense_mask)
<< "\n end_dense_mask: "
<< std::bitset<32>(strided_slice_spec.end_dense_mask)
<< "\n shrink_dense_mask: "
<< std::bitset<32>(strided_slice_spec.shrink_axis_dense_mask);
}
if (params->use_implicit_batch &&
!((ellipsis_mask & 1) &&
begin_weights.Shape().NumDims() < input_shape.dims())) {
const bool begin_is_modified = !(begin_mask & 1) && (begin[0] != 0);
const bool stride_is_modified = (strides[0] != 1);
const bool batch_size_is_defined = (input_shape.dim_size(0) > 0);
const bool end_is_modified =
!(end_mask & 1) &&
(!batch_size_is_defined || (end[0] != input_shape.dim_size(0)));
if (begin_is_modified || stride_is_modified || end_is_modified) {
return errors::Unimplemented(
"TensorRT does not allow modifications to the batch dimension");
}
}
std::optional<nvinfer1::Dims> final_shape_dims = std::nullopt;
if (shrink_axis_mask) {
final_shape_dims.emplace();
auto dims_adap =
DimsAdapter::Create(final_shape, params->use_implicit_batch);
TRT_ENSURE_OK(dims_adap);
*final_shape_dims = dims_adap->AsTrtDims();
}
return ConvertStridedSliceHelper(params, inputs.at(0), input_shape, begin,
strides, end, final_shape_dims, 0,
strided_slice_spec);
}
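// Thin dispatchers into ConvertConv2DHelper: ConvertConv2DDepthwise passes a
// group argument of 0 and ConvertConv2DBackpropInput sets the flag that marks
// a transposed (backprop-input) convolution.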
Status ConvertConv2D(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 1, false);
}
Status ConvertConv2DDepthwise(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 0, false);
}
Status ConvertConv2DBackpropInput(const OpConverterParams* params) {
return ConvertConv2DHelper(params, 1, true);
}
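// Shared implementation for Conv3D and Conv3DBackpropInputV2. Checks the 5-D
// stride and dilation attributes, transposes NDHWC inputs to NCDHW, reorders
// the DRSCK kernel weights to KCDRS, and emits an IConvolutionLayer (or an
// IDeconvolutionLayer for the backprop-input case) with SAME padding mapped
// to kSAME_UPPER.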
Status ConvertConv3DHelper(const OpConverterParams* params, int group,
bool is_conv3d_backprop_input = false) {
const int kNumDims = 5;
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TRT_TensorOrWeights backprop_output_size;
ITensorProxyPtr tensor = nullptr;
if (is_conv3d_backprop_input) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input_sizes", true}, {"filter", true}, {"out_backprop", false}}));
backprop_output_size = inputs.at(0);
tensor = inputs.at(2).tensor();
} else {
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"filter", true}}));
tensor = inputs.at(0).tensor();
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
const TRT_ShapedWeights weights_drsck = inputs.at(1).weights();
if (weights_drsck.Shape().NumDims() != kNumDims) {
return errors::InvalidArgument("Conv3D expects kernel of dimension 5");
}
string data_format, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
const bool is_ndhwc = (data_format == "NDHWC");
const int d_index = is_ndhwc ? 1 : 2;
const int h_index = is_ndhwc ? 2 : 3;
const int w_index = is_ndhwc ? 3 : 4;
const int c_index = is_ndhwc ? 4 : 1;
if (tf_dilations.size() != kNumDims) {
return errors::InvalidArgument(
"Convolution dilations field must specify 5 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 dilation_dhw(
tf_dilations[d_index], tf_dilations[h_index], tf_dilations[w_index]);
if (is_conv3d_backprop_input &&
(dilation_dhw.d[0] != 1 || dilation_dhw.d[1] != 1 ||
dilation_dhw.d[2] != 1)) {
return errors::Unimplemented(
"Dilation with Conv3DBackpropInputV2 (conv3d_transpose) is not "
"supported");
}
if (tf_stride.size() != kNumDims) {
return errors::InvalidArgument(
"Convolution strides field must specify 5 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 stride_dhw(tf_stride[d_index], tf_stride[h_index],
tf_stride[w_index]);
const auto tensor_dim = tensor->getDimensions();
if (is_conv3d_backprop_input && padding_type == "SAME") {
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_drsck);
TRT_ENSURE_OK(weights);
nvinfer1::Dims3 effective_kernel_size(
weights->Shape().dim(0) +
(weights->Shape().dim(0) - 1) * (dilation_dhw.d[0] - 1),
weights->Shape().dim(1) +
(weights->Shape().dim(1) - 1) * (dilation_dhw.d[1] - 1),
weights->Shape().dim(2) +
(weights->Shape().dim(2) - 1) * (dilation_dhw.d[2] - 1)
);
const auto output_size_weights =
backprop_output_size.weights().GetPointer<int>();
const std::vector<int64_t> input_dims = {output_size_weights[d_index],
output_size_weights[h_index],
output_size_weights[w_index]};
const std::vector<std::pair<int, int>> padding =
CreateSamePadding(stride_dhw, effective_kernel_size, input_dims);
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second ||
padding[2].first != padding[2].second) {
return errors::Unimplemented(
"Asymmetric padding with Conv3DBackpropInputV2 (conv3d_transpose) is "
"not supported");
}
}
int implicit_batch_offset = params->use_implicit_batch ? -1 : 0;
if (tensor->getDimensions().d[c_index + implicit_batch_offset] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
if (params->validation_only) return OkStatus();
const bool need_transpose = is_ndhwc;
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 4, 1, 2, 3}, &tensor, node_def, "to_NCDHW"));
}
const int num_groups = (group == 0) ? tensor_dim.d[0] : group;
StatusOr<TRT_ShapedWeights> weights =
params->weight_store->GetTempWeights(weights_drsck);
TRT_ENSURE_OK(weights);
ReorderDRSCKToKCDRS(weights_drsck, &*weights, num_groups);
TRT_ShapedWeights biases(weights->TrtDType());
const int output_axis = is_conv3d_backprop_input ? 1 : 0;
const int noutput = weights->Shape().dim(output_axis) * num_groups;
nvinfer1::Dims3 kernel_size_drs(weights->Shape().dim(2),
weights->Shape().dim(3),
weights->Shape().dim(4)
);
nvinfer1::ILayer* conv_layer = nullptr;
if (is_conv3d_backprop_input) {
nvinfer1::IDeconvolutionLayer* layer =
params->converter->network()->addDeconvolutionNd(
*tensor->trt_tensor(), noutput, kernel_size_drs,
weights->GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride_dhw);
if (padding_type == "SAME") {
VLOG(2) << "Using SAME padding";
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
conv_layer = layer;
} else {
nvinfer1::IConvolutionLayer* layer =
params->converter->network()->addConvolutionNd(
*tensor->trt_tensor(), noutput, kernel_size_drs,
weights->GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride_dhw);
if (padding_type == "SAME") {
VLOG(2) << "Using SAME padding";
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
layer->setNbGroups(num_groups);
layer->setDilationNd(dilation_dhw);
conv_layer = layer;
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 4, 1}, &output_tensor, node_def, "to_NDHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
Status ConvertConv3D(const OpConverterParams* params) {
return ConvertConv3DHelper(params, 1, false);
}
Status ConvertConv3DBackpropInputV2(const OpConverterParams* params) {
return ConvertConv3DHelper(params, 1, true);
}
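// Converts MaxPool3D/AvgPool3D to an IPoolingLayer with a 3-D kernel and
// stride; NDHWC inputs are transposed to NCDHW before the layer and back
// afterwards.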
Status ConvertPool3D(const OpConverterParams* params) {
const int kNumDims = 5;
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
nvinfer1::PoolingType type;
if (node_def.op() == "MaxPool3D") {
type = nvinfer1::PoolingType::kMAX;
} else if (node_def.op() == "AvgPool3D") {
type = nvinfer1::PoolingType::kAVERAGE;
} else {
return errors::Unimplemented("Unsupported pooling type: ", node_def.op());
}
string data_format, padding_type;
std::vector<int64_t> tf_stride, tf_kernel;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &tf_kernel));
if ((padding_type != "SAME") && (padding_type != "VALID")) {
return errors::Unimplemented("Unsupported padding type: ", padding_type);
}
const bool is_ndhwc = (data_format == "NDHWC");
const int c_index = is_ndhwc ? 4 : 1;
const int d_index = is_ndhwc ? 1 : 2;
const int h_index = is_ndhwc ? 2 : 3;
const int w_index = is_ndhwc ? 3 : 4;
if (tf_stride.size() != kNumDims) {
return errors::InvalidArgument(
"Pooling strides field must specify 5 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"stride must be 1 for batch and channel dimensions");
}
if (tf_kernel.size() != kNumDims) {
return errors::InvalidArgument(
"Pooling ksize field must specify 5 dimensions");
}
if (tf_kernel[0] != 1 || tf_kernel[c_index] != 1) {
return errors::Unimplemented(
"ksize must be 1 for batch and channel dimensions");
}
const nvinfer1::Dims3 stride(tf_stride[d_index], tf_stride[h_index],
tf_stride[w_index]);
const nvinfer1::Dims3 ksize(tf_kernel[d_index], tf_kernel[h_index],
tf_kernel[w_index]);
if (!(ksize.nbDims >= 3 &&
(ksize.d[0] >= 1 && ksize.d[1] >= 1 && ksize.d[2] >= 1) &&
(ksize.d[0] * ksize.d[1] * ksize.d[2] < MAX_KERNEL_DIMS_PRODUCT(3)))) {
return errors::InvalidArgument("Window dimensions are not within bounds");
}
if (params->validation_only) return OkStatus();
ITensorProxyPtr tensor = inputs.at(0).tensor();
if (data_format == "NDHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 4, 1, 2, 3}, &tensor, node_def, "to_NCDHW"));
}
nvinfer1::IPoolingLayer* layer = params->converter->network()->addPoolingNd(
*tensor->trt_tensor(), type, ksize);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStrideNd(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(layer, node_def, "pooling");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (data_format == "NDHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 4, 1}, &output_tensor, node_def, "to_NDHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
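// Converts FusedConv2DBiasActivation into a convolution (with the bias folded
// in as the layer's bias weights) followed by an optional activation layer.
// A non-empty side_input or a conv_input_scale other than 1.0 is rejected.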
Status ConvertFusedConv2DBiasActivation(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false},
{"filter", true},
{"bias", true},
{"side_input", true},
{"conv_input_scale", true},
{"side_input_scale", true}}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.Shape().NumDims() != 4) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation expects kernel of dimension 4");
}
string data_format, filter_format, activation_mode, padding_type;
std::vector<int64_t> tf_dilations, tf_stride;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "filter_format", &filter_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "activation_mode", &activation_mode));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "dilations", &tf_dilations));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
if (data_format != "NHWC" && data_format != "NCHW") {
return errors::InvalidArgument("Unsupported data_format:", data_format);
}
int c_index = (data_format == "NHWC") ? 3 : 1;
int h_index = (data_format == "NHWC") ? 1 : 2;
int w_index = (data_format == "NHWC") ? 2 : 3;
if (tf_dilations.size() != 4) {
return errors::InvalidArgument(
"Convolution dilations field must specify 4 dimensions");
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW dilation(tf_dilations[h_index], tf_dilations[w_index]);
if (tf_stride.size() != 4) {
return errors::InvalidArgument(
"Convolution strides field must specify 4 dimensions");
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions");
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
auto op_pair = ActivationTypeMap()->find(activation_mode);
if (op_pair == ActivationTypeMap()->end() && activation_mode != "None") {
return errors::Unimplemented("Activation mode not supported: ",
activation_mode);
}
if (filter_format != "HWIO" && filter_format != "OIHW") {
return errors::InvalidArgument("Unsupported filter_format:", filter_format);
}
TRT_ShapedWeights side_input = inputs.at(3).weights();
if (side_input.count() != 0) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation doesn't yet support side_input");
}
TRT_ShapedWeights conv_input_scale = inputs.at(4).weights();
if (conv_input_scale.count() != 1 ||
conv_input_scale.TrtDType() != nvinfer1::DataType::kFLOAT ||
conv_input_scale.GetSpan<float>()[0] != 1.0) {
return errors::InvalidArgument(
"FusedConv2DBiasActivation doesn't yet support conv_input_scale");
}
if (params->validation_only) return OkStatus();
const bool need_transpose = (data_format == "NHWC");
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
nvinfer1::DimsHW kernel_size;
if (filter_format == "OIHW") {
kernel_size.h() = weights.Shape().dim(2);
kernel_size.w() = weights.Shape().dim(3);
} else {
DCHECK_EQ(filter_format, "HWIO");
kernel_size.h() = weights.Shape().dim(0);
kernel_size.w() = weights.Shape().dim(1);
}
TRT_ShapedWeights biases = inputs.at(2).weights();
nvinfer1::IConvolutionLayer* conv_layer = nullptr;
if (filter_format == "OIHW") {
conv_layer = params->converter->network()->addConvolution(
*tensor->trt_tensor(), weights.Shape().dim(0), kernel_size,
weights.GetTrtWeights(), biases.GetTrtWeights());
} else {
TRT_ENSURE(filter_format == "HWIO");
StatusOr<TRT_ShapedWeights> weights_kcrs =
params->weight_store->GetTempWeights(weights);
TRT_ENSURE_OK(weights_kcrs);
ReorderRSCKToKCRS(weights, &*weights_kcrs, 1);
conv_layer = params->converter->network()->addConvolution(
*tensor->trt_tensor(), weights.Shape().dim(3), kernel_size,
weights_kcrs->GetTrtWeights(), biases.GetTrtWeights());
}
TFTRT_RETURN_ERROR_IF_NULLPTR(conv_layer, node_def.name());
conv_layer->setStride(stride);
if (padding_type == "SAME") {
conv_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(conv_layer, node_def, "conv");
conv_layer->setNbGroups(1);
conv_layer->setDilation(dilation);
ITensorProxyPtr output_tensor = conv_layer->getOutput(0);
if (op_pair != ActivationTypeMap()->end()) {
nvinfer1::IActivationLayer* activation_layer =
params->converter->network()->addActivation(
*output_tensor->trt_tensor(), op_pair->second);
TFTRT_RETURN_ERROR_IF_NULLPTR(activation_layer, node_def.name());
params->converter->SetLayerName(activation_layer, node_def, "activation");
output_tensor = activation_layer->getOutput(0);
}
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
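// 2-D counterpart of ConvertPool3D: maps MaxPool/AvgPool to an IPoolingLayer,
// transposing NHWC inputs to NCHW as needed.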
Status ConvertPool(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT8};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
nvinfer1::PoolingType type;
if (node_def.op() == "MaxPool") {
type = nvinfer1::PoolingType::kMAX;
} else if (node_def.op() == "AvgPool") {
type = nvinfer1::PoolingType::kAVERAGE;
} else {
return errors::Unimplemented("Unsupported pooling type: ", node_def.op());
}
string data_format, padding_type;
std::vector<int64_t> tf_stride, tf_kernel;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "padding", &padding_type));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "strides", &tf_stride));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "ksize", &tf_kernel));
if ((padding_type != "SAME") && (padding_type != "VALID")) {
return errors::Unimplemented("Unsupported padding type: ", padding_type);
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
int h_index = 2;
int w_index = 3;
if (data_format == "NHWC") {
h_index = 1;
w_index = 2;
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
if (!((ksize.h() >= 1 && ksize.w() >= 1) &&
(ksize.h() * ksize.w() < MAX_KERNEL_DIMS_PRODUCT(2)))) {
return errors::InvalidArgument("Window dimensions are not within bounds");
}
if (params->validation_only) return OkStatus();
if (data_format == "NHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, {0, 3, 1, 2}, &tensor, node_def, "to_NCHW"));
}
nvinfer1::IPoolingLayer* layer = params->converter->network()->addPooling(
*tensor->trt_tensor(), type, ksize);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
if (padding_type == "SAME") {
layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
}
params->converter->SetLayerName(layer, node_def, "pooling");
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (data_format == "NHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, {0, 2, 3, 1}, &output_tensor, node_def, "to_NHWC"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
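// Converts ClipByValue to a kCLIP activation whose alpha/beta are read from
// the clip_value_min/clip_value_max weights.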
Status ConvertClipByValue(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"t", false}, {"clip_value_min", true}, {"clip_value_max", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (params->validation_only) return OkStatus();
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "T", &dtype));
float clip_value_min = 0.0f;
float clip_value_max = 0.0f;
if (dtype == DataType::DT_FLOAT) {
clip_value_min = inputs.at(1).weights().GetSpan<float>()[0];
clip_value_max = inputs.at(2).weights().GetSpan<float>()[0];
} else if (dtype == DataType::DT_HALF) {
clip_value_min =
static_cast<float>(inputs.at(1).weights().GetSpan<Eigen::half>()[0]);
clip_value_max =
static_cast<float>(inputs.at(2).weights().GetSpan<Eigen::half>()[0]);
}
nvinfer1::IActivationLayer* layer =
params->converter->network()->addActivation(
*inputs.at(0).tensor()->trt_tensor(),
nvinfer1::ActivationType::kCLIP);
layer->setAlpha(clip_value_min);
layer->setBeta(clip_value_max);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "activation");
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
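// Converts BiasAdd by reshaping the bias so it broadcasts against the input
// rank and adding the two tensors with an elementwise SUM layer.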
Status ConvertBiasAdd(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
if (inputs[0].is_weights() && inputs[1].is_weights()) {
return errors::InvalidArgument(
"All inputs are weights, but Grappler is expected to fold them.");
}
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
string data_format;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "data_format", &data_format));
nvinfer1::Dims input_shape = inputs.at(0).GetTrtDims();
nvinfer1::Dims bias_shape = inputs.at(1).GetTrtDims();
if (data_format == "NCHW") {
if (params->use_implicit_batch) {
bias_shape.nbDims = input_shape.nbDims;
std::fill(bias_shape.d + 1, bias_shape.d + bias_shape.nbDims, 1);
} else {
std::vector<int> bias_shape_vec(bias_shape.d,
bias_shape.d + bias_shape.nbDims);
bias_shape_vec.insert(bias_shape_vec.begin(), 1);
bias_shape_vec.insert(bias_shape_vec.end(),
input_shape.nbDims - bias_shape_vec.size(), 1);
DimsAdapter(bias_shape_vec).TrtDims(&bias_shape);
}
} else {
TF_RETURN_IF_ERROR(GetTrtBroadcastShape(inputs.at(0), inputs.at(1),
true,
params->use_implicit_batch,
&input_shape, &bias_shape));
}
ITensorProxyPtr input_tensor{nullptr};
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, inputs.at(0), DimsAdapter(input_shape),
params->validation_only, &input_tensor, node_def,
0));
ITensorProxyPtr bias_tensor{nullptr};
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, inputs.at(1), DimsAdapter(bias_shape),
params->validation_only, &bias_tensor, node_def,
1));
VLOG(2) << "Bias shape adjusted to " << DebugString(bias_shape);
if (params->validation_only) return OkStatus();
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*input_tensor->trt_tensor(), *bias_tensor->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "sum");
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
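// Returns true if the integral value fits in int32. Used below when narrowing
// TF integer tensors to the int32 weights the converter stores.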
template <typename Input>
inline bool IsIntegerInInt32Bounds(const Input& inp) {
static_assert(std::is_integral<Input>::value,
"This function is only implemented for integral types.");
if (sizeof(Input) < sizeof(int32) || std::is_same<Input, int32>::value) {
return true;
}
if (!std::numeric_limits<Input>::is_signed) {
return inp <= static_cast<Input>(std::numeric_limits<int32>::max());
}
return (inp >= static_cast<Input>(std::numeric_limits<int32>::lowest()) &&
inp <= static_cast<Input>(std::numeric_limits<int32>::max()));
}
template <DataType dtype>
Status CopyToTrtInt32Array(const Tensor& tensor, int32* dst) {
typedef typename EnumToDataType<dtype>::Type CType;
const CType* src = tensor.flat<CType>().data();
for (int i = 0; i < tensor.NumElements(); ++i) {
if (!IsIntegerInInt32Bounds(src[i])) {
return errors::InvalidArgument("Value at index ", i,
" is outside the range of int32");
}
dst[i] = static_cast<int32>(src[i]);
}
return OkStatus();
}
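// Copies a TF tensor into newly allocated TRT weights, converting any integer
// dtype to int32 (with range checking via CopyToTrtInt32Array).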
Status TfTensorToTrtWeights(const Tensor& tensor, TrtWeightStore* weight_store,
TRT_ShapedWeights* weights) {
const DataType dtype = tensor.dtype();
DataType converted_dtype = DataTypeIsInteger(dtype) ? DT_INT32 : dtype;
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(TfTypeToTrtType(converted_dtype, &trt_dtype));
if (tensor.NumElements() == 0) {
*weights = TRT_ShapedWeights(trt_dtype);
return OkStatus();
}
StatusOr<DimsAdapter> weight_dims = DimsAdapter::Create(tensor.shape());
TRT_ENSURE_OK(weight_dims);
auto tmp = weight_store->GetTempWeights(trt_dtype, weight_dims->AsTrtDims());
TRT_ENSURE_OK(tmp);
*weights = std::move(tmp).value();
if (converted_dtype == dtype) {
std::copy_n(tensor.tensor_data().data(), tensor.TotalBytes(),
weights->GetPointer<int8>());
return OkStatus();
}
Status status = OkStatus();
int32* dst = weights->GetPointer<int32>();
switch (dtype) {
case DT_INT8:
status = CopyToTrtInt32Array<DT_INT8>(tensor, dst);
break;
case DT_UINT8:
status = CopyToTrtInt32Array<DT_UINT8>(tensor, dst);
break;
case DT_INT16:
status = CopyToTrtInt32Array<DT_INT16>(tensor, dst);
break;
case DT_UINT16:
status = CopyToTrtInt32Array<DT_UINT16>(tensor, dst);
break;
case DT_UINT32:
status = CopyToTrtInt32Array<DT_UINT32>(tensor, dst);
break;
case DT_INT64:
status = CopyToTrtInt32Array<DT_INT64>(tensor, dst);
break;
case DT_UINT64:
status = CopyToTrtInt32Array<DT_UINT64>(tensor, dst);
break;
default:
return errors::Internal("Unexpected DataType: ", DataTypeString(dtype));
}
return status;
}
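// Converts a Const node: parses the TensorProto from the "value" attribute and
// stores it as TRT weights.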
Status ConvertConst(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (!inputs.empty()) {
return errors::InvalidArgument(
"Constant node is expected to have empty input list");
}
const auto& tensor_proto = node_def.attr().at("value").tensor();
Tensor tensor;
if (!tensor.FromProto(tensor_proto)) {
return errors::Internal("Cannot parse weight tensor proto: ",
node_def.name());
}
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "dtype", &dtype));
if (dtype != tensor.dtype()) {
return errors::InvalidArgument("DataType mismatch between attr (",
DataTypeString(dtype), ") and tensor (",
DataTypeString(tensor.dtype()), ")");
}
TRT_ShapedWeights weights;
TF_RETURN_IF_ERROR(
TfTensorToTrtWeights(tensor, params->weight_store, &weights));
if (params->outputs != nullptr) {
params->outputs->push_back(TRT_TensorOrWeights(weights));
}
return OkStatus();
}
Status ConvertIdentity(const OpConverterParams* params) {
if (params->validation_only) return OkStatus();
for (int i = 0; i < params->inputs.size(); i++) {
params->outputs->push_back(params->inputs.at(i));
}
return OkStatus();
}
Status ConvertFake(const OpConverterParams* params) {
if (params->validation_only) return OkStatus();
return errors::Unimplemented(
"This converter is not valid after graph "
"segmentation. Building an engine using this "
"converter will trigger a native segment "
"fallback.");
}
Status ConvertSquare(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (params->validation_only) return OkStatus();
ITensorProxyPtr const2_tensor = nullptr;
TF_RETURN_IF_ERROR(CreateBroadcastableScalarConstant(
params, 2.0f, inputs.at(0).GetTrtDims(), &const2_tensor));
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*inputs.at(0).tensor()->trt_tensor(), *const2_tensor->trt_tensor(),
nvinfer1::ElementWiseOperation::kPOW);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
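// Converts Sum/Prod/Max/Min/Mean to an IReduceLayer; the TF axis list is
// translated into TRT's reduce-axes bitmask and keep_dims is forwarded.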
Status ConvertReduce(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"axis", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
auto tf_axes_list = inputs.at(1).weights().GetSpan<int>();
DataType idx_dtype{DataType::DT_INT32};
bool keep_dims{false};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "Tidx", &idx_dtype));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "keep_dims", &keep_dims));
if (idx_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Tidx supports only DT_INT32");
}
int axes = 0;
if (tf_axes_list.size() == 0) {
return errors::InvalidArgument(
"TRT cannot support reduce on all (batch) dimensions");
}
for (int i = 0; i < tf_axes_list.size(); i++) {
int trt_axis;
TF_RETURN_IF_ERROR(
ConvertAxis(tf_axes_list[i], tensor->getDimensions().nbDims,
node_def.name(), params->use_implicit_batch, &trt_axis));
axes |= (1 << trt_axis);
}
nvinfer1::ReduceOperation reduce_operation;
if (node_def.op() == "Sum") {
reduce_operation = nvinfer1::ReduceOperation::kSUM;
} else if (node_def.op() == "Prod") {
reduce_operation = nvinfer1::ReduceOperation::kPROD;
} else if (node_def.op() == "Max") {
reduce_operation = nvinfer1::ReduceOperation::kMAX;
} else if (node_def.op() == "Min") {
reduce_operation = nvinfer1::ReduceOperation::kMIN;
} else if (node_def.op() == "Mean") {
reduce_operation = nvinfer1::ReduceOperation::kAVG;
} else {
return errors::Unimplemented("Op not supported ", node_def.op());
}
if (params->validation_only) return OkStatus();
nvinfer1::ILayer* layer = params->converter->network()->addReduce(
*tensor->trt_tensor(), reduce_operation, axes, keep_dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
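// Converts Pack by expanding each input with a unit dimension at the pack
// axis and concatenating the expanded tensors along that axis.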
Status ConvertPack(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
int num_inputs{0};
int64_t tf_axis{0};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "N", &num_inputs));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "axis", &tf_axis));
if (num_inputs != inputs.size()) {
return errors::InvalidArgument(
"Number of inputs for Pack is inconsistent with N attribute");
}
TrtInputArg expected_arg =
params->use_implicit_batch ? TrtInputArg::kTensor : TrtInputArg::kBoth;
std::vector<std::pair<string, TrtInputArg>> inputs_is_weight;
inputs_is_weight.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_is_weight.push_back({StrCat("values_", i), expected_arg});
}
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, inputs_is_weight));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT32};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
if (num_inputs > 1) {
TF_RETURN_IF_ERROR(
VerifyShapesMatch(inputs, -1, node_def.name()));
}
int idx = 0;
for (int i = 1; i < inputs.size(); i++) {
if (HasStaticShape(inputs.at(i).GetTrtDims())) {
idx = i;
}
}
DimsAdapter dims(inputs.at(idx).GetTrtDims());
int trt_axis{0};
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.NumDims() + 1, node_def.name(),
params->use_implicit_batch, &trt_axis));
std::vector<int64_t> tensor_dims(dims.begin(), dims.end());
tensor_dims.insert(tensor_dims.begin() + trt_axis, 1);
std::vector<ITensorProxyPtr> expanded_tensors;
int input_index = 0;
for (const TRT_TensorOrWeights& input : inputs) {
ITensorProxyPtr expanded_tensor = nullptr;
if (input.is_tensor() && !params->use_implicit_batch &&
!HasStaticShape(dims)) {
if (!params->validation_only) {
TF_RETURN_IF_ERROR(params->converter->DynamicExpandDims(
input.tensor(),
dims.AsTrtDims(),
trt_axis,
params,
&expanded_tensor,
input_index));
}
} else {
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter,
input,
DimsAdapter(tensor_dims),
params->validation_only,
&expanded_tensor,
node_def,
input_index));
}
if (!params->validation_only) {
expanded_tensors.push_back(expanded_tensor);
}
input_index++;
}
if (params->validation_only) return OkStatus();
if (num_inputs == 1) {
params->outputs->push_back(TRT_TensorOrWeights(expanded_tensors[0]));
return OkStatus();
}
std::vector<nvinfer1::ITensor*> trt_expanded_tensors;
for (const auto& t : expanded_tensors) {
trt_expanded_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_expanded_tensors.data()),
expanded_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "concat");
layer->setAxis(trt_axis);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
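// Converts Pad to an IPaddingLayer. TRT padding only covers the two innermost
// dimensions, so padded axes elsewhere are transposed into those positions
// (and back afterwards); padding more than two dimensions or the batch
// dimension is rejected.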
Status ConvertPad(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"tensor", false}, {"paddings", true}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT8}));
ITensorProxyPtr tensor = inputs.at(0).tensor();
const auto dims = tensor->getDimensions();
const int nb_dims =
params->use_implicit_batch ? dims.nbDims + 1 : dims.nbDims;
if (nb_dims < 4) {
return errors::InvalidArgument("Convertpad requires at least 4D input");
}
TRT_ShapedWeights pads = inputs.at(1).weights();
DataType padding_dtype{DataType::DT_INT32};
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "Tpaddings", &padding_dtype));
if (pads.Shape().dim(0) != nb_dims || pads.Shape().dim(1) != 2) {
return errors::InvalidArgument("Paddings must be a weight with shape ",
"[n, 2], where n is the rank of input ",
"tensor");
}
if (padding_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Tpaddings supports only DT_INT32");
}
auto pad_data = pads.GetPointer<int>();
std::vector<int32_t> tf_pad_index;
for (int i = 0; i < nb_dims; i++) {
if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
tf_pad_index.push_back(i);
}
}
if (tf_pad_index.empty()) {
params->outputs->push_back(inputs.at(0));
return OkStatus();
}
if (tf_pad_index.size() > 2) {
return errors::InvalidArgument(
"Padding layer does not support padding on > 2");
}
if (params->use_implicit_batch && tf_pad_index[0] == 0) {
return errors::InvalidArgument(
"Padding layer does not support padding on batch dimension");
}
if (params->validation_only) return OkStatus();
bool transposed_pad = false;
std::vector<int> transpose_idx(nb_dims);
std::iota(transpose_idx.begin(), transpose_idx.end(), 0);
std::vector<int> trt_pad_index{nb_dims - 2, nb_dims - 1};
nvinfer1::DimsHW pre_padding(0, 0);
nvinfer1::DimsHW post_padding(0, 0);
std::vector<int> trt_pre_post_padding_index{0, 1};
if (tf_pad_index.size() == 1 && tf_pad_index[0] == nb_dims - 1) {
trt_pad_index[0] = nb_dims - 1;
trt_pre_post_padding_index[0] = 1;
}
if (tf_pad_index.size() == 2 && tf_pad_index[1] == nb_dims - 2) {
std::swap(trt_pad_index[0], trt_pad_index[1]);
std::swap(trt_pre_post_padding_index[0], trt_pre_post_padding_index[1]);
}
for (int i = 0; i < tf_pad_index.size(); i++) {
const int tf_index = tf_pad_index[i];
const int trt_index = trt_pad_index[i];
const int k = trt_pre_post_padding_index[i];
pre_padding.d[k] = pad_data[tf_index * 2];
post_padding.d[k] = pad_data[tf_index * 2 + 1];
if (tf_index != trt_index) {
transposed_pad = true;
std::swap(transpose_idx[tf_index], transpose_idx[trt_index]);
}
}
if (transposed_pad) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, transpose_idx, &tensor, node_def, "to_pad"));
}
nvinfer1::IPaddingLayer* layer = params->converter->network()->addPadding(
*tensor->trt_tensor(), pre_padding, post_padding);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
if (transposed_pad) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output_tensor, transpose_idx, &output_tensor, node_def, "from_pad"));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
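// Shared implementation for Split and Unpack: slices the input into num_splits
// equal pieces along the chosen axis via ConvertStridedSliceHelper, optionally
// squeezing the split axis away afterwards (the Unpack case).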
Status ConvertSplitHelper(const OpConverterParams* params,
const TRT_TensorOrWeights& input, int tf_axis,
int num_splits, bool squeeze_after) {
const auto& node_def = params->node_def;
const nvinfer1::Dims dims = input.GetTrtDims();
int trt_axis;
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
if (dims.d[trt_axis] < 0) {
return errors::InvalidArgument("Dimension ", tf_axis,
" must have statically defined dimensions");
}
if (squeeze_after && dims.d[trt_axis] != num_splits) {
return errors::InvalidArgument(
"Dimension ", tf_axis, " has size ", dims.d[trt_axis],
" which is not equal to num of ", num_splits);
}
if (dims.d[trt_axis] % num_splits != 0) {
return errors::InvalidArgument("Dimension ", tf_axis, " of size ",
dims.d[trt_axis],
" is not evenly divisible by ", num_splits);
}
std::vector<int> begin(dims.nbDims, 0);
std::vector<int64> input_dims(dims.d, dims.d + dims.nbDims);
std::vector<int> size(dims.d, dims.d + dims.nbDims);
const int split_size_on_axis = dims.d[trt_axis] / num_splits;
size[trt_axis] = split_size_on_axis;
std::vector<int> stride(dims.nbDims, 1);
if (params->use_implicit_batch) {
begin.insert(begin.begin(), 0);
size.insert(size.begin(), 1);
stride.insert(stride.begin(), 1);
input_dims.insert(input_dims.begin(), std::max(-1, input.batch_size()));
}
PartialTensorShape input_shape(input_dims);
std::optional<nvinfer1::Dims> final_shape_for_unpack = std::nullopt;
const bool is_dynamic_shape = !HasStaticShape(dims);
if (squeeze_after && !is_dynamic_shape) {
std::vector<int> size_after_squeeze(size);
const int tf_axis = trt_axis + (params->use_implicit_batch ? 1 : 0);
size_after_squeeze.erase(size_after_squeeze.begin() + tf_axis);
DimsAdapter adap(size_after_squeeze);
if (params->use_implicit_batch)
TF_RETURN_IF_ERROR(adap.RemoveBatchDimension());
final_shape_for_unpack = adap.AsTrtDims();
}
for (int i = 0; i < num_splits; ++i) {
const int tf_axis = trt_axis + (params->use_implicit_batch ? 1 : 0);
begin[tf_axis] = i * split_size_on_axis;
absl::InlinedVector<int64, 4> stride_v(begin.size(), 1);
absl::InlinedVector<int64, 4> begin_v;
absl::InlinedVector<int64, 4> end_v;
for (int i = 0; i < begin.size(); i++) {
end_v.push_back(begin[i] + size[i]);
begin_v.push_back(begin[i]);
}
TF_RETURN_IF_ERROR(ConvertStridedSliceHelper(
params, input, input_shape, begin_v, stride_v, end_v,
final_shape_for_unpack,
i, std::nullopt));
}
if (params->validation_only) return OkStatus();
if (squeeze_after && is_dynamic_shape) {
for (int i = 0; i < params->outputs->size(); i++) {
ITensorProxyPtr output_tensor = nullptr;
std::vector<int> in_dims(dims.d, dims.d + dims.nbDims);
input_dims[trt_axis] = 0;
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
params->outputs->at(i).tensor(),
&in_dims,
params,
&output_tensor,
i));
(*params->outputs)[i] = TRT_TensorOrWeights(output_tensor);
}
}
return OkStatus();
}
Status ConvertSplit(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"axis", true}, {"value", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
int tf_axis = inputs.at(0).weights().GetSpan<int>()[0];
int num_split;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "num_split", &num_split));
return ConvertSplitHelper(params, inputs.at(1), tf_axis, num_split, false);
}
Status ConvertUnpack(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"value", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
if (inputs.at(0).GetTrtDims().nbDims == 0) {
return errors::Unimplemented(
"Input \"value\" for Unpack must be rank 2 or greater");
}
int tf_axis = 0, num = 0;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "axis", &tf_axis));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "num", &num));
return ConvertSplitHelper(params, inputs.at(0), tf_axis, num, true);
}
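// Cast is only accepted between float32 and float16; no TRT layer is emitted
// and the input is simply forwarded like Identity.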
Status ConvertCast(const OpConverterParams* params) {
auto unsupport_cast_error = [&](string msg) {
return errors::Unimplemented("Cast op is not supported - ", msg);
};
if (isExperimentalFeatureActivated("reject_all_fp_cast_ops")) {
LOG(WARNING) << "`TF_TRT_EXPERIMENTAL_FEATURES=reject_all_fp_cast_ops`is "
<< "meant as a workaround. If the Cast converter leads to any "
<< "performance or accuracy regression, please open an issue "
<< "on GitHub.";
return unsupport_cast_error(
"TF_TRT_EXPERIMENTAL_FEATURES=reject_all_fp_cast_ops has been defined");
}
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF};
DataType input_type;
TF_RETURN_IF_ERROR(GetInputTfType(*params, &input_type, 0));
if (allowed_types.find(input_type) == allowed_types.end()) {
return unsupport_cast_error(
StrCat("Allowed input dtypes: [", DataTypeString(DataType::DT_FLOAT),
", ", DataTypeString(DataType::DT_HALF),
"]. Received: ", DataTypeString(input_type)));
}
DataType output_type;
TF_RETURN_IF_ERROR(GetNodeDefTfType(params->node_def, &output_type,
kCastOutputTypeAttrName));
if (allowed_types.find(output_type) == allowed_types.end()) {
return unsupport_cast_error(
StrCat("Allowed output dtypes: [", DataTypeString(DataType::DT_FLOAT),
", ", DataTypeString(DataType::DT_HALF),
"]. Received: ", DataTypeString(output_type)));
}
return ConvertIdentity(params);
}
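// Converts ConcatV2: constant inputs are materialized as constant layers and
// everything is fed to a single IConcatenationLayer on the converted axis.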
Status ConvertConcat(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
int num_inputs{0};
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "N", &num_inputs));
if (num_inputs != static_cast<int>(inputs.size()) - 1) {
return errors::InvalidArgument(
"Number of inputs for ConcatV2 is inconsistent with N attributes.");
}
std::vector<std::pair<string, TrtInputArg>> inputs_kinds;
TrtInputArg expected_input =
params->use_implicit_batch ? TrtInputArg::kTensor : TrtInputArg::kBoth;
inputs_kinds.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
inputs_kinds.push_back({StrCat("values_", i), expected_input});
}
inputs_kinds.push_back({"axis", TrtInputArg::kWeight});
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, inputs_kinds));
std::set<DataType> allowed_types{DataType::DT_FLOAT, DataType::DT_HALF,
DataType::DT_INT32};
TF_RETURN_IF_ERROR(AllowDataTypes(*params, allowed_types));
const auto axis = inputs.at(num_inputs).weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("Axis for ConcatV2 must be a scalar");
}
int trt_axis = 0;
const auto dim = inputs.at(0).GetTrtDims();
TF_RETURN_IF_ERROR(ConvertAxis(axis[0], dim.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
TF_RETURN_IF_ERROR(VerifyShapesMatch(
absl::Span<const TRT_TensorOrWeights>(inputs).first(num_inputs), trt_axis,
node_def.name()));
if (params->validation_only) return OkStatus();
std::vector<ITensorProxyPtr> input_tensors;
input_tensors.reserve(num_inputs);
for (int i = 0; i < num_inputs; i++) {
if (inputs.at(i).is_tensor()) {
input_tensors.push_back(inputs.at(i).tensor());
} else {
input_tensors.push_back(params->converter->CreateConstantLayer(
inputs.at(i).weights(), inputs.at(i).GetTrtDims()));
}
}
std::vector<nvinfer1::ITensor*> trt_input_tensors;
for (const auto& t : input_tensors) {
trt_input_tensors.push_back(t->trt_tensor());
}
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
static_cast<nvinfer1::ITensor* const*>(trt_input_tensors.data()),
input_tensors.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setAxis(trt_axis);
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return OkStatus();
}
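// Converts FusedBatchNorm in inference mode by folding scale, offset, mean and
// variance into a single per-channel scale and shift (see the loop below);
// NCHW uses an IScaleLayer, NHWC an explicit multiply/add pair.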
Status ConvertFusedBatchNorm(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false},
{"scale", true},
{"offset", true},
{"mean", true},
{"variance", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
float epsilon{0.1f};
string data_format;
bool is_training{false};
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "epsilon", &epsilon));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "is_training", &is_training));
if (is_training) {
LOG_WARNING_WITH_PREFIX
<< node_def.op() << " only supports is_training=false. If you "
<< "are using Keras, please call "
<< "keras.backend.set_learning_phase(0) before constructing "
<< "your model. At " << node_def.name();
return errors::Unimplemented(node_def.op(),
" only supports is_training=false");
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
if (!params->use_implicit_batch) {
int channel_dim = (data_format == "NCHW" ? 1 : 3);
if (tensor->getDimensions().d[channel_dim] == -1) {
return errors::InvalidArgument("Channel dimension must be static");
}
}
auto parameter_type = inputs.at(1).weights().TrtDType();
if ((parameter_type != nvinfer1::DataType::kFLOAT) &&
(parameter_type != nvinfer1::DataType::kHALF)) {
return errors::Unimplemented(
"Only float32 or float16 weight data type is supported,", " got ",
DebugString(parameter_type));
}
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().TrtDType() != parameter_type) {
return errors::Unimplemented(
"Inconsistent parameter type for batchnorm is not supported");
}
}
TRT_ShapedWeights dummy_power_weights(parameter_type);
size_t nweight = 0;
for (int i = 1; i < 5; i++) {
nweight = std::max<size_t>(nweight, inputs.at(i).weights().count());
}
const TRT_ShapedWeights* ptr_shape_weights = nullptr;
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().count() == nweight) {
ptr_shape_weights = &(inputs.at(i).weights());
} else if (inputs.at(i).weights().count() != 1) {
return errors::InvalidArgument("Inconsistent batchnorm parameter count");
}
}
if (params->validation_only) return OkStatus();
StatusOr<TRT_ShapedWeights> combined_scale_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
TRT_ENSURE_OK(combined_scale_weights);
StatusOr<TRT_ShapedWeights> combined_offset_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
TRT_ENSURE_OK(combined_offset_weights);
const Eigen::half* cast_vals_array[4];
const float* vals_array[4];
for (int j = 0; j < 4; j++) {
cast_vals_array[j] = inputs.at(j + 1).weights().GetPointer<Eigen::half>();
vals_array[j] = inputs.at(j + 1).weights().GetPointer<float>();
}
Eigen::half* cast_combined_scale_vals =
combined_scale_weights->GetPointer<Eigen::half>();
Eigen::half* cast_combined_offset_vals =
combined_offset_weights->GetPointer<Eigen::half>();
float* combined_scale_vals = combined_scale_weights->GetPointer<float>();
float* combined_offset_vals = combined_offset_weights->GetPointer<float>();
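  // Fold the four batch-norm parameters into one scale and one offset:
  //   combined_scale  = scale / sqrt(variance + epsilon)
  //   combined_offset = offset - mean * combined_scale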
for (size_t i = 0; i < nweight; ++i) {
float batchnorm_data[4];
for (int j = 0; j < 4; j++) {
if (inputs.at(j + 1).weights().count() != 1) {
if (parameter_type == nvinfer1::DataType::kFLOAT) {
batchnorm_data[j] = vals_array[j][i];
} else if (parameter_type == nvinfer1::DataType::kHALF) {
batchnorm_data[j] = static_cast<float>(cast_vals_array[j][i]);
}
} else {
if (parameter_type == nvinfer1::DataType::kFLOAT) {
batchnorm_data[j] = vals_array[j][0];
} else if (parameter_type == nvinfer1::DataType::kHALF) {
batchnorm_data[j] = static_cast<float>(cast_vals_array[j][0]);
}
}
}
float scale = batchnorm_data[0];
float offset = batchnorm_data[1];
float mean = batchnorm_data[2];
float variance = batchnorm_data[3];
float combined_scale_val = scale / sqrtf(variance + epsilon);
float combined_offset_val = offset - mean * combined_scale_val;
if (parameter_type == nvinfer1::DataType::kFLOAT) {
combined_scale_vals[i] = combined_scale_val;
combined_offset_vals[i] = combined_offset_val;
} else if (parameter_type == nvinfer1::DataType::kHALF) {
cast_combined_scale_vals[i] = Eigen::half(combined_scale_val);
cast_combined_offset_vals[i] = Eigen::half(combined_offset_val);
}
}
ITensorProxyPtr output_tensor;
if (data_format == "NCHW") {
nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
nvinfer1::IScaleLayer* layer = params->converter->network()->addScale(
*tensor->trt_tensor(), mode, combined_offset_weights->GetTrtWeights(),
combined_scale_weights->GetTrtWeights(),
nvinfer1::Weights{nvinfer1::DataType::kFLOAT, nullptr, 0});
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
output_tensor = layer->getOutput(0);
}
if (data_format == "NHWC") {
nvinfer1::Dims dims = tensor->getDimensions();
for (int i = 0; i < dims.nbDims - 1; i++) {
dims.d[i] = 1;
}
dims.d[dims.nbDims - 1] = nweight;
StatusOr<TRTNetworkBuilder> builder = TRTNetworkBuilder::Create(
params->converter->network(), params->weight_store);
TRT_ENSURE_OK(builder);
auto scale_constant_layer = builder->WeightsToConstant(
combined_scale_weights->GetTrtWeights(), dims);
ITensorProxyPtr scale_constant = (*scale_constant_layer)->getOutput(0);
auto scale_layer =
builder->Mul(tensor->trt_tensor(), scale_constant->trt_tensor());
auto offset_constant_layer = builder->WeightsToConstant(
combined_offset_weights->GetTrtWeights(), dims);
ITensorProxyPtr offset_constant = (*offset_constant_layer)->getOutput(0);
auto offset_layer = builder->Add((*scale_layer)->getOutput(0),
offset_constant->trt_tensor());
output_tensor = (*offset_layer)->getOutput(0);
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
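// Converts GatherV2 to an IGatherLayer. In implicit-batch mode the layer's
// output rank differs from TF's, so the result is reshaped to insert or drop a
// dimension at the gather axis depending on whether params and indices were
// both tensors or both weights.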
Status ConvertGather(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"params", TrtInputArg::kBoth},
{"indices", TrtInputArg::kBoth},
{"axis", TrtInputArg::kWeight}}));
const auto& params_input = inputs.at(0);
const auto& indices_input = inputs.at(1);
const auto& axis_input = inputs.at(2);
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32},
"Tparams"));
TF_RETURN_IF_ERROR(AllowDataTypes(*params, {DataType::DT_INT32},
"Tindices"));
absl::Span<const int> axis = axis_input.weights().GetSpan<int>();
if (axis.size() != 1) {
return errors::InvalidArgument("Axis for GatherV2 must be a scalar");
}
int trt_axis = 0;
TF_RETURN_IF_ERROR(ConvertAxis(
axis[0], params_input.GetTrtDims().nbDims, node_def.name(),
params->use_implicit_batch && params_input.is_tensor(), &trt_axis));
if (params->use_implicit_batch && params_input.is_weights() &&
trt_axis != 0) {
return errors::Unimplemented(
"The input axis must be zero when params is a weight.");
}
if (params->use_implicit_batch &&
(params_input.is_tensor() == indices_input.is_tensor()) &&
(indices_input.batch_size() != 1 || params_input.batch_size() != 1)) {
return errors::Unimplemented(
"Params and indices must have a batch size of 1 when params and indices"
" are both tensors or both constants.");
}
auto get_rank = [params](const auto& input) {
return input.GetTrtDims().nbDims +
(params->use_implicit_batch && input.is_tensor() ? 1 : 0);
};
const int params_tf_rank = get_rank(params_input);
const int indices_tf_rank = get_rank(indices_input);
const int tf_gather_output_rank = params_tf_rank + indices_tf_rank - 1;
if (tf_gather_output_rank >
nvinfer1::Dims::MAX_DIMS + (params->use_implicit_batch ? 1 : 0)) {
return errors::InvalidArgument(
"Result of gather has dimension greater than ",
nvinfer1::Dims::MAX_DIMS + 1);
}
int32 batch_dims;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def, "batch_dims", &batch_dims));
if (params->use_implicit_batch && batch_dims) {
return errors::InvalidArgument(
"batch_dims must be zero in implicit batch mode");
}
if (!params->use_implicit_batch && batch_dims > 1) {
return errors::InvalidArgument(
"batch_dims cannot exceed 1 in dynamic shape mode");
}
if (params->validation_only) return OkStatus();
auto populate_tensor = [params](const auto& input) -> ITensorProxyPtr {
ITensorProxyPtr result_tensor = nullptr;
if (input.is_weights()) {
result_tensor = params->converter->CreateConstantLayer(
input.weights(), input.GetTrtDims());
} else {
result_tensor = input.tensor();
}
return result_tensor;
};
ITensorProxyPtr params_tensor = populate_tensor(params_input);
ITensorProxyPtr indices_tensor = populate_tensor(indices_input);
nvinfer1::IGatherLayer* layer = params->converter->network()->addGather(
*params_tensor->trt_tensor(), *indices_tensor->trt_tensor(), trt_axis);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setNbElementWiseDims(batch_dims);
ITensorProxyPtr output_tensor = layer->getOutput(0);
nvinfer1::Dims trt_gather_output_dims = output_tensor->getDimensions();
if (params->use_implicit_batch) {
const int expected_trt_output_rank = tf_gather_output_rank -
(params_input.is_tensor() ? 1 : 0) -
(indices_input.is_tensor() ? 1 : 0);
if (trt_gather_output_dims.nbDims != expected_trt_output_rank) {
return errors::Internal(
"Get unexpected output dimensions of IGatherLayer. Expect nbDims: ",
expected_trt_output_rank,
", actual nbDims: ", trt_gather_output_dims.nbDims);
}
}
if (params->use_implicit_batch && params_input.is_tensor() &&
indices_input.is_tensor()) {
for (int i = trt_gather_output_dims.nbDims; i > trt_axis; --i) {
trt_gather_output_dims.d[i] = trt_gather_output_dims.d[i - 1];
}
trt_gather_output_dims.d[trt_axis] = 1;
++trt_gather_output_dims.nbDims;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor),
trt_gather_output_dims,
false, &output_tensor, node_def));
}
if (params->use_implicit_batch && params_input.is_weights() &&
indices_input.is_weights()) {
for (int i = trt_axis; i < trt_gather_output_dims.nbDims - 1; ++i) {
trt_gather_output_dims.d[i] = trt_gather_output_dims.d[i + 1];
}
--trt_gather_output_dims.nbDims;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor),
trt_gather_output_dims,
false, &output_tensor, node_def));
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
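// Tries to lower a MatMul to an IFullyConnectedLayer. This only applies when
// A is a non-transposed tensor and B is a constant; a null tensor proxy is
// returned when the pattern does not match so the caller can fall back to
// IMatrixMultiplyLayer.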
StatusOr<ITensorProxyPtr> ConvertFullyConnectedImpl(
const OpConverterParams* params, TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b, bool transpose_a, bool transpose_b) {
if (!(!transpose_a && input_a.is_tensor() && input_b.is_weights())) {
VLOG(2) << "Not FC compatible, A must be non transposed tensor, and B "
"must be constant.";
return ITensorProxyPtr(nullptr);
}
if (!params->use_implicit_batch && input_b.GetTrtDims().nbDims > 2 &&
input_b.GetTrtDims().d[0] != 1) {
VLOG(2) << "Not FC compatible, if B has an explicit batch dimension, then "
"it must be 1.";
return ITensorProxyPtr(nullptr);
}
nvinfer1::Dims input_dim = input_a.GetTrtDims();
if (input_dim.d[input_dim.nbDims - 1] == -1) {
VLOG(2) << "Not FC compatible, last dim of A must be static.";
return ITensorProxyPtr(nullptr);
}
if (input_dim.nbDims + 2 > nvinfer1::Dims::MAX_DIMS) {
VLOG(2) << "Not FC compatible, cannot expand A's shape.";
return ITensorProxyPtr(nullptr);
}
ITensorProxyPtr tensor_a = nullptr;
auto reshape_dim = DimsAdapter(input_dim.nbDims,
DimsAdapter::StorageType(input_dim.nbDims, 0))
.Append(1)
.Append(1);
const NodeDef& node_def = params->node_def;
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, input_a, reshape_dim,
false, &tensor_a, node_def, 0,
"FULLY_CONNECTED"));
VLOG(2) << "New shape of A " << DebugString(tensor_a->getDimensions());
TRT_ShapedWeights weights_b = input_b.weights();
TRT_ShapedWeights weights_2D(weights_b);
if (weights_b.Shape().NumDims() > 2) {
if (std::any_of(weights_b.Shape().begin(),
weights_b.Shape().begin() + weights_b.Shape().NumDims() - 2,
[](int d) { return d != 1; })) {
VLOG(2) << "Not FC compatible, B has a batch dim larger than 1";
return ITensorProxyPtr(nullptr);
}
int k = weights_b.Shape().dim(weights_b.Shape().NumDims() - 1);
nvinfer1::Dims dims{2, {static_cast<int>(weights_b.count() / k), k}};
TF_RETURN_IF_ERROR(weights_2D.SetShape(dims));
}
TRT_ShapedWeights weights(weights_2D.TrtDType());
if (!transpose_b) {
auto tmp = params->weight_store->GetTempWeights(weights_2D);
TRT_ENSURE_OK(tmp);
weights = std::move(tmp).value();
ReorderCKtoKC(weights_2D, &weights);
} else {
weights = weights_2D;
}
TRT_ShapedWeights biases(weights.TrtDType());
int k = weights.Shape().dim(weights.Shape().NumDims() - 1);
const int noutput = weights.count() / k;
VLOG(2) << "Using fully connected layer with k=" << k
<< ", n_output=" << noutput
<< " weights shape: " << weights.Shape().DebugString()
<< " to convert " << node_def.op();
nvinfer1::IFullyConnectedLayer* layer =
params->converter->network()->addFullyConnected(
*tensor_a->trt_tensor(), noutput, weights.GetTrtWeights(),
biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_tensor = layer->getOutput(0);
auto output_dim = output_tensor->getDimensions();
output_dim.nbDims -= 2;
std::fill(output_dim.d, output_dim.d + output_dim.nbDims, 0);
TF_RETURN_IF_ERROR(PrepareTensorForShape(
params->converter, TRT_TensorOrWeights(output_tensor), output_dim,
false, &output_tensor, node_def,
1, "FULLY_CONNECTED"));
return output_tensor;
}
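// Core MatMul lowering: first attempts the fully-connected fast path above,
// otherwise builds an IMatrixMultiplyLayer, marking transposed operands and
// treating a rank-1 A operand as a vector.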
StatusOr<ITensorProxyPtr> ConvertMatMulImpl(const OpConverterParams* params,
TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b,
bool transpose_a,
bool transpose_b) {
if (params->use_implicit_batch) {
if ((input_a.GetTrtDims().nbDims < 2 &&
(transpose_a || !input_b.is_weights())) ||
(input_b.GetTrtDims().nbDims < 2)) {
return errors::InvalidArgument(
"MatMul with 2D tensors requires explicit batch mode, or that tensor"
" A is not transposed and B is a constant tensor.");
}
}
if (params->validation_only) return ITensorProxyPtr(nullptr);
StatusOr<ITensorProxyPtr> result = ConvertFullyConnectedImpl(
params, input_a, input_b, transpose_a, transpose_b);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr output = result.value();
if (*output) {
return output;
}
const auto convert_to_itensor =
      [&params](TRT_TensorOrWeights operand) -> ITensorProxyPtr {
if (operand.is_tensor()) {
return operand.tensor();
} else {
return params->converter->CreateConstantLayer(operand.weights(),
operand.GetTrtDims());
}
};
ITensorProxyPtr tensor_a = convert_to_itensor(input_a);
ITensorProxyPtr tensor_b = convert_to_itensor(input_b);
const auto get_matrix_op = [](ITensorProxyPtr in,
bool transpose) -> nvinfer1::MatrixOperation {
return (transpose) ? nvinfer1::MatrixOperation::kTRANSPOSE
: nvinfer1::MatrixOperation::kNONE;
};
nvinfer1::MatrixOperation op_a, op_b;
op_a = (tensor_a->getDimensions().nbDims < 2)
? nvinfer1::MatrixOperation::kVECTOR
: get_matrix_op(tensor_a, transpose_a);
op_b = get_matrix_op(tensor_b, transpose_b);
nvinfer1::IMatrixMultiplyLayer* layer =
params->converter->network()->addMatrixMultiply(
*tensor_a->trt_tensor(), op_a, *tensor_b->trt_tensor(), op_b);
const auto& node_def = params->node_def;
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
return ITensorProxyPtr(layer->getOutput(0));
}
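// Thin wrapper around ConvertMatMulImpl that appends the resulting tensor to
// the op's outputs unless we are only validating.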
Status ConvertMatMulHelper(const OpConverterParams* params,
TRT_TensorOrWeights input_a,
TRT_TensorOrWeights input_b, bool transpose_a,
bool transpose_b) {
StatusOr<ITensorProxyPtr> result =
ConvertMatMulImpl(params, input_a, input_b, transpose_a, transpose_b);
TF_RETURN_IF_ERROR(result.status());
if (!params->validation_only) {
params->outputs->push_back(TRT_TensorOrWeights(result.value()));
}
return OkStatus();
}
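// Converter for MatMul: checks the input count and allowed dtypes, reads the
// transpose_a/transpose_b attributes, and delegates to ConvertMatMulHelper.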
Status ConvertMatMul(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
bool transpose_a = false, transpose_b = false;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "transpose_a", &transpose_a));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "transpose_b", &transpose_b));
return ConvertMatMulHelper(params, inputs.at(0), inputs.at(1), transpose_a,
transpose_b);
}
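// Converter for BatchMatMul/BatchMatMulV2. Rejects the all-weights case
// (Grappler is expected to constant-fold it), disallows batched constants in
// implicit batch mode, broadcasts the operands, then reuses the MatMul path.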
Status ConvertBatchMatMul(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFTRT_CHECK_INPUT_SIZE(inputs.size(), 2, node_def);
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"x", TrtInputArg::kBoth}, {"y", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
return errors::InvalidArgument(
"All inputs are weights, but Grappler is expected to fold them.");
}
bool transpose_a = false, transpose_b = false;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "adj_x", &transpose_a));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "adj_y", &transpose_b));
const auto check_weight_is_not_batched =
[](const TRT_TensorOrWeights& input_l,
const TRT_TensorOrWeights& input_r) {
if (input_l.is_weights() &&
input_l.GetTrtDims().nbDims > input_r.GetTrtDims().nbDims &&
input_l.GetTrtDims().d[0] != 1) {
return errors::Unimplemented(
"TensorRT does not support batched constants in implicit batch "
"mode.");
}
return OkStatus();
};
if (params->use_implicit_batch) {
TF_RETURN_IF_ERROR(check_weight_is_not_batched(inputs.at(0), inputs.at(1)));
TF_RETURN_IF_ERROR(check_weight_is_not_batched(inputs.at(1), inputs.at(0)));
}
auto input_l = std::make_unique<TRT_TensorOrWeights>(inputs.at(0));
auto input_r = std::make_unique<TRT_TensorOrWeights>(inputs.at(1));
TF_RETURN_IF_ERROR(BroadcastTensors(input_l, input_r,
false, params));
if (params->validation_only) return OkStatus();
return ConvertMatMulHelper(params, *input_l, *input_r, transpose_a,
transpose_b);
}
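// Converter for ArgMin/ArgMax. Implements the reduction with a TopK layer
// (k = 1) over the requested axis and squeezes that axis out of the returned
// index tensor. Only DT_INT32 output indices are supported.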
Status ConvertArgMinMax(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"dimension", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
DataType output_dtype{DataType::DT_INT32};
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "output_type", &output_dtype));
if (output_dtype != DataType::DT_INT32) {
return errors::Unimplemented("Output type ", DataTypeString(output_dtype),
" is not supported");
}
int tf_axis = inputs.at(1).weights().GetSpan<int>()[0];
int trt_axis;
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
TF_RETURN_IF_ERROR(ConvertAxis(tf_axis, dims.nbDims, node_def.name(),
params->use_implicit_batch, &trt_axis));
nvinfer1::TopKOperation topk_op;
if (node_def.op() == "ArgMin") {
topk_op = nvinfer1::TopKOperation::kMIN;
} else if (node_def.op() == "ArgMax") {
topk_op = nvinfer1::TopKOperation::kMAX;
} else {
return errors::InvalidArgument("Unsupported ArgMin/Max operation");
}
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
const nvinfer1::Dims trt_dims = params->inputs.at(0).GetTrtDims();
if (trt_dims.nbDims >= 4) {
string trt_dim_str = DebugString(trt_dims);
return errors::Unimplemented(node_def.op(), " op is not able to support",
" tensors with 4+ dimensions (excluding batch",
" size). Received: ", trt_dim_str);
}
#endif
if (params->validation_only) return OkStatus();
const uint32_t reduce_axes = 1 << trt_axis;
nvinfer1::ITopKLayer* layer = params->converter->network()->addTopK(
*inputs.at(0).tensor()->trt_tensor(), topk_op, 1, reduce_axes);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, "topk");
ITensorProxyPtr output_indices_tensor = layer->getOutput(1);
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
input_dims[trt_axis] = 0;
ITensorProxyPtr output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
output_indices_tensor,
&input_dims,
params,
&output_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return OkStatus();
}
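// Converter for TopKV2. Requires sorted=True and a scalar constant k, always
// reduces over the innermost dimension, and emits both the values and the
// indices outputs of the TopK layer.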
Status ConvertTopK(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
CheckInputsWeights(*params, {{"input", false}, {"k", true}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
bool sorted{false};
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "sorted", &sorted));
if (!sorted) {
return errors::InvalidArgument("Only sorted=True is supported");
}
ITensorProxyPtr tensor = inputs.at(0).tensor();
const int num_dims = tensor->getDimensions().nbDims;
if (num_dims == 0) {
return errors::InvalidArgument(
"TensorRT TopK cannot apply on batch dimension");
}
TRT_ShapedWeights k_w = inputs.at(1).weights();
if (k_w.count() != 1) {
return errors::InvalidArgument("k value of TopK should be a scalar");
}
if (params->validation_only) return OkStatus();
const nvinfer1::TopKOperation op = nvinfer1::TopKOperation::kMAX;
const int k = *(k_w.GetPointer<int>());
const uint32_t reduce_axes = 1 << (num_dims - 1);
nvinfer1::ITopKLayer* layer = params->converter->network()->addTopK(
*tensor->trt_tensor(), op, k, reduce_axes);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
ITensorProxyPtr output_value_tensor = layer->getOutput(0);
ITensorProxyPtr output_indices_tensor = layer->getOutput(1);
params->outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
return OkStatus();
}
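// Builds the two reshape-shape tensors needed by ConvertDepthSpaceShuffle when
// the input shape is dynamic, by slicing the runtime shape tensor and
// combining the pieces with the block size via elementwise operations.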
StatusOr<std::pair<ITensorProxyPtr, ITensorProxyPtr>>
CalcDepthSpaceDynamicShape(const OpConverterParams* params, int block_size,
string data_format) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const int channels_axis = data_format == "NCHW" ? 1 : 3;
const int h_axis = data_format == "NCHW" ? 2 : 1;
const int w_axis = data_format == "NCHW" ? 3 : 2;
ITensorProxyPtr shape = params->converter->network()
->addShape(*inputs.at(0).tensor()->trt_tensor())
->getOutput(0);
ITensorProxyPtr batch_size =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr num_channels =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {channels_axis}}, {1, {1}},
{1, {1}})
->getOutput(0);
ITensorProxyPtr h =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {h_axis}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr w =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {w_axis}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr r;
TF_RETURN_IF_ERROR(CreateScalarConstant(params, block_size, &r));
ITensorProxyPtr r_squared;
TF_RETURN_IF_ERROR(
CreateScalarConstant(params, block_size * block_size, &r_squared));
std::vector<ITensorProxyPtr> first_shuffle_tensors(6, nullptr);
std::vector<ITensorProxyPtr> second_shuffle_tensors(4, nullptr);
if (node_def.op() == "DepthToSpace") {
first_shuffle_tensors[0] = batch_size;
first_shuffle_tensors[1] = r;
first_shuffle_tensors[2] = r;
first_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[4] = h;
first_shuffle_tensors[5] = w;
second_shuffle_tensors[0] = batch_size;
second_shuffle_tensors[1] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
second_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
second_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
} else if (node_def.op() == "SpaceToDepth") {
first_shuffle_tensors[0] = batch_size;
first_shuffle_tensors[1] = num_channels;
first_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[3] = r;
first_shuffle_tensors[4] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
first_shuffle_tensors[5] = r;
second_shuffle_tensors[0] = batch_size;
second_shuffle_tensors[1] =
params->converter->network()
->addElementWise(*num_channels->trt_tensor(),
*r_squared->trt_tensor(),
nvinfer1::ElementWiseOperation::kPROD)
->getOutput(0);
second_shuffle_tensors[2] =
params->converter->network()
->addElementWise(*h->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
second_shuffle_tensors[3] =
params->converter->network()
->addElementWise(*w->trt_tensor(), *r->trt_tensor(),
nvinfer1::ElementWiseOperation::kDIV)
->getOutput(0);
}
StatusOr<ITensorProxyPtr> result =
ConcatenateTensors(params, first_shuffle_tensors, 0);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr first_shuffle_shape = result.value();
result = ConcatenateTensors(params, second_shuffle_tensors, 1);
TF_RETURN_IF_ERROR(result.status());
ITensorProxyPtr second_shuffle_shape = result.value();
return std::make_pair(first_shuffle_shape, second_shuffle_shape);
}
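// Converter for DepthToSpace and SpaceToDepth. Expresses the op as two
// IShuffleLayers (reshape -> transpose -> reshape), with extra layout
// transposes when the data format is NHWC. Static input shapes use precomputed
// reshape dimensions; dynamic shapes use the shape tensors built above.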
Status ConvertDepthSpaceShuffle(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"input", false}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
string data_format;
int block_size;
AttrSlice attrs(node_def);
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "data_format", &data_format));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "block_size", &block_size));
if (block_size < 2) {
return errors::InvalidArgument("Block size must be 2 or greater");
}
if (data_format != "NCHW" && data_format != "NHWC") {
return errors::Unimplemented("Data format ", data_format,
" is not supported");
}
int idx_offset = params->use_implicit_batch ? 0 : 1;
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
const int required_rank = 3 + idx_offset;
if (dims.nbDims != required_rank) {
return errors::InvalidArgument("The input to ", node_def.op(),
" must be rank 4");
}
const int num_channels =
data_format == "NCHW" ? dims.d[0 + idx_offset] : dims.d[2 + idx_offset];
const int h =
data_format == "NCHW" ? dims.d[1 + idx_offset] : dims.d[0 + idx_offset];
const int w =
data_format == "NCHW" ? dims.d[2 + idx_offset] : dims.d[1 + idx_offset];
nvinfer1::Dims first_shuffle_shape;
nvinfer1::Permutation transpose_perm;
nvinfer1::Dims second_shuffle_shape;
if (node_def.op() == "DepthToSpace") {
if (num_channels != -1 && num_channels % (block_size * block_size) != 0) {
return errors::InvalidArgument(
"Number of channels must be divisible by block_size*block_size");
}
first_shuffle_shape = {
5,
{block_size, block_size, num_channels / (block_size * block_size),
h, w}};
transpose_perm = {2, 3, 0, 4, 1};
second_shuffle_shape =
nvinfer1::Dims3(num_channels / (block_size * block_size),
h * block_size, w * block_size);
} else {
if (node_def.op() != "SpaceToDepth")
return errors::InvalidArgument("Incorrect op type ", node_def.op());
if ((h != -1 && h % block_size != 0) || (w != -1 && w % block_size != 0)) {
return errors::InvalidArgument(
"Width and height must be divisible by block_size");
}
first_shuffle_shape = {5,
{num_channels, h / block_size, block_size,
w / block_size, block_size}};
transpose_perm = {2, 4, 0, 1, 3};
second_shuffle_shape = nvinfer1::Dims3(
num_channels * block_size * block_size, h / block_size, w / block_size);
}
if (params->validation_only) return OkStatus();
nvinfer1::IShuffleLayer* first_shuffle =
params->converter->network()->addShuffle(
*inputs.at(0).tensor()->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(first_shuffle, node_def.name());
params->converter->SetLayerName(first_shuffle, node_def, "shuffle",
0);
ITensorProxyPtr second_shuffle_shape_tensor;
if (HasStaticShape(inputs.at(0).GetTrtDims())) {
auto adjust_reshape = [](int N, nvinfer1::Dims dims,
bool use_implicit_batch) {
if (use_implicit_batch) return dims;
for (int i = dims.nbDims; i > 0; i--) {
dims.d[i] = dims.d[i - 1];
}
dims.d[0] = N;
dims.nbDims++;
return dims;
};
first_shuffle_shape = adjust_reshape(dims.d[0], first_shuffle_shape,
params->use_implicit_batch);
second_shuffle_shape = adjust_reshape(dims.d[0], second_shuffle_shape,
params->use_implicit_batch);
first_shuffle->setReshapeDimensions(first_shuffle_shape);
} else {
StatusOr<std::pair<ITensorProxyPtr, ITensorProxyPtr>> result =
CalcDepthSpaceDynamicShape(params, block_size, data_format);
TF_RETURN_IF_ERROR(result.status());
first_shuffle->setInput(1, *result.value().first->trt_tensor());
second_shuffle_shape_tensor = result.value().second;
}
auto adjust_perm = [](int n, nvinfer1::Permutation perm,
bool use_implicit_batch) {
if (use_implicit_batch) return perm;
for (int i = n; i > 0; i--) {
perm.order[i] = perm.order[i - 1] + 1;
}
perm.order[0] = 0;
return perm;
};
transpose_perm = adjust_perm(5, transpose_perm, params->use_implicit_batch);
if (data_format == "NHWC") {
nvinfer1::Permutation layout_transpose =
adjust_perm(3, {2, 0, 1}, params->use_implicit_batch);
first_shuffle->setFirstTranspose(layout_transpose);
}
first_shuffle->setSecondTranspose(transpose_perm);
nvinfer1::IShuffleLayer* second_shuffle =
params->converter->network()->addShuffle(*first_shuffle->getOutput(0));
TFTRT_RETURN_ERROR_IF_NULLPTR(second_shuffle, node_def.name());
params->converter->SetLayerName(second_shuffle, node_def, "shuffle",
1);
if (HasStaticShape(inputs.at(0).GetTrtDims())) {
second_shuffle->setReshapeDimensions(second_shuffle_shape);
} else {
second_shuffle->setInput(1, *second_shuffle_shape_tensor->trt_tensor());
}
if (data_format == "NHWC") {
nvinfer1::Permutation layout_transpose =
adjust_perm(3, {1, 2, 0}, params->use_implicit_batch);
second_shuffle->setSecondTranspose(layout_transpose);
}
params->outputs->push_back(TRT_TensorOrWeights(second_shuffle->getOutput(0)));
return OkStatus();
}
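// Converter for SquaredDifference: broadcasts the operands, then emits
// (x - y) * (x - y) with two elementwise layers.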
Status ConvertSquaredDifference(const OpConverterParams* params) {
TF_RETURN_IF_ERROR(CheckInputsWeights(*params, {{"x", false}, {"y", false}}));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
nvinfer1::Dims broadcasted_dims_l, broadcasted_dims_r;
TF_RETURN_IF_ERROR(GetTrtBroadcastShape(
inputs.at(0), inputs.at(1), true,
params->use_implicit_batch, &broadcasted_dims_l, &broadcasted_dims_r));
ITensorProxyPtr tensor_l = nullptr;
ITensorProxyPtr tensor_r = nullptr;
TF_RETURN_IF_ERROR(
PrepareTensorForShape(params->converter, inputs.at(0), broadcasted_dims_l,
params->validation_only, &tensor_l, node_def));
TF_RETURN_IF_ERROR(
PrepareTensorForShape(params->converter, inputs.at(1), broadcasted_dims_r,
params->validation_only, &tensor_r, node_def));
if (params->validation_only) return OkStatus();
nvinfer1::IElementWiseLayer* sub =
params->converter->network()->addElementWise(
*tensor_l->trt_tensor(), *tensor_r->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUB);
TFTRT_RETURN_ERROR_IF_NULLPTR(sub, node_def.name());
params->converter->SetLayerName(sub, node_def, "sub");
nvinfer1::IElementWiseLayer* mul =
params->converter->network()->addElementWise(
*sub->getOutput(0), *sub->getOutput(0),
nvinfer1::ElementWiseOperation::kPROD);
TFTRT_RETURN_ERROR_IF_NULLPTR(mul, node_def.name());
params->converter->SetLayerName(mul, node_def, "mul");
params->outputs->push_back(TRT_TensorOrWeights(mul->getOutput(0)));
return OkStatus();
}
#if IS_TRT_VERSION_GE(7, 1, 3, 0) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
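// Converter for CombinedNonMaxSuppression. Depending on the TensorRT version
// it targets either the EfficientNMS_TFTRT_TRT plugin (newer TensorRT,
// explicit batch only) or the BatchedNMS_TRT plugin (older TensorRT), after
// validating the scalar size and threshold inputs.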
Status ConvertCombinedNMS(const OpConverterParams* params) {
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params, {{"boxes", TrtInputArg::kTensor},
{"scores", TrtInputArg::kTensor},
{"max_output_size_per_class", TrtInputArg::kWeight},
{"max_total_size", TrtInputArg::kWeight},
{"iou_threshold", TrtInputArg::kWeight},
{"score_threshold", TrtInputArg::kWeight}}));
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const auto& node_name = node_def.name();
const ITensorProxyPtr boxes_tensor = inputs.at(0).tensor();
const ITensorProxyPtr scores_tensor = inputs.at(1).tensor();
const auto boxes_dims = boxes_tensor->getDimensions();
const auto scores_dims = scores_tensor->getDimensions();
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
const auto flag = true;
const auto* plugin_name = "NMS TRT Plugin ";
const auto* pluginName = "EfficientNMS_TFTRT_TRT";
#else
const auto flag = false;
const auto* plugin_name = "TensorRT BatchedNMS Plugin ";
const auto* pluginName = "BatchedNMS_TRT";
auto AllowNmsTopkOverride = []() {
static bool result = [] {
bool value;
const Status status = ReadBoolFromEnvVar("TF_TRT_ALLOW_NMS_TOPK_OVERRIDE",
false, &value);
if (!status.ok()) {
LOG(ERROR) << status;
}
return value;
}();
return result;
};
#endif
if (params->use_implicit_batch == flag) {
if (flag) {
return errors::Unimplemented(
convert_not_supported_implicit(node_def.op(), node_name));
} else {
if (!HasStaticShape(boxes_dims) || !HasStaticShape(scores_dims)) {
return errors::Unimplemented(plugin_name,
"requires input with static shape");
}
}
}
const auto& output_size_per_class = inputs.at(2).weights();
const auto& total_size = inputs.at(3).weights();
const auto& iou_threshold = inputs.at(4).weights();
const auto& score_threshold = inputs.at(5).weights();
const int offset = params->use_implicit_batch ? 0 : 1;
if (boxes_dims.nbDims != 3 + offset) {
return errors::InvalidArgument(
plugin_name, "input boxes must be 4-D including batch, at ", node_name);
}
AttrSlice attrs(node_def);
bool clip_boxes = false, pad_per_class = false;
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "clip_boxes", &clip_boxes));
TF_RETURN_IF_ERROR(GetNodeAttr(attrs, "pad_per_class", &pad_per_class));
const int class_idx = 1 + offset;
const int num_classes = scores_dims.d[class_idx];
const bool box_check =
boxes_dims.d[class_idx] == 1 || boxes_dims.d[class_idx] == num_classes;
if (!box_check) {
return errors::InvalidArgument(
plugin_name,
"third dimension of boxes must be either 1"
"or match the num_classes dimension of scores, at ",
node_name);
}
if (output_size_per_class.count() != 1) {
return errors::InvalidArgument(
plugin_name, "max_output_size_per_class must be scalar, at ",
node_name);
}
const int max_size_per_class = *(output_size_per_class.GetPointer<int>());
if (max_size_per_class <= 0) {
return errors::InvalidArgument(
plugin_name, "max_output_size_per_class should be > 0, at ", node_name);
}
if (total_size.count() != 1) {
return errors::InvalidArgument(
plugin_name, "max_total_size must be scalar, at ", node_name);
}
int max_total_size = *(total_size.GetPointer<int>());
if (max_total_size <= 0) {
return errors::InvalidArgument(
plugin_name, "max_total_size should be > 0, at ", node_name);
}
if (iou_threshold.count() != 1) {
return errors::InvalidArgument(
plugin_name, "iou_threshold must be scalar, at ", node_name);
}
const auto iou_thresh = *(iou_threshold.GetPointer<float>());
if (iou_thresh < 0.0 || iou_thresh > 1.0) {
return errors::InvalidArgument(
plugin_name, "iou_threshold must be in [0, 1], at", node_name);
}
if (score_threshold.count() != 1) {
return errors::InvalidArgument(
plugin_name, "score_threshold must be scalar, at ", node_name);
}
#if !IS_TRT_VERSION_GE(8, 2, 1, 6) && !defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
const bool is_normalized = true;
const int backgrnd_id = -1;
const bool share_location = (boxes_dims.d[class_idx] == 1);
int keep_top_k =
pad_per_class ? std::min(max_size_per_class * num_classes, max_total_size)
: max_total_size;
const int num_boxes = boxes_dims.d[offset];
int top_k = std::max(num_boxes, keep_top_k);
if (top_k > 4096) {
if (AllowNmsTopkOverride()) {
top_k = 4096;
keep_top_k = std::min(top_k, keep_top_k);
} else {
return errors::InvalidArgument(
"TRT NMS plugin allow top_k<=4096, where top_k = max(num_boxes, "
"max_total_size). You can override this by setting "
"TF_TRT_ALLOW_NMS_TOPK_OVERRIDE=1 environment variable, but this can "
"result in a loss of accuracy.");
}
}
#endif
if (params->validation_only) return OkStatus();
float score_thresh = *(score_threshold.GetPointer<float>());
nvinfer1::PluginField fields[] = {
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
{"max_output_size_per_class", &max_size_per_class,
nvinfer1::PluginFieldType::kINT32, 1},
{"max_total_size", &max_total_size, nvinfer1::PluginFieldType::kINT32, 1},
{"iou_threshold", &iou_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"score_threshold", &score_thresh, nvinfer1::PluginFieldType::kFLOAT32,
1},
{"pad_per_class", &pad_per_class, nvinfer1::PluginFieldType::kINT32, 1},
{"clip_boxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
#else
{"shareLocation", &share_location, nvinfer1::PluginFieldType::kINT32, 1},
{"backgroundLabelId", &backgrnd_id, nvinfer1::PluginFieldType::kINT32, 1},
{"numClasses", &num_classes, nvinfer1::PluginFieldType::kINT32, 1},
{"topK", &top_k, nvinfer1::PluginFieldType::kINT32, 1},
{"keepTopK", &keep_top_k, nvinfer1::PluginFieldType::kINT32, 1},
{"scoreThreshold", &score_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"iouThreshold", &iou_thresh, nvinfer1::PluginFieldType::kFLOAT32, 1},
{"isNormalized", &is_normalized, nvinfer1::PluginFieldType::kINT32, 1},
{"clipBoxes", &clip_boxes, nvinfer1::PluginFieldType::kINT32, 1},
#endif
};
nvinfer1::PluginFieldCollection fc{sizeof(fields) / sizeof(fields[0]),
fields};
auto creator = getPluginRegistry()->getPluginCreator(pluginName, "1", "");
TFTRT_RETURN_ERROR_IF_NULLPTR(creator, node_name);
TrtUniquePtrType<nvinfer1::IPluginV2> plugin(
creator->createPlugin(node_name.c_str(), &fc));
TFTRT_RETURN_ERROR_IF_NULLPTR(plugin, node_name);
std::vector<nvinfer1::ITensor*> trt_plugin_inputs;
trt_plugin_inputs.push_back(boxes_tensor->trt_tensor());
trt_plugin_inputs.push_back(scores_tensor->trt_tensor());
nvinfer1::IPluginV2Layer* layer = params->converter->network()->addPluginV2(
&trt_plugin_inputs[0], static_cast<int>(trt_plugin_inputs.size()),
*plugin);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
params->converter->SetLayerName(layer, node_def, "plugin");
const ITensorProxyPtr output_detection_boxes = layer->getOutput(1);
const ITensorProxyPtr output_detection_scores = layer->getOutput(2);
ITensorProxyPtr output_num_detections = layer->getOutput(0);
ITensorProxyPtr output_detection_classes = layer->getOutput(3);
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
nvinfer1::IIdentityLayer* layer_detection_classes =
params->converter->network()->addIdentity(
*output_detection_classes->trt_tensor());
layer_detection_classes->setOutputType(0, nvinfer1::DataType::kFLOAT);
output_detection_classes = layer_detection_classes->getOutput(0);
std::vector<int> input_dims{output_num_detections->getDimensions().d[0], 0};
TF_RETURN_IF_ERROR(params->converter->SqueezeTensor(
output_num_detections,
&input_dims,
params,
&output_num_detections));
#endif
params->outputs->push_back(TRT_TensorOrWeights(output_detection_boxes));
params->outputs->push_back(TRT_TensorOrWeights(output_detection_scores));
params->outputs->push_back(TRT_TensorOrWeights(output_detection_classes));
params->outputs->push_back(TRT_TensorOrWeights(output_num_detections));
return OkStatus();
}
#endif
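// Converter for ResizeBilinear / ResizeNearestNeighbor. Transposes the input
// to NCHW, configures an IResizeLayer with either static output dimensions or
// a runtime shape tensor, then transposes the result back to NHWC.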
Status ConvertResize(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(CheckInputsWeights(
*params,
{{"input", TrtInputArg::kTensor}, {"size", TrtInputArg::kBoth}}));
TF_RETURN_IF_ERROR(AllowDataTypes(
*params, {DataType::DT_FLOAT, DataType::DT_HALF, DataType::DT_INT32}));
ITensorProxyPtr inputs_tensor = inputs.at(0).tensor();
TFTRT_RETURN_ERROR_IF_NULLPTR(inputs_tensor, params->node_def.name());
const bool const_output_size = inputs.at(1).is_weights();
if (const_output_size) {
if (inputs.at(1).weights().count() != 2) {
return errors::Unimplemented("Resize requires 2D values for the size");
}
} else {
if (params->use_implicit_batch) {
return errors::Unimplemented(
"Resize requires constant size in implicit batch mode");
}
TF_RETURN_IF_ERROR(ExpectShapeTensor(inputs.at(1)));
if (inputs.at(1).tensor()->getDimensions().d[0] != 2) {
return errors::Unimplemented("Resize requires 2D values for the size");
}
}
bool align_corners;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), "align_corners", &align_corners));
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
nvinfer1::ResizeMode resize_mode;
if (node_def.op() == "ResizeBilinear") {
#if IS_TRT_VERSION_GE(7, 1, 0, 0)
if (!align_corners) {
return errors::InvalidArgument(
"Cannot Convert Bilinear Resize when align_corners=False");
}
#endif
resize_mode = nvinfer1::ResizeMode::kLINEAR;
} else if (node_def.op() == "ResizeNearestNeighbor") {
resize_mode = nvinfer1::ResizeMode::kNEAREST;
} else {
return errors::Unimplemented(node_def.op(), " is not yet implemented");
}
if (params->validation_only) return OkStatus();
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
inputs_tensor, {0, 3, 1, 2}, &inputs_tensor, node_def, "to_NCHW"));
nvinfer1::Dims output_shape_dims;
ITensorProxyPtr output_shape_tensor;
const bool static_output_shape =
HasStaticShape(inputs_tensor->getDimensions()) && const_output_size;
if (static_output_shape) {
output_shape_dims.nbDims = inputs_tensor->getDimensions().nbDims;
for (int i = 0; i < output_shape_dims.nbDims; ++i) {
output_shape_dims.d[i] = inputs_tensor->getDimensions().d[i];
}
const int* weights_ptr = inputs.at(1).weights().GetPointer<int>();
output_shape_dims.d[output_shape_dims.nbDims - 2] = weights_ptr[0];
output_shape_dims.d[output_shape_dims.nbDims - 1] = weights_ptr[1];
} else {
ITensorProxyPtr shape = params->converter->network()
->addShape(*inputs_tensor->trt_tensor())
->getOutput(0);
ITensorProxyPtr batch_size =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr num_channels =
params->converter->network()
->addSlice(*shape->trt_tensor(), {1, {1}}, {1, {1}}, {1, {1}})
->getOutput(0);
ITensorProxyPtr height, width;
if (const_output_size) {
const int* weights_ptr = inputs.at(1).weights().GetPointer<int>();
TF_RETURN_IF_ERROR(CreateScalarConstant(params, weights_ptr[0], &height));
TF_RETURN_IF_ERROR(CreateScalarConstant(params, weights_ptr[1], &width));
} else {
ITensorProxyPtr size = inputs.at(1).tensor();
height = params->converter->network()
->addSlice(*size->trt_tensor(), {1, {0}}, {1, {1}}, {1, {1}})
->getOutput(0);
width = params->converter->network()
->addSlice(*size->trt_tensor(), {1, {1}}, {1, {1}}, {1, {1}})
->getOutput(0);
}
StatusOr<ITensorProxyPtr> result = ConcatenateTensors(
params, {batch_size, num_channels, height, width}, 0);
TF_RETURN_IF_ERROR(result.status());
output_shape_tensor = result.value();
}
nvinfer1::IResizeLayer* layer =
params->converter->network()->addResize(*inputs_tensor->trt_tensor());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def);
layer->setResizeMode(resize_mode);
layer->setAlignCorners(align_corners);
if (static_output_shape) {
layer->setOutputDimensions(output_shape_dims);
} else {
layer->setInput(1, *output_shape_tensor->trt_tensor());
}
ITensorProxyPtr output = layer->getOutput(0);
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
output, {0, 2, 3, 1}, &output, node_def, "to_NHWC"));
params->outputs->push_back(TRT_TensorOrWeights(output));
return OkStatus();
}
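// Converter for AddN: accumulates all N inputs with a chain of elementwise
// kSUM layers, turning weight inputs into constant layers first.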
Status ConvertAddN(const OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TF_RETURN_IF_ERROR(
AllowDataTypes(*params, {DataType::DT_FLOAT, DataType::DT_HALF}));
int num_inputs;
TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(node_def), "N", &num_inputs));
if (num_inputs < 2) {
return errors::InvalidArgument("AddN requires at least two inputs");
}
TFTRT_CHECK_INPUT_SIZE(inputs.size(), num_inputs, node_def);
for (const auto& input : inputs) {
if (!input.is_tensor() && input.weights().Shape().dim(0) != 1) {
return errors::InvalidArgument(
"Weights input to AddN is required to have batch dimension 1.");
}
}
if (params->validation_only) return OkStatus();
std::vector<ITensorProxyPtr> tensor_inputs;
tensor_inputs.reserve(inputs.size());
for (const auto& input : inputs) {
if (input.is_tensor()) {
tensor_inputs.push_back(input.tensor());
} else {
auto dims = input.weights().Shape();
if (params->use_implicit_batch) {
TF_RETURN_IF_ERROR(dims.RemoveBatchDimension());
}
tensor_inputs.push_back(params->converter->CreateConstantLayer(
input.weights(), dims.AsTrtDims()));
}
}
ITensorProxyPtr lhs = tensor_inputs[0];
for (int i = 1; i < num_inputs; ++i) {
ITensorProxyPtr rhs = tensor_inputs[i];
nvinfer1::ILayer* layer = params->converter->network()->addElementWise(
*lhs->trt_tensor(), *rhs->trt_tensor(),
nvinfer1::ElementWiseOperation::kSUM);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->SetLayerName(layer, node_def, std::to_string(i));
lhs = layer->getOutput(0);
}
params->outputs->push_back(TRT_TensorOrWeights(lhs));
return OkStatus();
}
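// Registration of the converters above with the default TF-TRT op converter
// registry, keyed by TF op name.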
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertBiasAdd, "BiasAdd");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertClipByValue, "ClipByValue");
#if IS_TRT_VERSION_GE(7, 1, 3, 0) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertCombinedNMS,
"CombinedNonMaxSuppression");
#endif
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertAddN, "AddN");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertCast, "Cast");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConcat, "ConcatV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConst, "Const");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2D, "Conv2D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2DBackpropInput,
"Conv2DBackpropInput");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertDepthSpaceShuffle, "DepthToSpace");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv2DDepthwise,
"DepthwiseConv2dNative");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertExpandDims, "ExpandDims");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFusedConv2DBiasActivation,
"FusedConv2DBiasActivation");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertGather, "GatherV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertMatMul, "MatMul");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPack, "Pack");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPad, "Pad");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertReshape, "Reshape");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv3D, "Conv3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertConv3DBackpropInputV2,
"Conv3DBackpropInputV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertResize, "ResizeBilinear");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertResize, "ResizeNearestNeighbor");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool3D, "AvgPool3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool3D, "MaxPool3D");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertShape, "Shape");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSlice, "Slice");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertDepthSpaceShuffle, "SpaceToDepth");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSplit, "Split");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSquare, "Square");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSquaredDifference,
"SquaredDifference");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertSqueeze, "Squeeze");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertStridedSlice, "StridedSlice");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertTopK, "TopKV2");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertTranspose, "Transpose");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertUnpack, "Unpack");
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertPool, {"MaxPool", "AvgPool"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFusedBatchNorm,
{"FusedBatchNorm", "FusedBatchNormV2",
"FusedBatchNormV3"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertReduce,
{"Sum", "Prod", "Max", "Min", "Mean"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertArgMinMax, {"ArgMin", "ArgMax"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertIdentity,
{"Identity", "IdentityN", "Snapshot",
"StopGradient", "_CopyFromHostToGpu"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertBatchMatMul,
{"BatchMatMul", "BatchMatMulV2"});
REGISTER_DEFAULT_TRT_OP_CONVERTER(ConvertFake, "FakeOp");
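// Assigns the given device string to every node in the graph so that the
// layout optimizer below sees consistent placement information.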
static Status SetDeviceInfoInNodes(GraphDef* graph_def, const string& device) {
for (auto& node : *(graph_def->mutable_node())) {
*node.mutable_device() = device;
}
return OkStatus();
}
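// Builds a TensorRT engine from a TF GraphDef segment: optionally runs the
// generic layout optimizer and constant folding, registers engine inputs and
// outputs based on the IONamePrefixes naming convention, converts every other
// node with the registered op converters, and finally builds the CUDA engine.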
Status ConvertGraphDefToEngine(
const GraphDef& gdef, OpKernelContext* ctx, TrtPrecisionMode precision_mode,
int max_batch_size, size_t max_workspace_size_bytes,
const std::vector<PartialTensorShape>& input_shapes,
nvinfer1::ILogger* trt_logger, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator,
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, bool use_calibration,
const bool use_implicit_batch, bool* convert_successfully,
TrtShapeOptimizationProfile* profiles, absl::string_view engine_name,
bool use_explicit_precision, tensorflow::grappler::Cluster* cluster,
const string& device) {
engine->reset();
if (convert_successfully) *convert_successfully = false;
auto statusor = Converter::Create(precision_mode, use_calibration, trt_logger,
use_implicit_batch, engine_name,
use_explicit_precision, ctx);
TF_RETURN_IF_ERROR(statusor.status());
std::unique_ptr<Converter> converter = std::move(statusor.value());
GraphDef graph = gdef;
if (cluster != nullptr) {
bool apply_layout_optim;
Status status =
ReadBoolFromEnvVar("TF_TRT_ENABLE_LAYOUT_OPTIMIZER",
true, &apply_layout_optim);
if (!status.ok()) {
LOG(ERROR) << status;
}
if (apply_layout_optim) {
tensorflow::grappler::GrapplerItem grappler_item;
grappler_item.graph = gdef;
TF_RETURN_IF_ERROR(SetDeviceInfoInNodes(&grappler_item.graph, device));
tensorflow::grappler::GenericLayoutOptimizer layout_optimizer("NCHW");
TF_RETURN_IF_ERROR(
layout_optimizer.Optimize(cluster, grappler_item, &graph));
grappler_item.graph = graph;
tensorflow::grappler::ConstantFolding const_optimizer(
nullptr,
false,
false);
TF_RETURN_IF_ERROR(
const_optimizer.Optimize(cluster, grappler_item, &graph));
Graph g(OpRegistry::Global());
TF_RETURN_IF_ERROR(
ConvertGraphDefToGraph(GraphConstructorOptions(), graph, &g));
g.ToGraphDef(&graph);
}
}
VLOG(1) << "Starting to convert TensorFlow ops to TensorRT layers";
std::vector<Converter::EngineOutputInfo> output_tensors;
int num_layers = converter->network()->getNbLayers();
absl::flat_hash_set<const char*> layer_names;
for (const auto& node_def : graph.node()) {
const string& node_name = node_def.name();
VLOG(2) << "Converting node " << node_name << ", op=" << node_def.op();
if (IsEngineInput(node_name)) {
int32 slot_number = -1;
string type_key;
if (node_def.op() == "Placeholder") {
if (!strings::safe_strto32(
node_name.c_str() + strlen(IONamePrefixes::kInputPHName),
&slot_number)) {
return errors::InvalidArgument("Failed to parse slot number from ",
node_name);
}
type_key = "dtype";
} else if (tensorflow::grappler::IsArg(node_def)) {
slot_number = node_def.attr().at("index").i();
type_key = "T";
} else {
return errors::InvalidArgument(
"Node ", node_name,
" with is neither Placeholder nor Arg, instead ", node_def.op());
}
DataType tf_dtype = node_def.attr().at(type_key).type();
if (tf_dtype == DT_RESOURCE) {
VLOG(2) << "Adding engine input resource " << node_name;
if (ctx == nullptr) {
return errors::InvalidArgument(
"Variable resource type conversion requires a valid ctx");
}
if (ctx->input(slot_number).NumElements() == 0) {
return errors::InvalidArgument("Resource input ", node_name,
" is empty.");
}
TF_RETURN_IF_ERROR(converter->AddInputResource(
node_name, ctx->input(slot_number).flat<ResourceHandle>()(0)));
} else {
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
const auto shape = input_shapes.at(slot_number);
const auto status = ValidateTensorProperties(
node_def.op(), node_def.attr().at(type_key).type(), shape,
use_implicit_batch, false, &trt_dtype,
&trt_dims, &batch_size);
if (!status.ok()) {
const string error_message =
StrCat("Validation failed for ", node_name, " and input slot ",
slot_number, ": ", status.message());
LOG_WARNING_WITH_PREFIX << error_message;
return errors::CreateWithUpdatedMessage(status, error_message);
}
VLOG(2) << "Adding engine input tensor " << node_name << " with shape "
<< DebugString(trt_dims);
TF_RETURN_IF_ERROR(converter->AddInputTensor(node_name, trt_dtype,
trt_dims, batch_size));
}
} else if (IsEngineOutput(node_name)) {
int32 slot_number = -1;
if (node_def.op() == "Identity") {
if (!strings::safe_strto32(
node_name.c_str() + strlen(IONamePrefixes::kOutputPHName),
&slot_number)) {
return errors::InvalidArgument("Failed to parse slot number from ",
node_name);
}
} else if (tensorflow::grappler::IsRetval(node_def)) {
slot_number = node_def.attr().at("index").i();
} else {
return errors::InvalidArgument(
"Node with name ", node_name,
" starting with IONamePrefixes::kOutputPHName is "
"neither Identity nor Retval, instead ",
node_def.op());
}
string out_type_key;
if (node_def.op() == "ReadVariableOp" ||
node_def.op() == "ResourceGather") {
out_type_key = "dtype";
} else {
out_type_key = "T";
}
DataType tf_dtype;
TF_RETURN_IF_ERROR(
GetNodeAttr(AttrSlice(node_def), out_type_key, &tf_dtype));
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(TfTypeToTrtType(tf_dtype, &trt_dtype));
if (output_tensors.size() <= slot_number) {
output_tensors.resize(slot_number + 1);
}
output_tensors.at(slot_number) = {node_def.input(0), node_name,
trt_dtype};
} else {
TF_RETURN_IF_ERROR(converter->ConvertNode(node_def));
}
int new_num_layers = converter->network()->getNbLayers();
for (int i = num_layers; i < new_num_layers; i++) {
auto layer = converter->network()->getLayer(i);
if (layer->getName() == nullptr ||
!layer_names.insert(layer->getName()).second) {
std::string error_message = absl::StrCat(
"Converting node ", node_name, ", op=", node_def.op(),
layer->getName() ? " creates a layer with name collision"
: " creates a layer without a name");
LOG_WARNING_WITH_PREFIX << error_message;
return errors::Internal(error_message);
}
}
num_layers = new_num_layers;
}
TF_RETURN_IF_ERROR(converter->RenameAndMarkOutputTensors(output_tensors));
if (convert_successfully) *convert_successfully = true;
if (!use_explicit_precision) {
converter->MaybeApplyQuantizationRanges();
}
TF_RETURN_IF_ERROR(converter->BuildCudaEngine(
engine, max_batch_size, max_workspace_size_bytes, allocator, calibrator,
profiles));
VLOG(1) << "Finished conversion";
return OkStatus();
}
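// Rewrites a segment of the original graph into a standalone GraphDef for the
// TRT engine: adds _Arg/_Retval nodes at the segment boundaries, copies the
// segment nodes, rewires their inputs to the new _Arg names, and drops control
// inputs that come from outside the segment.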
Status ConvertSegmentToGraphDef(
const Graph* graph, const grappler::GraphProperties& graph_properties,
const std::vector<const Node*>& subgraph_nodes,
EngineInfo* engine_info) {
tensorflow::profiler::TraceMe activity(
"ConvertSegmentToGraphDef", tensorflow::profiler::TraceMeLevel::kInfo);
std::vector<EngineConnection>* connections = &engine_info->connections;
GraphDef* segment_def = &engine_info->segment_graph_def;
std::set<string> marker_nodes;
for (size_t i = 0; i < connections->size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Constructing TRTEngine IO: ", i + 1, "/",
connections->size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto& connection = connections->at(i);
if (connection.is_control_edge()) continue;
auto outside_node = graph->FindNodeId(connection.outside_id);
if (!outside_node) {
return errors::NotFound("Cannot find node with id ",
connection.outside_id, " in the graph.");
}
DataType dtype;
PartialTensorShape partial_shape;
if (connection.is_input_edge) {
GetOutputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.outside_shape = partial_shape;
} else {
GetInputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.inside_shape = partial_shape;
}
connection.connection_type = dtype;
if (connection.is_input_edge) {
const string node_name =
StrCat(IONamePrefixes::kInputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing input " << node_name << " for the edge "
<< connection.outside_node_name << ":"
<< connection.outside_port << " -> "
<< connection.inside_node_name << ":" << connection.inside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
NodeDefBuilder builder(node_name, "_Arg");
auto status = builder.Attr("shape", partial_shape)
.Attr("T", dtype)
.Attr("index", connection.port_number)
.Finalize(seg_node);
VLOG(1) << "Constructing input " << node_name << " for the edge "
<< connection.outside_node_name << ":" << connection.outside_port
<< " -> " << connection.inside_node_name << ":"
<< connection.inside_port;
} else {
const string node_name =
StrCat(IONamePrefixes::kOutputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
NodeDefBuilder builder(node_name, "_Retval");
auto status =
builder.Attr("T", dtype)
.Attr("index", connection.port_number)
.Input(connection.inside_node_name, connection.inside_port, dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
}
}
std::unordered_map<int, int> old_to_new_id_map;
string local_scope = subgraph_nodes.front()->name();
int i = 0;
for (const Node* node : subgraph_nodes) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Copy Node to Subgraph: ", ++i, "/",
subgraph_nodes.size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
local_scope = GetCommonNameScope(local_scope, node->name());
old_to_new_id_map[node->id()] = segment_def->node_size();
auto snode = segment_def->add_node();
*snode = node->def();
VLOG(2) << "Copying " << snode->name() << " to subgraph";
}
for (int i = 0; i < connections->size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Updating Subgraph Input: ", i + 1, "/",
connections->size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto& connection = connections->at(i);
if (connection.is_control_edge() || !connection.is_input_edge) continue;
auto snode =
segment_def->mutable_node(old_to_new_id_map[connection.inside_id]);
const string arg_name =
StrCat(IONamePrefixes::kInputPHName, connection.port_number);
VLOG(1) << "Updating " << snode->name() << ":" << connection.inside_port
<< " from " << snode->input(connection.inside_port) << " to "
<< arg_name;
snode->set_input(connection.inside_port, arg_name);
}
std::set<string> subgraph_node_names;
{
tensorflow::profiler::TraceMe activity(
"Constructing subgraph_node_names set: ",
tensorflow::profiler::TraceMeLevel::kInfo);
for (const Node* node : subgraph_nodes) {
subgraph_node_names.insert(node->name());
}
}
for (int i = 0; i < segment_def->node_size(); ++i) {
tensorflow::profiler::TraceMe activity(
[&] {
return StrCat("Removing outside to subgraph control inputs: ", i + 1,
"/", segment_def->node_size());
},
tensorflow::profiler::TraceMeLevel::kInfo);
auto snode = segment_def->mutable_node(i);
const int input_size = snode->input_size();
int input_idx = 0;
int actual_input_idx = 0;
while (input_idx < input_size) {
TensorId input = ParseTensorName(snode->input(input_idx));
if (!subgraph_node_names.count(
string(input.first.data(), input.first.size())) &&
!IsEngineInput(input.first)) {
if (input.second == Graph::kControlSlot) {
VLOG(1) << "... removing control inputs " << input.first
<< " from subgraph.";
++input_idx;
continue;
}
}
if (actual_input_idx != input_idx) {
snode->set_input(actual_input_idx, snode->input(input_idx));
}
++input_idx;
++actual_input_idx;
}
for (int remove = input_size - actual_input_idx; remove > 0; --remove) {
snode->mutable_input()->RemoveLast();
}
}
return OkStatus();
}
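// Accepts control edges unconditionally; rejects data edges coming out of
// Const nodes so that constants are not treated as engine outputs.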
bool OutputEdgeValidator::operator()(const Edge* out_edge) const {
if (out_edge->IsControlEdge()) return true;
if (out_edge->src()->type_string() == "Const") {
VLOG(1) << "--> Need to remove output node " << out_edge->src()->name()
<< " which is a Const.";
return false;
}
return true;
}
ITensorProxyPtr TRT_TensorOrWeights::as_tensor(
const OpConverterParams* params) {
if (is_tensor()) {
return tensor();
} else {
return params->converter->CreateConstantLayer(weights(), GetTrtDims());
}
}
std::string unexpected_type_error_msg(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected,
const NodeDef& node_def, int idx) {
return "The '" + node_def.input(idx) + "' parameter of " + node_def.op() +
" operation in " + node_def.name() + " is expected to be of type " +
DebugString(type_expected) + " type, got " +
DebugString(type_being_checked) + ".";
}
string batch_size_error(absl::string_view name, absl::string_view comment) {
return StrCat("Batch size doesn't match for tensor '", name, "' : ", comment);
}
Status check_type(nvinfer1::DataType type_being_checked,
nvinfer1::DataType type_expected, const NodeDef& node_def,
int idx) {
if (type_being_checked == type_expected) return OkStatus();
return errors::InvalidArgument(unexpected_type_error_msg(
type_being_checked, type_expected, node_def, idx));
}
std::string convert_not_supported_implicit(const std::string& pOpName,
const std::string& pNodeName,
const char* pOpType) {
const auto oper = pOpType ? absl::StrCat(pOpType, " ") : string("");
return absl::StrCat("Convertion for ", oper, "op: '", pOpName,
"' is not supported in implicit batch mode, at ",
pNodeName);
}
}
}
}
#endif
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <iterator>
#include <memory>
#include <numeric>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "absl/time/civil_time.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/base/call_once.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/gpu/gpu_managed_allocator.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/resource_var.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/tensor_slice_reader_cache.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
enum class TrtTestMode {
kImplicitBatch = 0,
kExplicitBatch = 1,
kDynamicShape = 2
};
string DebugString(const TrtTestMode mode) {
switch (mode) {
case TrtTestMode::kImplicitBatch:
return "kImplicitBatch";
case TrtTestMode::kExplicitBatch:
return "kExplicitBatch";
case TrtTestMode::kDynamicShape:
return "kDynamicShape";
default:
return "Invalid TrtTestMode";
}
}
namespace convert {
using absl::StrCat;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::Matcher;
using ::testing::PrintToString;
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
constexpr std::array<TrtTestMode, 3> ValidTrtModes = {
TrtTestMode::kImplicitBatch, TrtTestMode::kExplicitBatch,
TrtTestMode::kDynamicShape};
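// Test helper: two TRT_ShapedWeights compare equal when their shape, dtype,
// and underlying buffer pointer all match.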
bool TrtShapedWeightsEquals(const TRT_ShapedWeights& lhs,
const TRT_ShapedWeights& rhs) {
return lhs.Shape() == rhs.Shape() && lhs.TrtDType() == rhs.TrtDType() &&
lhs.GetPointer<int8>() == rhs.GetPointer<int8>();
}
template <typename T>
void ValidateWeights(const TRT_ShapedWeights& weights,
const std::vector<int>& expected_dims,
const std::vector<T>& expected_value) {
EXPECT_EQ(weights.Shape(), DimsAdapter(expected_dims));
ASSERT_EQ(expected_value.size(), weights.count()) << weights.DebugString();
const T* actual_values = weights.GetPointer<T>();
for (int i = 0; i < expected_value.size(); ++i) {
EXPECT_EQ(expected_value[i], actual_values[i]);
}
}
TEST(TRT_ShapedWeights_Test, Basic) {
{
TRT_ShapedWeights weights;
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TRT_ShapedWeights weights(nvinfer1::DataType::kFLOAT);
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_EQ(nullptr, trt_weights.values);
EXPECT_EQ(0, trt_weights.count);
EXPECT_EQ(nullptr, ptr->GetPointer<int8>());
EXPECT_EQ(0, ptr->count());
EXPECT_EQ(0, ptr->size_bytes());
}
}
{
TrtWeightStore store;
TRT_ShapedWeights weights =
store.GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims({2, 5}))
.value();
TRT_ShapedWeights copy(weights);
for (auto ptr : {&weights, ©}) {
nvinfer1::Weights trt_weights = ptr->GetTrtWeights();
EXPECT_EQ(nvinfer1::DataType::kFLOAT, trt_weights.type);
EXPECT_NE(nullptr, trt_weights.values);
EXPECT_EQ(10, trt_weights.count);
EXPECT_EQ(trt_weights.values, ptr->GetPointer<int8>());
EXPECT_EQ(10, ptr->count());
EXPECT_EQ(40, ptr->size_bytes());
}
EXPECT_EQ(weights.GetPointer<int8>(), copy.GetPointer<int8>());
}
}
TEST(TRT_TensorOrWeights_Test, Basic) {
{
TRT_TensorOrWeights tw;
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(-1, ptr->batch_size());
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
ITensorProxyPtr itensor(dims);
TRT_TensorOrWeights tw(itensor);
TRT_TensorOrWeights tw1(itensor, 1);
for (auto original_ptr : {&tw, &tw1}) {
TRT_TensorOrWeights copy(*original_ptr);
TRT_TensorOrWeights assigned;
assigned = *original_ptr;
for (auto ptr : {original_ptr, ©, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
if (original_ptr == &tw) {
EXPECT_EQ(-1, ptr->batch_size());
} else {
EXPECT_EQ(1, ptr->batch_size());
}
EXPECT_EQ(itensor->simple_tensor(), ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
}
{
nvinfer1::Dims dims;
dims.nbDims = 1;
dims.d[0] = 1;
TRT_TensorOrWeights tw(nvinfer1::DataType::kFLOAT, dims, 1);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
ASSERT_TRUE(ptr->is_tensor());
EXPECT_EQ(false, ptr->is_weights());
EXPECT_EQ(1, ptr->batch_size());
EXPECT_NE(nullptr, ptr->tensor()->simple_tensor());
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray({1}));
}
}
{
TRT_ShapedWeights weights;
TRT_TensorOrWeights tw(weights);
TRT_TensorOrWeights copy(tw);
TRT_TensorOrWeights assigned;
assigned = tw;
for (auto ptr : {&tw, ©, &assigned}) {
EXPECT_EQ(false, ptr->is_tensor());
EXPECT_EQ(true, ptr->is_weights());
EXPECT_TRUE(TrtShapedWeightsEquals(weights, ptr->weights()));
std::vector<int> empty_dims;
EXPECT_THAT(ptr->GetTrtDims(), DimsAreArray(empty_dims));
}
}
}
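// Test fixture that builds a small graph, infers its shapes statically, and
// runs TrtNodeValidator::ConvertToTensorOrWeights on a single node.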
class ValidatorTest : public ::testing::Test {
public:
ValidatorTest() {}
Status ConvertToTensorOrWeights(const Scope& scope, const Node* node,
int output_port,
TRT_TensorOrWeights* tensor_or_weights) {
grappler::GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
false,
true,
false);
return validator.ConvertToTensorOrWeights(node->def(), output_port,
tensor_or_weights);
}
};
TEST_F(ValidatorTest, ConvertToTensorOrWeights) {
{
Scope s = Scope::NewRootScope();
auto node =
ops::Const(s.WithOpName("my_const"), {1.0f, 2.0f}, TensorShape({2}));
TRT_TensorOrWeights output;
EXPECT_THAT(ConvertToTensorOrWeights(s, node.op().node(),
0, &output),
IsOk());
ValidateWeights<float>(output.weights(), {2}, {1.0, 2.0});
}
auto convert_to_tensor_or_weights = [this](const std::vector<int64_t>& dims,
TRT_TensorOrWeights* output) {
Scope s = Scope::NewRootScope();
const auto attrs = ops::Placeholder::Shape(PartialTensorShape{dims});
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, attrs);
auto add = ops::Add(s.WithOpName("add"), feed, feed);
return this->ConvertToTensorOrWeights(s, add.operation.node(),
0, output);
};
{
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights(
std::vector<int64_t>(nvinfer1::Dims::MAX_DIMS + 2, 1), &output),
StatusIs(absl::StatusCode::kOutOfRange,
HasSubstr("Input tensor rank is greater than 9")));
}
{
TRT_TensorOrWeights output;
EXPECT_THAT(convert_to_tensor_or_weights({}, &output),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Scalar input tensor is not supported since "
"the first dimension "
"is treated as batch dimension by TRT")));
}
for (const int32 non_batch_dim : {-1, 2}) {
const int32 batch_size = 12;
TRT_TensorOrWeights output;
EXPECT_THAT(
convert_to_tensor_or_weights({batch_size, non_batch_dim}, &output),
IsOk());
ASSERT_TRUE(output.is_tensor());
EXPECT_EQ(batch_size, output.batch_size());
EXPECT_NE(nullptr, output.tensor()->simple_tensor());
EXPECT_THAT(output.GetTrtDims(), DimsAreArray({non_batch_dim}));
}
}
TEST_F(ValidatorTest, IsTensorRTCandidate_Basics) {
Scope s = Scope::NewRootScope();
auto input =
ops::Const(s.WithOpName("const"), {1.0f, 2.0f}, TensorShape({2}));
auto add = ops::Add(s.WithOpName("add"), input, input);
const Node* add_node = add.operation.node();
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
TrtNodeValidator validator(graph_properties, TrtPrecisionMode::FP32,
false,
true,
false);
bool start_conversion = false;
bool should_fail = false;
auto op_converter = [&start_conversion, &should_fail](
const OpConverterParams* params) -> Status {
if (should_fail) return errors::InvalidArgument("");
if (!params->validation_only) start_conversion = true;
return OkStatus();
};
auto original_op_converter = GetOpConverterRegistry()->LookUp("Add");
ASSERT_TRUE(original_op_converter.ok());
GetOpConverterRegistry()->Clear("Add");
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Add is not supported.")));
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority + 1,
op_converter);
TF_EXPECT_OK(validator.IsTensorRTCandidate(add_node));
EXPECT_EQ(false, start_conversion);
should_fail = true;
EXPECT_THAT(validator.IsTensorRTCandidate(add_node),
StatusIs(absl::StatusCode::kInvalidArgument));
GetOpConverterRegistry()->Clear("Add");
GetOpConverterRegistry()->Register("Add", kDefaultConverterPriority,
*original_op_converter);
}
TEST(TrtNodeValidator, IsTensorRTCandidate) {
const std::vector<int32> input_shape_array{2, 2};
TensorShape input_shape;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(input_shape_array, &input_shape));
Scope s = Scope::NewRootScope();
ops::Placeholder::Attrs feed_attrs;
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(input_shape_array, &feed_attrs.shape_));
auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT, feed_attrs);
auto const_1 = ops::Const(s.WithOpName("const_1"), 1.0f, input_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), feed, const_1);
ops::MatMul::Attrs matmul_attrs;
matmul_attrs.transpose_a_ = true;
auto incompatible_matmul = ops::MatMul(s.WithOpName("incompatible_matmul"),
feed, const_1, matmul_attrs);
auto unsupported_op = ops::Erfc(s.WithOpName("sin"), feed);
auto incompatible_feed = ops::Placeholder(s.WithOpName("feed"), DT_DOUBLE);
auto const_2 = ops::Const(s.WithOpName("const_2"), 1.0, input_shape);
auto matmul_with_incompatible_input =
ops::MatMul(s.WithOpName("matmul_with_incompatible_input"),
incompatible_feed, const_2);
auto quantize_attrs = ops::FakeQuantWithMinMaxArgs::Min(-6.0f).Max(6.0f);
auto quantize = ops::FakeQuantWithMinMaxArgs(s.WithOpName("quantize"), feed,
quantize_attrs);
grappler::GrapplerItem item;
TF_EXPECT_OK(s.ToGraphDef(&item.graph));
Tensor feed_tensor(DT_FLOAT, input_shape);
item.feed.push_back(std::make_pair("feed", feed_tensor));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
for (const TrtPrecisionMode precision_mode :
{TrtPrecisionMode::FP32, TrtPrecisionMode::INT8}) {
TrtNodeValidator validator(graph_properties, precision_mode,
false,
true,
false);
TF_EXPECT_OK(validator.IsTensorRTCandidate(matmul.operation.node()));
EXPECT_THAT(
validator.IsTensorRTCandidate(incompatible_matmul.operation.node()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("MatMul with 2D tensors requires explicit batch "
"mode, or that tensor A "
"is not transposed and B is a constant tensor.")));
EXPECT_THAT(validator.IsTensorRTCandidate(unsupported_op.operation.node()),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Op type Erfc is not supported")));
EXPECT_THAT(validator.IsTensorRTCandidate(
matmul_with_incompatible_input.operation.node()),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to convert at least one input to a "
"TRT_TensorOrWeights:")));
if (precision_mode == TrtPrecisionMode::INT8) {
TF_EXPECT_OK(validator.IsTensorRTCandidate(quantize.operation.node()));
} else {
EXPECT_THAT(
validator.IsTensorRTCandidate(quantize.operation.node()),
StatusIs(
absl::StatusCode::kUnimplemented,
HasSubstr("Op type FakeQuantWithMinMaxArgs is not supported")));
}
}
}
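// Fixture exposing Converter internals (batch size, weight store, quantization
// ranges) so the tests below can exercise them directly.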
class ConverterTest : public ::testing::Test {
public:
ConverterTest() { Reset(); }
void Reset() {
GetOpConverterRegistry()->Clear("MyOp");
GetOpConverterRegistry()->Clear("DummyOp");
converter_ =
std::move(Converter::Create(TrtPrecisionMode::FP32,
false, &logger_,
true,
"TRTEngineOp_000_000",
false)
.value());
weight_store_ = &converter_->weight_store_;
}
Status MaybeUpdateBatchSize(int batch_size) {
return converter_->MaybeUpdateBatchSize(batch_size);
}
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input) {
return converter_->AddTensorOrWeights(name, input);
}
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output) {
return converter_->GetTensorOrWeights(name, output);
}
Status GetInputs(const NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
return converter_->GetInputs(node_def, inputs);
}
Status GetWeightRange(const TRT_ShapedWeights& weights, float* out_min,
float* out_max) const {
return converter_->GetWeightRange(weights, out_min, out_max);
}
int batch_size() const { return converter_->batch_size_; }
std::unordered_map<ITensorProxyPtr*, float>& quantization_ranges_proxy() {
return converter_->quantization_ranges_proxy_;
}
std::unordered_map<nvinfer1::ITensor*, float>& quantization_ranges() {
return converter_->quantization_ranges_;
}
private:
Logger& logger_ = *Logger::GetLogger();
protected:
std::unique_ptr<Converter> converter_;
TrtWeightStore* weight_store_;
};
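// Registers a dummy converter for "MyOp" and checks that ConvertNode wires up
// both of its outputs with the expected dimensions.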
TEST_F(ConverterTest, ConvertNode) {
ITensorProxyPtr output_tensors[2];
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Dims dims = params->inputs[0].tensor()->getDimensions();
for (int i = 0; i < 2; ++i) {
dims.d[0] += 1;
output_tensors[i]->setDimensions(dims);
params->outputs->push_back(TRT_TensorOrWeights(output_tensors[i]));
}
return OkStatus();
};
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_ASSERT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({123}), 1));
EXPECT_THAT(converter_->ConvertNode(node_def),
StatusIs(absl::StatusCode::kNotFound,
HasSubstr("No converter for op MyOp")));
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
TF_ASSERT_OK(converter_->ConvertNode(node_def));
TRT_TensorOrWeights actual_output_1;
TF_EXPECT_OK(GetTensorOrWeights("my_op", &actual_output_1));
EXPECT_EQ(output_tensors[0]->simple_tensor(),
actual_output_1.tensor()->simple_tensor());
EXPECT_EQ(124, actual_output_1.tensor()->getDimensions().d[0]);
TRT_TensorOrWeights actual_output_2;
TF_EXPECT_OK(GetTensorOrWeights("my_op:1", &actual_output_2));
EXPECT_EQ(output_tensors[1]->simple_tensor(),
actual_output_2.tensor()->simple_tensor());
EXPECT_EQ(125, actual_output_2.tensor()->getDimensions().d[0]);
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, AddAndGetInputs) {
NodeDef node_def;
node_def.add_input("^control_input");
node_def.add_input("input");
node_def.add_input("input:0");
node_def.add_input("input:1");
node_def.add_input("weird_input:2:3:4:0");
TF_EXPECT_OK(converter_->AddInputTensor("input", nvinfer1::DataType::kFLOAT,
CreateDims({1}), 1));
TF_EXPECT_OK(converter_->AddInputTensor("input:1", nvinfer1::DataType::kINT32,
CreateDims({2, 3}), 1));
TF_EXPECT_OK(converter_->AddInputTensor(
"weird_input:2:3:4", nvinfer1::DataType::kHALF, CreateDims({5, 3}), 1));
std::vector<TRT_TensorOrWeights> inputs;
TF_EXPECT_OK(GetInputs(node_def, &inputs));
EXPECT_EQ(4, inputs.size());
EXPECT_EQ(inputs[0].tensor()->trt_tensor(), inputs[1].tensor()->trt_tensor());
EXPECT_EQ(nvinfer1::DataType::kFLOAT, inputs[0].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kINT32, inputs[2].tensor()->getType());
EXPECT_EQ(nvinfer1::DataType::kHALF, inputs[3].tensor()->getType());
EXPECT_THAT(inputs[0].tensor()->getDimensions(), DimsAreArray({1}));
EXPECT_THAT(inputs[2].tensor()->getDimensions(), DimsAreArray({2, 3}));
EXPECT_THAT(inputs[3].tensor()->getDimensions(), DimsAreArray({5, 3}));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, RenameAndMarkOutputTensors) {
std::vector<ITensorProxyPtr> output_tensors;
auto op_converter =
[&output_tensors](const OpConverterParams* params) -> Status {
nvinfer1::Permutation perm;
perm.order[0] = 1;
perm.order[1] = 0;
for (int i = 0; i < 2; ++i) {
ITensorProxyPtr input_tensor = params->inputs[0].tensor();
nvinfer1::IShuffleLayer* layer =
params->converter->network()->addShuffle(*input_tensor->trt_tensor());
layer->setFirstTranspose(perm);
ITensorProxyPtr output_tensor = layer->getOutput(0);
params->outputs->emplace_back(output_tensor);
output_tensors.push_back(output_tensor);
}
TRT_ShapedWeights output_weights(nvinfer1::DataType::kFLOAT);
params->outputs->emplace_back(output_weights);
return OkStatus();
};
GetOpConverterRegistry()->Register("MyOp", kDefaultConverterPriority,
op_converter);
NodeDef node_def = MakeNodeDef("my_op", "MyOp", {"my_input"});
TF_EXPECT_OK(converter_->AddInputTensor(
"my_input", nvinfer1::DataType::kFLOAT, CreateDims({1, 2}), 1));
TF_EXPECT_OK(converter_->ConvertNode(node_def));
EXPECT_THAT(
converter_->RenameAndMarkOutputTensors({{"my_op:2", "my_output"}}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output my_op:2 is weights not tensor")));
TF_EXPECT_OK(converter_->RenameAndMarkOutputTensors(
{{"my_op", "my_output"}, {"my_op:1", "my_output_1"}}));
EXPECT_EQ(2, output_tensors.size());
for (auto output_tensor : output_tensors) {
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({2, 1}));
}
EXPECT_EQ("my_output", string(output_tensors[0]->getName()));
EXPECT_EQ("my_output_1", string(output_tensors[1]->getName()));
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, TransposeTensor) {
ITensorProxyPtr input_tensor = converter_->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims({2, 3, 5}));
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
EXPECT_THAT(converter_->TransposeTensor(input_tensor, {0, 1}, &output_tensor,
dummy_node_def, "sub1"),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Rank of perm for transpose does not match "
"with that of the input")));
EXPECT_THAT(
converter_->TransposeTensor(input_tensor, {1, 0, 2, 3}, &output_tensor,
dummy_node_def, "sub2"),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("Transpose at batch dimension is not supported.")));
TF_EXPECT_OK(converter_->TransposeTensor(
input_tensor, {0, 3, 1, 2}, &output_tensor, dummy_node_def, "sub3"));
EXPECT_THAT(output_tensor->getDimensions(), DimsAreArray({5, 2, 3}));
EXPECT_THAT(
converter_->network(),
LayerNamesAreArray({"TRTEngineOp_000_000/dummy_op-sub3:SHUFFLE"}));
}
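// Helper that runs PrepareTensorForShape in both validation-only and
// conversion mode, checking either the resulting dimensions or the expected
// error.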
void TestPrepareTensorForShape(
const std::vector<int>& input_dims, const std::vector<int>& reshape_dims,
const std::vector<int>& expected_tensor_dims, bool input_is_tensor,
Converter* converter, TrtWeightStore* weight_store,
absl::StatusCode expected_code = absl::StatusCode::kOk,
const char* expected_error_msg_substr = nullptr) {
TRT_TensorOrWeights input;
if (input_is_tensor) {
input = TRT_TensorOrWeights(converter->network()->addInput(
"", nvinfer1::DataType::kFLOAT, CreateDims(input_dims)));
} else {
input = TRT_TensorOrWeights(
weight_store
->GetTempWeights(nvinfer1::DataType::kFLOAT, CreateDims(input_dims))
.value());
}
ITensorProxyPtr output_tensor = nullptr;
NodeDef dummy_node_def = MakeNodeDef("dummy_op", "DummyOp", {});
for (bool validation_only : {false, true}) {
const Status status =
PrepareTensorForShape(converter, input, DimsAdapter(reshape_dims),
validation_only, &output_tensor, dummy_node_def);
if (expected_code == absl::StatusCode::kOk) {
TF_EXPECT_OK(status);
if (validation_only) {
EXPECT_EQ(nullptr, *output_tensor);
} else {
EXPECT_THAT(output_tensor->getDimensions(),
DimsAreArray(expected_tensor_dims));
}
} else {
EXPECT_THAT(status, StatusIs(expected_code,
HasSubstr(expected_error_msg_substr)));
}
}
}
TEST_F(ConverterTest, PrepareTensorForShape) {
for (bool input_is_tensor : {true, false}) {
Reset();
TestPrepareTensorForShape({2, 3, 5}, {2, 3, 6}, {}, input_is_tensor,
converter_.get(), weight_store_,
absl::StatusCode::kInvalidArgument,
"Incompatible shapes");
Reset();
TestPrepareTensorForShape({2, 3, 5}, {10, 3}, {10, 3}, input_is_tensor,
converter_.get(), weight_store_);
Reset();
TestPrepareTensorForShape({1, 1}, {}, {}, input_is_tensor, converter_.get(),
weight_store_);
}
Reset();
TestPrepareTensorForShape({}, {1, 1}, {1, 1}, true,
converter_.get(), weight_store_);
Reset();
TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
true, converter_.get(),
weight_store_);
Reset();
TestPrepareTensorForShape({2, 3, 5}, {-1, 2}, {15, 2},
false, converter_.get(),
weight_store_, absl::StatusCode::kInvalidArgument,
"Shape is not fully defined");
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, MaybeUpdateBatchSize) {
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(-1, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
EXPECT_EQ(123, batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(-1));
EXPECT_EQ(123, batch_size());
EXPECT_THAT(
MaybeUpdateBatchSize(124),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"Provided batch size does not match converter batch size")));
}
TEST_F(ConverterTest, AddAndGetTensorOrWeights) {
ITensorProxyPtr simple_tensor;
TRT_TensorOrWeights tensor(simple_tensor);
EXPECT_EQ(-1, tensor.batch_size());
TF_EXPECT_OK(MaybeUpdateBatchSize(123));
TF_EXPECT_OK(AddTensorOrWeights("my_tensor", tensor));
TRT_TensorOrWeights added_tensor;
TF_EXPECT_OK(GetTensorOrWeights("my_tensor", &added_tensor));
EXPECT_EQ(123, added_tensor.batch_size());
EXPECT_THAT(AddTensorOrWeights("my_tensor", tensor),
StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("tensor/weights my_tensor already exist")));
}
template <typename T>
void TestGetWeightRange(ConverterTest* test, TrtWeightStore* weight_store) {
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(DataTypeToEnum<T>::v(), &trt_type));
TRT_ShapedWeights weights =
weight_store->GetTempWeights(trt_type, CreateDims({2, 3})).value();
const std::vector<T> values = {T(3), T(1), T(2), T(6), T(5), T(4)};
absl::c_copy(values, weights.GetPointer<T>());
float out_min = 0.0f;
float out_max = 0.0f;
TF_EXPECT_OK(test->GetWeightRange(weights, &out_min, &out_max));
EXPECT_EQ(1.0f, out_min);
EXPECT_EQ(6.0f, out_max);
}
TEST_F(ConverterTest, GetWeightRange) {
TestGetWeightRange<float>(this, weight_store_);
TestGetWeightRange<Eigen::half>(this, weight_store_);
TestGetWeightRange<int32>(this, weight_store_);
}
TEST_F(ConverterTest, ProvideQuantizationRange) {
ITensorProxyPtr simple_tensor;
converter_->ProvideQuantizationRange(&simple_tensor, 0.0f, 6.0f);
EXPECT_EQ(6.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, 1.0f, 6.0f);
EXPECT_EQ(6.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -8.0f, 6.0f);
EXPECT_EQ(8.0f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -8.123f, -6.123f);
EXPECT_EQ(8.123f, quantization_ranges_proxy()[&simple_tensor]);
converter_->ProvideQuantizationRange(&simple_tensor, -6.123f, 6.123f);
EXPECT_EQ(6.123f, quantization_ranges_proxy()[&simple_tensor]);
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, MaybeApplyQuantizationRanges) {
ITensorProxyPtr input;
ITensorProxyPtr not_infer;
Logger& logger = *Logger::GetLogger();
auto int8_converter = Converter::Create(TrtPrecisionMode::INT8,
true, &logger,
true,
"")
.value();
int8_converter->ProvideQuantizationRange(&input, -5.0f, 5.0f);
  int8_converter->ProvideQuantizationRange(&not_infer, -100.0f, 100.0f);
int8_converter->MaybeApplyQuantizationRanges();
EXPECT_EQ(input->getDynamicRangeMax(), 5.0f);
EXPECT_EQ(not_infer->getDynamicRangeMax(), 100.0f);
EXPECT_THAT(int8_converter->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, GetTrtBroadcastShape) {
const bool kIsTensor = true;
const bool kIsNotTensor = false;
auto symmetric_test = [this](const std::vector<int>& operand_1_shape,
const std::vector<int>& operand_2_shape,
const bool operand_1_is_tensor,
const bool operand_2_is_tensor,
const std::vector<int>& expected_operand_1_shape,
const std::vector<int>& expected_operand_2_shape,
absl::StatusCode expected_code =
absl::StatusCode::kOk,
const char* expected_error_msg_substr = "",
const int operand_1_batch_size = -1,
const int operand_2_batch_size = -1) {
auto create_tensor_or_weights = [](const std::vector<int>& shape,
bool is_tensor, int batch_size = -1) {
if (is_tensor) {
return TRT_TensorOrWeights(nvinfer1::DataType::kFLOAT,
CreateDims(shape), batch_size);
}
TRT_ShapedWeights weights;
weights.Shape() = CreateDims(shape);
return TRT_TensorOrWeights(weights);
};
nvinfer1::Dims operand_1_new_dims, operand_2_new_dims;
TRT_TensorOrWeights operand_1 = create_tensor_or_weights(
operand_1_shape, operand_1_is_tensor, operand_1_batch_size);
TRT_TensorOrWeights operand_2 = create_tensor_or_weights(
operand_2_shape, operand_2_is_tensor, operand_2_batch_size);
EXPECT_THAT(
GetTrtBroadcastShape(operand_1, operand_2, true,
true, &operand_1_new_dims,
&operand_2_new_dims),
StatusIs(expected_code, HasSubstr(expected_error_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(operand_1_new_dims, DimsAreArray(expected_operand_1_shape));
EXPECT_THAT(operand_2_new_dims, DimsAreArray(expected_operand_2_shape));
}
EXPECT_THAT(
GetTrtBroadcastShape(operand_2, operand_1, true,
true, &operand_2_new_dims,
&operand_1_new_dims),
StatusIs(expected_code, HasSubstr(expected_error_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(operand_1_new_dims, DimsAreArray(expected_operand_1_shape));
EXPECT_THAT(operand_2_new_dims, DimsAreArray(expected_operand_2_shape));
}
};
symmetric_test(
{1}, {1}, kIsNotTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting requires at least one of the operands be tensors");
symmetric_test({1, 1, 1}, {2}, kIsTensor, kIsNotTensor, {1, 1, 1}, {1, 1, 2});
symmetric_test({1, 1, 2}, {2}, kIsTensor, kIsNotTensor, {1, 1, 2}, {1, 1, 2});
symmetric_test({1, 3, 2}, {1}, kIsTensor, kIsNotTensor, {1, 3, 2}, {1, 1, 1});
symmetric_test({1, 1, 1}, {2, 3}, kIsTensor, kIsNotTensor, {1, 1, 1},
{1, 2, 3});
symmetric_test({1, 1, 1}, {2, 3, 4}, kIsTensor, kIsNotTensor, {1, 1, 1},
{2, 3, 4});
symmetric_test({1, 1, 1}, {1, 2, 3, 4}, kIsTensor, kIsNotTensor, {1, 1, 1},
{2, 3, 4});
symmetric_test({1, 3, 4}, {1, 2, 1, 4}, kIsTensor, kIsNotTensor, {1, 3, 4},
{2, 1, 4});
symmetric_test({1, 1, 1}, {2, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Infeasible broadcast scheme");
symmetric_test({1, 1, 1}, {2, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Infeasible broadcast scheme",
2);
symmetric_test({1, 1, 1}, {1, 1, 1, 1, 1}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 4 vs broadcast #dims 5)");
symmetric_test({3}, {1, 1, 3}, kIsTensor, kIsNotTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 2 vs broadcast #dims 3)",
2);
symmetric_test({1, 1, 1}, {1, 1}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 3 vs broadcast #dims 4)");
symmetric_test({1, 3}, {3}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 2 vs broadcast #dims 3)");
symmetric_test({1, 3, 4}, {2, 1, 4}, kIsTensor, kIsTensor, {1, 3, 4},
{2, 1, 4});
symmetric_test({1, 1, 1}, {1, 1, 1, 1}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims 4 vs broadcast #dims 5)");
symmetric_test({2, 3}, {7, 5}, kIsTensor, kIsTensor, {}, {},
absl::StatusCode::kInvalidArgument,
"Infeasible broadcast scheme");
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
TEST_F(ConverterTest, CreateConstantLayer) {
for (auto dtype : {nvinfer1::DataType::kFLOAT, nvinfer1::DataType::kINT32}) {
TRT_ShapedWeights weights =
weight_store_->GetTempWeights(dtype, CreateDims({2, 3, 5})).value();
ITensorProxyPtr tensor =
converter_->CreateConstantLayer(weights, CreateDims({3, 10}));
ASSERT_NE(nullptr, tensor->trt_tensor());
EXPECT_EQ(dtype, tensor->getType())
<< "Expected " << DebugString(dtype) << " vs. actual "
<< DebugString(tensor->getType());
EXPECT_THAT(tensor->getDimensions(), DimsAreArray({3, 10}));
}
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
class ConvertGraphDefToEngineTest : public ::testing::Test {
public:
Status RunConvertGraphDefToEngine(Scope* s) {
GraphDef gdef;
TF_EXPECT_OK(s->ToGraphDef(&gdef));
std::vector<PartialTensorShape> input_shapes;
int batch_size = -1;
for (const NodeDef& node : gdef.node()) {
absl::string_view node_name(node.name());
if (absl::ConsumePrefix(&node_name, IONamePrefixes::kInputPHName)) {
int port = -1;
EXPECT_TRUE(absl::SimpleAtoi(node_name, &port)) << node.name();
if (input_shapes.size() < port + 1) input_shapes.resize(port + 1);
input_shapes[port] =
PartialTensorShape(node.attr().at("shape").shape());
if (batch_size == -1) {
batch_size = input_shapes[port].dim_size(0);
} else {
EXPECT_EQ(batch_size, input_shapes[port].dim_size(0));
}
}
}
return ConvertGraphDefToEngine(
gdef, nullptr, TrtPrecisionMode::FP32, 1,
64 << 20, input_shapes, &logger_,
nullptr, nullptr, &engine_,
false, true,
nullptr, nullptr,
"TRTEngineOp_000_000", false);
}
protected:
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
private:
Logger& logger_ = *Logger::GetLogger();
};
TEST_F(ConvertGraphDefToEngineTest, IdentityGraph) {
Scope s = Scope::NewRootScope();
auto input =
ops::Placeholder(s.WithOpName(StrCat(IONamePrefixes::kInputPHName, 0)),
DT_FLOAT, ops::Placeholder::Shape({1, 1}));
auto output = ops::Identity(s.WithOpName("identity1"), input);
output = ops::Identity(s.WithOpName("identity2"), output);
output = ops::Identity(s.WithOpName(StrCat(IONamePrefixes::kOutputPHName, 0)),
output);
TF_EXPECT_OK(RunConvertGraphDefToEngine(&s));
}
Status GetShapeFromDataVec(DataVec input_data,
std::vector<TensorShape>* shape_vec) {
shape_vec->reserve(input_data.size());
std::transform(input_data.begin(), input_data.end(),
std::back_inserter(*shape_vec),
[](InputOutputData x) { return x.tensor.shape(); });
return OkStatus();
}
template <typename T>
inline absl::Span<const T> GetSpanForData(const InputOutputData& data) {
const auto& tensor_map = data.tensor.flat<T>();
return absl::Span<const T>(tensor_map.data(), tensor_map.size());
}
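// Converts the test tensor's contents to a float vector for comparison;
// supported dtypes are float, half, int32 and (with TRT >= 8.2) bool.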
std::vector<float> GetDataAsFloat(InputOutputData& data) {
const auto dType = data.tensor.dtype();
if (dType == DT_FLOAT) {
auto span = GetSpanForData<float>(data);
return std::vector<float>(span.begin(), span.end());
}
if (dType == DT_HALF) {
return CastVector<Eigen::half, float>(GetSpanForData<Eigen::half>(data));
}
if (dType == DT_INT32) {
return CastVector<int32, float>(GetSpanForData<int32>(data));
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
if (dType == DT_BOOL) {
return CastVector<bool, float>(GetSpanForData<bool>(data));
}
#endif
LOG(FATAL) << "DataType not supported for testing " << DataTypeString(dType);
return {};
}
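// Base fixture for op converter tests: owns a Converter, a CUDA stream and a
// GPU-managed allocator, and provides helpers to add test tensors/weights,
// run validation and conversion, and build and execute the resulting engine.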
class OpConverterTest : public ::testing::Test {
public:
OpConverterTest()
: tensor_buffer_allocator_(new GpuManagedAllocator()),
scope_(Scope::NewRootScope()) {
QCHECK_EQ(0, cudaStreamCreate(&stream_));
Reset();
}
~OpConverterTest() noexcept override {
QCHECK_EQ(0, cudaStreamDestroy(stream_));
}
Status GetTensorOrWeights(const string& name, TRT_TensorOrWeights* output) {
return converter_->GetTensorOrWeights(name, output);
}
void Reset(TrtPrecisionMode precision_mode_to_test = TrtPrecisionMode::FP32,
TrtTestMode trt_mode = TrtTestMode::kImplicitBatch,
OpKernelContext* ctx = nullptr) {
converter_.reset(nullptr);
engine_.reset(nullptr);
converter_ =
std::move(Converter::Create(precision_mode_to_test,
false, &logger_,
trt_mode ==
TrtTestMode::kImplicitBatch,
"",
false, ctx)
.value());
scope_ = Scope::NewRootScope();
}
template <typename T>
Tensor AsTensor(gtl::ArraySlice<T> vals) {
Tensor ret(tensor_buffer_allocator_.get(), DataTypeToEnum<T>::value,
{static_cast<int64_t>(vals.size())});
std::copy_n(vals.data(), vals.size(), ret.flat<T>().data());
return ret;
}
template <typename T>
Tensor AsTensor(gtl::ArraySlice<T> vals,
const TensorShape& shape) {
Tensor ret(tensor_buffer_allocator_.get(), DataTypeToEnum<T>::value,
{static_cast<int64_t>(vals.size())});
CHECK(ret.CopyFrom(AsTensor(vals), shape));
return ret;
}
template <typename T, typename S>
void transformTensor(const std::vector<T>& vals, Tensor& ret) {
std::transform(vals.begin(), vals.end(), ret.flat<S>().data(),
[](const T in_val) -> S { return static_cast<S>(in_val); });
}
template <typename T, typename S>
void transformWeights(const std::vector<T>& vals,
TRT_ShapedWeights& weights) {
std::transform(vals.begin(), vals.end(), weights.GetPointer<S>(),
[](const T in_val) -> S { return static_cast<S>(in_val); });
}
template <typename T>
Tensor AsTensor(const std::vector<T>& vals,
const std::vector<int>& input_dims, DataType tf_type) {
Tensor ret(tensor_buffer_allocator_.get(), tf_type,
{static_cast<int64_t>(vals.size())});
if (tf_type == DT_FLOAT) {
transformTensor<T, float>(vals, ret);
} else if (tf_type == DT_HALF) {
transformTensor<T, Eigen::half>(vals, ret);
} else if (tf_type == DT_INT32) {
transformTensor<T, int32>(vals, ret);
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
} else if (tf_type == DT_BOOL) {
transformTensor<T, bool>(vals, ret);
#endif
} else {
LOG(FATAL) << "Cannot create tensor with type "
<< DataTypeString(tf_type);
}
TensorShape shape;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(input_dims, &shape));
CHECK(ret.CopyFrom(ret, shape));
return ret;
}
template <typename T>
Tensor AsTensor(const std::vector<int>& vals,
const std::vector<int>& input_dims, DataType tf_type) {
const auto& conv_vals = CastVector<int, T>(vals);
return AsTensor(conv_vals, input_dims, tf_type);
}
template <typename T>
Tensor ConstructTensor(int data_size, const T& value = T()) {
std::vector<T> values(data_size, value);
return AsTensor<T>(values);
}
template <typename T>
Tensor ConstructTensor(int data_size, const T& value, DataType tf_type) {
std::vector<T> values(data_size, value);
return AsTensor<T>(values, {data_size}, tf_type);
}
void CheckDataTypeMatches(const DataVec& datas) {
if (VLOG_IS_ON(2)) {
int nbBindings = engine_->getNbBindings();
VLOG(2) << "Number of engine bindings: " << nbBindings;
for (int i = 0; i < nbBindings; i++) {
VLOG(2) << "Binding " << i << " name: " << engine_->getBindingName(i);
}
}
for (const auto& data : datas) {
VLOG(2) << "Checking if data type matches for tensor " << data.name;
const int input_index = engine_->getBindingIndex(data.name.c_str());
ASSERT_NE(-1, input_index);
const nvinfer1::DataType trt_dtype =
engine_->getBindingDataType(input_index);
DataType tf_type;
TF_ASSERT_OK(TrtTypeToTfType(trt_dtype, &tf_type));
ASSERT_EQ(data.tensor.dtype(), tf_type)
<< DataTypeString(data.tensor.dtype()) << " vs. "
<< DataTypeString(tf_type);
}
}
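  // Marks the requested outputs, builds a CUDA engine (collecting optimization
  // profiles in explicit-batch mode), then executes it on input_data and
  // writes the results into output_data.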
Status BuildAndRun(const DataVec& input_data, DataVec* output_data,
const int batch_size = 1) {
std::vector<Converter::EngineOutputInfo> output_info;
for (const auto& data : *output_data) {
nvinfer1::DataType trt_type;
TF_RETURN_IF_ERROR(TfTypeToTrtType(data.tensor.dtype(), &trt_type));
output_info.push_back({data.name, data.name, trt_type});
}
TF_RETURN_IF_ERROR(converter_->RenameAndMarkOutputTensors(output_info));
if (engine_.get() != nullptr) {
return errors::Internal("Engine already exists");
}
TrtShapeOptimizationProfile profiles;
if (!converter_->use_implicit_batch()) {
std::vector<bool> input_mask(input_data.size());
for (int i = 0; i < input_data.size(); i++) {
input_mask[i] = (input_data[i].tensor.dtype() != DataType::DT_RESOURCE);
}
profiles.SetInputMask(input_mask);
profiles.SetShapeTensorMask(converter_->network());
TF_RETURN_IF_ERROR(profiles.CollectShapeValues(input_data));
std::vector<TensorShape> input_shapes;
TF_RETURN_IF_ERROR(GetShapeFromDataVec(input_data, &input_shapes));
profiles.AddShape(input_shapes);
std::vector<PartialTensorShape> input_partial_shapes;
TF_RETURN_IF_ERROR(
GetNetworkInputShapes(converter_->network(), &input_partial_shapes));
profiles.InitProfiles(input_partial_shapes, ProfileStrategy::kRange);
}
TF_RETURN_IF_ERROR(
converter_->BuildCudaEngine(&engine_,
batch_size,
1 << 26,
nullptr,
nullptr,
&profiles));
CHECK_NOTNULL(engine_.get());
CheckDataTypeMatches(input_data);
CheckDataTypeMatches(*output_data);
const int num_bindings = input_data.size() + output_data->size();
std::vector<void*> buffers(num_bindings);
if (engine_->getNbBindings() != num_bindings) {
return errors::Internal("Number of bindings do not match");
}
TrtUniquePtrType<nvinfer1::IExecutionContext> execution_context(
engine_->createExecutionContext());
TF_RETURN_IF_ERROR(
SetTrtEngineInputs(engine_.get(), execution_context.get(), 0, buffers,
converter_->use_implicit_batch(), batch_size,
profiles, nullptr, &input_data));
TF_RETURN_IF_ERROR(SetTrtEngineOutputs(
engine_.get(), execution_context.get(), 0, buffers,
converter_->use_implicit_batch(), batch_size, nullptr, output_data));
TF_RETURN_IF_ERROR(TrtEnqueue(execution_context.get(), buffers, stream_,
converter_->use_implicit_batch(),
batch_size));
cudaStreamSynchronize(stream_);
return OkStatus();
}
void AddTestTensorWithTFDims(
const string& name, const std::vector<int32>& dims,
nvinfer1::DataType trt_type = nvinfer1::DataType::kFLOAT,
Status add_input_status = OkStatus()) {
DataType tf_type;
TF_ASSERT_OK(TrtTypeToTfType(trt_type, &tf_type));
ops::Placeholder::Attrs attrs;
TF_EXPECT_OK(TensorShapeUtils::MakeShape(dims, &attrs.shape_));
auto input = ops::Placeholder(scope_.WithOpName(name), tf_type, attrs);
node_inputs_[name] = input.output;
auto dims_adap =
DimsAdapter::Create(attrs.shape_, converter_->use_implicit_batch());
if (converter_->use_implicit_batch() && !dims_adap.ok()) {
ASSERT_EQ(add_input_status, dims_adap.status());
return;
} else {
TF_EXPECT_OK(dims_adap.status());
}
if (!converter_->use_implicit_batch() || dims_adap->IsStatic()) {
int batch_size = dims.size() > 0 ? dims[0] : 0;
Status status = converter_->AddInputTensor(
name, trt_type, dims_adap->AsTrtDims(), batch_size);
ASSERT_EQ(add_input_status, status);
}
}
Status AddTensorOrWeights(const string& name, TRT_TensorOrWeights input) {
return converter_->AddTensorOrWeights(name, input);
}
void AddTestTensor(
const string& name, const std::vector<int32>& dims, int batch_size = 1,
nvinfer1::DataType trt_dtype = nvinfer1::DataType::kFLOAT) {
DimsAdapter adap(dims);
std::vector<int32_t> dims_vec;
TF_CHECK_OK(adap.Prepend(batch_size).Vector(&dims_vec));
AddTestTensorWithTFDims(name, dims_vec, trt_dtype);
if (adap.IsStatic()) {
ASSERT_EQ(batch_size, converter_->batch_size_);
}
}
template <typename T = int32>
void AddTestWeights(const string& name, const std::vector<int>& dims,
const std::vector<T>& values_inp, DataType tf_type,
bool fix_values = true) {
const DimsAdapter dims_adap(dims);
const int64_t num_elements = dims_adap.Volume();
std::vector<T> values(values_inp);
if (num_elements != values.size()) {
if (fix_values) {
AdjustVectorByDims<T>(values, num_elements, name, "AddTestWeights");
} else {
FAIL() << "Unable to create test weights: "
<< (num_elements > values.size() ? "not enough" : "to many")
<< " values specified: " << values.size() << " vs. "
<< num_elements << " defined by dims";
}
}
Tensor t = AsTensor<T>(values, dims, tf_type);
node_inputs_[name] = ops::Const(scope_.WithOpName(name), t);
nvinfer1::DataType dtype;
TF_ASSERT_OK(TfTypeToTrtType(tf_type, &dtype));
QCHECK_EQ(num_elements, values.size())
<< num_elements << " vs " << values.size();
TRT_ShapedWeights weights(dtype);
if (num_elements) {
weights =
converter_->weight_store_.GetTempWeights(dtype, dims_adap.AsTrtDims())
.value();
if (tf_type == DT_FLOAT) {
transformWeights<T, float>(values, weights);
} else if (tf_type == DT_HALF) {
transformWeights<T, Eigen::half>(values, weights);
} else if (tf_type == DT_INT32) {
transformWeights<T, int32>(values, weights);
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
} else if (tf_type == DT_BOOL) {
transformWeights<T, bool>(values, weights);
#endif
} else {
LOG(FATAL) << "Cannot create tensor with type "
<< DataTypeString(tf_type);
}
}
TF_EXPECT_OK(
converter_->AddTensorOrWeights(name, TRT_TensorOrWeights{weights}));
}
template <typename T = int32>
void AddTestWeights(const string& name, const std::vector<int>& dims,
const std::vector<T>& value, bool fix_values = true) {
AddTestWeights(name, dims, value, DataTypeToEnum<T>::value, fix_values);
}
Status RunValidation(const Node* node) {
grappler::GrapplerItem item;
TF_EXPECT_OK(scope_.ToGraphDef(&item.graph));
grappler::GraphProperties graph_properties(item);
TF_EXPECT_OK(graph_properties.InferStatically(true));
TrtNodeValidator validator(
graph_properties, converter_->precision_mode(),
false,
converter_->use_implicit_batch(),
false);
return validator.IsTensorRTCandidate(node);
}
void RunConversion(const Node* node,
absl::StatusCode expected_code = absl::StatusCode::kOk,
absl::string_view expected_msg_substr = "") {
EXPECT_THAT(converter_->ConvertNode(node->def()),
StatusIs(expected_code, HasSubstr(expected_msg_substr)));
if (expected_code == absl::StatusCode::kOk) {
EXPECT_THAT(converter_->network(), LayerNamesNonEmpty());
}
}
void RunValidationAndConversion(
const NodeDef& node_def,
absl::StatusCode expected_code = absl::StatusCode::kOk,
absl::string_view expected_msg_substr = "",
bool should_run_conversion = true) {
Graph* graph = scope_.graph();
Status status;
Node* node = graph->AddNode(std::move(node_def), &status);
TF_EXPECT_OK(status);
for (int i = 0; i < node_def.input().size(); ++i) {
const string& input_name = node_def.input(i);
const auto& itr = node_inputs_.find(input_name);
QCHECK(itr != node_inputs_.end());
const Output& input = itr->second;
graph->AddEdge(input.node(), input.index(), node, i);
}
status = RunValidation(node);
if (should_run_conversion && status.ok()) {
RunConversion(node, expected_code, expected_msg_substr);
} else {
EXPECT_THAT(status,
StatusIs(expected_code, HasSubstr(expected_msg_substr)));
}
}
void RunValidationAndConversion(
const NodeDef& node_def, const Status& status,
const std::string& output_name,
const std::vector<std::vector<int>>& exp_out_dims) {
RunValidationAndConversion(node_def,
static_cast<absl::StatusCode>(status.code()),
status.message(), true);
if (status.ok()) {
if (converter_->use_implicit_batch()) {
for (int i = 0; i < exp_out_dims.size(); i++) {
TRT_TensorOrWeights output;
string name = i == 0 ? output_name : StrCat(output_name, ":", i);
TF_EXPECT_OK(GetTensorOrWeights(name.c_str(), &output));
ASSERT_TRUE(output.is_tensor());
if (!exp_out_dims[i].empty()) {
auto out_dims = std::vector<int>(exp_out_dims[i].begin() + 1,
exp_out_dims[i].end());
VLOG(2) << "Testing output shape for tensor " << name;
EXPECT_THAT(output.tensor()->getDimensions(),
DimsAreArray(out_dims));
}
}
}
}
}
std::unordered_map<ITensorProxyPtr*, float>& quantization_ranges_proxy() {
return converter_->quantization_ranges_proxy_;
}
std::unordered_map<nvinfer1::ITensor*, float>& quantization_ranges() {
return converter_->quantization_ranges_;
}
protected:
template <typename T>
void AdjustVectorByDims(std::vector<T>& values, size_t num_elements,
const string& name, const char* callingFunc) {
const auto old_size = values.size();
if (num_elements > old_size) {
const std::vector<T> zeros(num_elements - old_size, 0);
values.reserve(num_elements);
values.insert(values.end(), zeros.begin(), zeros.end());
VLOG(2) << "In function " << callingFunc << " the vector '" << name
<< "' was extended by " << num_elements - old_size << " zeros";
} else {
values.resize(num_elements);
VLOG(2) << "Only first " << num_elements << " out of " << old_size
<< " elements of the vector '" << name
<< "' will be used in function" << callingFunc;
}
}
public:
std::unique_ptr<Converter> converter_;
protected:
Logger& logger_ = *Logger::GetLogger();
private:
TrtUniquePtrType<nvinfer1::ICudaEngine> engine_;
cudaStream_t stream_;
std::unique_ptr<Allocator> tensor_buffer_allocator_;
public:
Scope scope_;
protected:
std::unordered_map<string, Output> node_inputs_;
};
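// Extends OpConverterTest with an OpKernelContext so that variable ops
// (VariableV2, ReadVariableOp) can be executed during conversion.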
class VariableOpConverterTest : public OpConverterTest {
public:
void Reset(TrtPrecisionMode precision_mode_to_test = TrtPrecisionMode::FP32,
TrtTestMode trt_mode = TrtTestMode::kImplicitBatch) {
OpConverterTest::Reset(precision_mode_to_test, trt_mode, context_.get());
}
void CreateContext(const NodeDef& node_def, OpKernel** kernel,
OpKernelContext** context) {
std::unique_ptr<Device> device_(
DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
Device* device_ptr = device_.get();
device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device_));
managed_allocator_ = std::make_unique<GpuManagedAllocator>();
Allocator* allocator = managed_allocator_.get();
step_container_ =
std::make_unique<ScopedStepContainer>(0, [](const string&) {});
slice_reader_cache_wrapper_ =
std::make_unique<checkpoint::TensorSliceReaderCacheWrapper>();
flib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
thread_pool_ =
std::make_unique<thread::ThreadPool>(Env::Default(), "default",
1);
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
thread_pool_.get());
FunctionLibraryRuntime* flib = pflr_->GetFLR(device_ptr->name());
ResourceMgr* resource_mgr = device_ptr->resource_manager();
TF_CHECK_OK(NodeProperties::CreateFromNodeDef(
node_def, OpRegistry::Global(), &props_));
OpKernel* kernel_ptr = nullptr;
TF_CHECK_OK(CreateOpKernel(DEVICE_GPU, device_ptr, allocator, flib,
resource_mgr, props_, TF_GRAPH_DEF_VERSION,
&kernel_ptr));
op_kernel_ = std::unique_ptr<OpKernel>(kernel_ptr);
auto* dev_info = device_ptr->tensorflow_accelerator_device_info();
CHECK_NOTNULL(dev_info);
DeviceContext* device_context = dev_info->default_context;
params_.device = device_ptr;
params_.op_kernel = op_kernel_.get();
params_.resource_manager = resource_mgr;
params_.frame_iter = FrameAndIter(0, 0);
params_.inputs = inputs_;
params_.step_container = step_container_.get();
params_.function_library = flib;
params_.slice_reader_cache = slice_reader_cache_wrapper_.get();
params_.op_device_context = device_context;
    context_ = std::make_unique<OpKernelContext>(&params_);
*kernel = op_kernel_.get();
*context = context_.get();
}
void AddTestResource(const string& name, const ResourceHandle& resource) {
node_inputs_[name] =
ops::Placeholder(scope_.WithOpName("my_handle"), DT_RESOURCE);
TF_EXPECT_OK(AddTensorOrWeights(name, TRT_TensorOrWeights{resource}));
}
private:
std::unique_ptr<DeviceMgr> device_mgr_;
std::unique_ptr<Allocator> managed_allocator_;
std::unique_ptr<ScopedStepContainer> step_container_;
std::unique_ptr<checkpoint::TensorSliceReaderCacheWrapper>
slice_reader_cache_wrapper_;
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<thread::ThreadPool> thread_pool_;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_;
OpKernelContext::Params params_;
std::unique_ptr<OpKernel> op_kernel_;
std::unique_ptr<OpKernelContext> context_;
std::shared_ptr<const NodeProperties> props_;
absl::InlinedVector<TensorValue, 4> inputs_;
};
struct TestParamBase {
std::vector<int> input_dims;
std::vector<int> partial_input_dims;
std::vector<int> expected_output_dims;
std::vector<int> param;
Status status;
Status runtime_status;
};
std::ostream& operator<<(std::ostream& os, const TestParamBase& p) {
os << "input_dims" << PrintToString(p.input_dims);
if (!p.partial_input_dims.empty()) {
os << ", partial_input_dims" << PrintToString(p.partial_input_dims);
}
if (!p.expected_output_dims.empty()) {
os << ", exp_out_dims" << PrintToString(p.expected_output_dims);
}
if (!p.param.empty()) {
os << ", param" << PrintToString(p.param);
}
os << ", " << p.status;
return os;
}
template <typename T>
const std::string get_debug_string_for_vector(const std::vector<T>& vector,
absl::string_view pComment,
absl::string_view name,
absl::string_view type = "") {
const std::string t1 = absl::StrCat(pComment, " '", name, "': Dims(nbDims=");
const std::string t2 = absl::StrJoin(vector, ",");
const std::string t3 = type != "" ? absl::StrCat(") of type ", type) : ")";
std::stringstream stream;
stream << t1 << vector.size() << ", d=" << t2 << t3;
return stream.str();
}
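// Parameterized base fixture: each instantiation fixes the TRT test mode
// (implicit/explicit/dynamic batch), the TF data type, and the converter
// precision for the tests derived from it.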
class ParameterizedOpConverterTestBase
: public OpConverterTest,
public ::testing::WithParamInterface<
std::tuple<TrtTestMode, DataType, TrtPrecisionMode>> {
public:
ParameterizedOpConverterTestBase()
: trt_mode_(std::get<0>(GetParam())),
tf_type_(std::get<1>(GetParam())),
converter_precision_(std::get<2>(GetParam())) {
LOG(INFO) << "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%";
LOG(INFO) << "tf_type_: " << DebugString(tf_type_);
LOG(INFO) << "trt_mode_: " << DebugString(trt_mode_);
LOG(INFO) << "converter_precision_: " << DebugString(converter_precision_);
LOG(INFO) << "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%";
}
void Reset() {
OpConverterTest::Reset(converter_precision_, trt_mode_);
input_data_.clear();
}
void Reset(TrtPrecisionMode precision) {
OpConverterTest::Reset(precision, trt_mode_);
input_data_.clear();
}
DataType get_tf_type() { return tf_type_; }
TrtTestMode get_trt_mode() { return trt_mode_; }
TrtPrecisionMode get_converter_precision() { return converter_precision_; }
template <typename T = int>
void AddTestTensor(const string& name, const std::vector<int32>& dims,
DataType tf_type, const std::vector<T>& values_inp,
const std::vector<int32>& partial_input_shape_dims = {},
Status add_input_status = OkStatus(),
bool fix_values = true) {
std::vector<T> values(values_inp);
VLOG(2) << "**** AddTestTensor for " << name
<< " ***** dims empty() = " << dims.empty()
<< " tf_type = " << DebugString(tf_type);
if (!dims.empty()) {
const auto num_elements = std::accumulate(
std::begin(dims), std::end(dims), 1, std::multiplies<double>());
if (!values.empty() && num_elements != values.size()) {
if (fix_values) {
AdjustVectorByDims(values, num_elements, name, "AddTestTensor");
} else {
LOG(WARNING) << "Expected Test Tensor Shape: " << DebugString(dims)
<< ", Received Input Tensor: " << DebugString(values);
}
}
}
std::vector<int32> partial_shape;
if (!partial_input_shape_dims.empty()) {
partial_shape = partial_input_shape_dims;
} else {
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_shape = std::vector<int32>(dims.size(), -1);
} else {
partial_shape = dims;
}
if (VLOG_IS_ON(2)) {
VLOG(2) << get_debug_string_for_vector(partial_shape,
"Using partial_shape for", name);
}
}
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(tf_type, &trt_type));
AddTestTensorWithTFDims(name, partial_shape, trt_type, add_input_status);
if (!values.empty()) {
if (VLOG_IS_ON(2)) {
VLOG(2) << get_debug_string_for_vector(values, "Adding test tensor for",
name, DataTypeString(tf_type));
}
InputOutputData data{name, AsTensor(values, dims, tf_type)};
VLOG(2) << "Added tensor: " << data.name << " with dtype "
<< DataTypeString(data.tensor.dtype());
input_data_.push_back(data);
}
}
template <typename T = int>
void AddTestTensor(const string& name, const std::vector<int32>& dims,
const std::vector<T>& values = {},
const std::vector<int32>& partial_input_shape_dims = {}) {
AddTestTensor<T>(name, dims, tf_type_, values, partial_input_shape_dims);
}
void BuildAndRun(const string& name,
const std::vector<std::vector<int>>& expected_output_dims,
const Status& expected_runtime_status,
const std::vector<Matcher<std::vector<float>>>& matcher,
const std::vector<DataType>& out_tf_types = {}) {
TensorShape shape;
const int n_output = expected_output_dims.size();
ASSERT_EQ(n_output, matcher.size());
DataVec output_data;
for (int i = 0; i < n_output; i++) {
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(expected_output_dims[i], &shape));
string out_name = (i == 0) ? name : StrCat(name, ":", i);
DataType out_tf_type =
out_tf_types.size() > i ? out_tf_types[i] : tf_type_;
InputOutputData data{
out_name, ConstructTensor(shape.num_elements(), 0, out_tf_type)};
output_data.push_back(data);
}
const int batch_size =
input_data_.empty() ||
TensorShapeUtils::IsScalar(input_data_[0].tensor.shape())
? 1
: input_data_[0].tensor.shape().dim_size(0);
Status stat =
OpConverterTest::BuildAndRun(input_data_, &output_data, batch_size);
ASSERT_EQ(expected_runtime_status.ok(), stat.ok())
<< "expected status: " << expected_runtime_status
<< ", actual status: " << stat;
if (expected_runtime_status.ok() && stat.ok()) {
for (int i = 0; i < n_output; i++) {
TF_EXPECT_OK(
TensorShapeUtils::MakeShape(expected_output_dims[i], &shape));
EXPECT_TRUE(output_data[i].tensor.shape() == shape)
<< "Expected shape: " << shape.DebugString() << ", actual shape: "
<< output_data[i].tensor.shape().DebugString();
EXPECT_THAT(GetDataAsFloat(output_data[i]), matcher[i]);
}
}
}
void TestOpConverterMultiOut(
const NodeDef& node_def,
const std::vector<std::vector<int>>& expected_output_dims,
const Status& expected_conversion_status,
const Status& expected_runtime_status,
const std::vector<Matcher<std::vector<float>>>& matcher,
const std::vector<DataType>& out_tf_type = {}) {
const auto& name = node_def.name();
RunValidationAndConversion(node_def, expected_conversion_status, name,
expected_output_dims);
if (expected_conversion_status.ok()) {
BuildAndRun(name, expected_output_dims, expected_runtime_status, matcher,
out_tf_type);
}
}
void TestOpConverter(const NodeDef& node_def,
const std::vector<int>& expected_output_dims,
const Status& expected_conversion_status,
const Status& expected_runtime_status,
const Matcher<std::vector<float>>& matcher,
const std::vector<DataType>& out_tf_types = {}) {
TestOpConverterMultiOut(
node_def, std::vector<std::vector<int>>({expected_output_dims}),
expected_conversion_status, expected_runtime_status,
std::vector<Matcher<std::vector<float>>>({matcher}), out_tf_types);
}
protected:
const TrtTestMode trt_mode_;
const DataType tf_type_;
const TrtPrecisionMode converter_precision_;
DataVec input_data_;
};
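// Shared driver for unary op tests: for every op in the map it builds the
// node, runs the expected-failure cases, then converts and compares against
// the reference function supplied in op_map.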
template <typename T>
class OpConverter_UnaryTest : public ParameterizedOpConverterTestBase {
public:
template <typename S>
void RunTests(
const string& testName, const OperationMap<S>& map,
std::map<std::string,
std::pair<std::function<NodeDef(DataType)>, T (*)(T)>>& op_map,
const std::vector<T> input_values, const std::string input_name = "input",
float max_abs_error = 0.0001, bool nan_sensitive = true) {
auto p = TestParamBase{
{1, 1, 2, 3},
{},
{1, 1, 2, 3},
};
std::vector<string> ops_to_test;
for (auto& pair : map) {
ops_to_test.push_back(pair.first);
}
for (const string& op_name : ops_to_test) {
SCOPED_TRACE(op_name);
if (!op_map.count(op_name)) {
FAIL() << testName << " op test map does not contain op " << op_name;
}
const DataType tf_type = get_tf_type();
const NodeDef& node = op_map[op_name].first(tf_type);
runExpectedToFailTest(node, input_name, input_values, op_name);
Status conv_status = OkStatus();
if (trt_mode_ == TrtTestMode::kImplicitBatch &&
(op_name == "Sign" || op_name == "Round" ||
op_name == "LogicalNot")) {
const auto& err =
convert_not_supported_implicit(op_name, node.name(), "Unary");
conv_status = errors::Unimplemented(err);
}
Reset();
const DataType input_tf_type = op_name == "Cast" ? DT_HALF : tf_type;
const DataType output_tf_type = op_name == "Cast" ? DT_FLOAT : tf_type;
AddTestTensor("input", p.input_dims, input_tf_type, input_values);
std::vector<float> output;
std::transform(input_values.begin(), input_values.end(),
std::back_inserter(output), op_map[op_name].second);
TestOpConverter(node, p.expected_output_dims, conv_status, OkStatus(),
ArrayFloatNear(output, max_abs_error, nan_sensitive),
{output_tf_type});
}
}
void runExpectedToFailTest(const NodeDef& node_def,
const std::string& input_name,
const std::vector<T>& input_values,
const std::string& op_name) {
Reset();
std::string error =
"The input \"" + input_name + "\" for " + op_name + " must be a tensor";
AddTestWeights("input", {1, 2, 3}, input_values, get_tf_type());
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
error);
Reset();
std::vector<int32> dims{};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
dims = {1};
}
error = "At least 1 dimension is required for UNARY operation '" + op_name +
"'";
AddTestTensor("input", dims);
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
error);
}
};
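// Shared driver for binary op tests: exercises every tensor/weight operand
// combination and checks the broadcast result (or the expected conversion
// error) for each op in the map.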
template <typename T>
class OpConverter_BinaryTest : public ParameterizedOpConverterTestBase {
public:
template <typename S>
void RunTests(
const OperationMap<S>& map,
std::map<std::string,
std::pair<std::function<NodeDef(DataType)>, std::vector<T>>>&
op_test_info,
const std::vector<std::vector<T>>& data) {
const std::vector<DataType> bool_types{DT_BOOL}, default_types{};
std::vector<string> logical_ops{"Greater", "Less", "Equal"};
std::vector<string> combined_ops{"GreaterEqual", "LessEqual"};
const DataType tf_type = get_tf_type();
AttrValue dtype;
dtype.set_type(tf_type);
std::map<std::string, NodeDef> nodes;
for (const auto op_name : combined_ops) {
nodes[op_name] = MakeNodeDef("my_binary", op_name, {"input1", "input2"},
{{"T", dtype}});
}
for (auto& iter : map) {
const string& op_name = iter.first;
if (!op_test_info.count(op_name)) {
FAIL() << "Binary op test map does not contain op " << op_name;
}
const auto comb_op = find_name(op_name, combined_ops);
const auto& node_def =
comb_op ? nodes[op_name] : op_test_info[op_name].first(tf_type);
for (const bool operand_1_is_tensor : {true, false}) {
for (const bool operand_2_is_tensor : {true, false}) {
SCOPED_TRACE(StrCat(op_name, "_", operand_1_is_tensor ? "T" : "W",
operand_2_is_tensor ? "T" : "W"));
Reset();
if (!operand_1_is_tensor && !operand_2_is_tensor) {
runExpectedToFailTest(op_name, node_def);
continue;
}
const bool logical_op = comb_op || find_name(op_name, logical_ops);
auto conv_status = OkStatus();
if (tf_type == DT_BOOL || logical_op) {
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
conv_status =
errors::Unimplemented(convert_not_supported_implicit(
op_name, node_def.name(), "Binary"));
} else if (!logical_op &&
(!operand_1_is_tensor || !operand_2_is_tensor)) {
conv_status = errors::InvalidArgument(
"Both inputs of '", op_name, "' are expected to be tensors");
}
}
if (operand_1_is_tensor) {
AddTestTensor("input1", {2, 1, 2}, data[0]);
} else {
AddTestWeights("input1", {1, 2}, data[1], tf_type);
}
if (operand_2_is_tensor) {
AddTestTensor("input2", {2, 2, 1}, data[2]);
} else {
AddTestWeights("input2", {2, 1}, data[3], tf_type);
}
TestOpConverter(node_def, {2, 2, 2}, conv_status, OkStatus(),
ElementsAreArray(op_test_info[op_name].second),
logical_op ? bool_types : default_types);
}
}
}
}
void runExpectedToFailTest(const std::string& op_name, const NodeDef& node) {
AddTestWeights("input1", {1}, {1}, tf_type_);
AddTestWeights("input2", {1}, {1}, tf_type_);
const string error =
"Constant folding is falled back to TensorFlow, "
"binary op '" +
op_name + "' received both input as constant";
RunValidationAndConversion(node, absl::StatusCode::kUnimplemented, error);
}
};
typedef ParameterizedOpConverterTestBase OpConverter_FP32_Test;
typedef ParameterizedOpConverterTestBase OpConverter_FP32_FP16_Test;
typedef OpConverter_BinaryTest<float> OpConverter_FP32_FP16_BinaryTest;
typedef OpConverter_BinaryTest<int> OpConverter_BOOL_BinaryTest;
typedef ParameterizedOpConverterTestBase OpConverter_FP32_FP16_INT32_Test;
typedef ParameterizedOpConverterTestBase OpConverter_INT32_Test;
typedef OpConverter_UnaryTest<float> OpConverter_FP32_UnaryTest;
typedef OpConverter_UnaryTest<int> OpConverter_BOOL_Test;
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_INT32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF, DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_INT32_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_UnaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_BOOL_Test,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_BOOL),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_FP32_FP16_BinaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF),
::testing::Values(TrtPrecisionMode::FP32)));
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_BOOL_BinaryTest,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_BOOL),
::testing::Values(TrtPrecisionMode::FP32)));
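// Copies the tensor's values into the repeated proto field, omitting the
// trailing run of identical values (TensorProto repeats the last value to
// fill out the shape).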
template <typename T>
void CopyTensorElements(const Tensor& tensor, protobuf::RepeatedField<T>* out) {
out->Clear();
if (tensor.NumElements() == 0) return;
const auto flat = tensor.flat<T>();
int64 last_index = 0;
for (int64 i = 0; i < tensor.NumElements(); ++i) {
if (flat(i) != flat(last_index)) {
last_index = i;
}
}
int num_out_elements = last_index + 1;
out->Reserve(num_out_elements);
out->AddNAlreadyReserved(num_out_elements);
const T* src = flat.data();
T* dst = out->mutable_data();
std::copy(src, src + num_out_elements, dst);
}
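// Runs a VariableV2 kernel to materialize its buffer on the GPU, fills it with
// known values, and checks that conversion turns the variable into weights
// holding those values.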
template <DataType dtype, typename CType>
void TestConvertVariableV2(VariableOpConverterTest* test) {
struct TestParam {
string container;
string shared_name;
std::vector<int> dims;
float epsilon;
Status conversion_status;
};
std::vector<TestParam> test_param = {
{"", "var0", {}, 0.001, OkStatus()},
{"", "var0", {64}, 0.001, OkStatus()},
{"", "var0", {8, 16}, 0.001, OkStatus()},
{"box", "var", {8, 16}, 0.001, OkStatus()}};
for (auto p : test_param) {
NodeDef node_def;
std::vector<int64_t> dims_64(p.dims.begin(), p.dims.end());
TensorShape shape = TensorShape(absl::Span<int64_t>(dims_64));
TF_CHECK_OK(NodeDefBuilder("my_var", "VariableV2")
.Attr("dtype", dtype)
.Attr("shape", shape)
.Attr("container", p.container)
.Attr("shared_name", p.shared_name)
.Finalize(&node_def));
OpKernel* kernel;
OpKernelContext* context;
test->CreateContext(node_def, &kernel, &context);
test->Reset(TrtPrecisionMode::FP32, TrtTestMode::kDynamicShape);
int var_size = std::accumulate(p.dims.begin(), p.dims.end(), 1,
std::multiplies<int>());
std::vector<CType> expected_value;
expected_value.reserve(var_size);
for (int i = 0; i < var_size; i++) {
expected_value.push_back((CType)i);
}
kernel->Compute(context);
Tensor* tensor_ptr = context->mutable_output(0);
CHECK_NOTNULL(tensor_ptr);
AllocatorAttributes attr;
attr.set_gpu_compatible(true);
attr.set_nic_compatible(true);
OP_REQUIRES_OK(context,
context->allocate_temp(dtype, shape, tensor_ptr, attr));
auto tensor_flat = tensor_ptr->flat<CType>();
CHECK_NOTNULL(tensor_flat.data());
auto ret = cudaMemcpy(tensor_flat.data(), expected_value.data(),
expected_value.size() * sizeof(CType),
cudaMemcpyHostToDevice);
CHECK_EQ(ret, 0);
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_var", &output));
EXPECT_THAT(output.weights(),
ShapedWeightsHasDimsAndValues<CType>(p.dims, expected_value));
}
}
TEST_F(VariableOpConverterTest, ConvertVariableV2) {
TestConvertVariableV2<DT_FLOAT, float>(this);
TestConvertVariableV2<DT_HALF, Eigen::half>(this);
}
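// Same idea for ReadVariableOp: a Var resource is created and filled on the
// GPU, then conversion must produce weights matching its contents.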
template <DataType dtype, typename CType>
void TestConvertReadVariableOp(VariableOpConverterTest* test) {
struct TestParam {
string container;
string name;
std::vector<int> dims;
float epsilon;
Status conversion_status;
};
std::vector<TestParam> test_param = {
{"", "var0", {}, 0.001, OkStatus()},
{"", "var0", {64}, 0.001, OkStatus()},
{"", "var0", {8, 16}, 0.001, OkStatus()},
{"box", "var", {8, 16}, 0.001, OkStatus()}};
for (auto p : test_param) {
NodeDefBuilder::NodeOut rvo_input =
NodeDefBuilder::NodeOut("my_handle", 0, DT_RESOURCE);
NodeDef node_def;
std::vector<int64_t> dims_64(p.dims.begin(), p.dims.end());
TensorShape shape =
TensorShape(gtl::ArraySlice<int64_t>(dims_64));
TF_CHECK_OK(NodeDefBuilder("my_var", "ReadVariableOp")
.Attr("dtype", dtype)
.Attr("_shape", shape)
.Input(rvo_input)
.Finalize(&node_def));
OpKernel* kernel;
OpKernelContext* context;
test->CreateContext(node_def, &kernel, &context);
test->Reset(TrtPrecisionMode::FP32, TrtTestMode::kDynamicShape);
int var_size = std::accumulate(p.dims.begin(), p.dims.end(), 1,
std::multiplies<int>());
std::vector<CType> expected_value;
expected_value.reserve(var_size);
for (int i = 0; i < var_size; i++) {
expected_value.push_back((CType)i);
}
DtypeAndPartialTensorShape dtype_and_shape;
dtype_and_shape.dtype = dtype;
TF_CHECK_OK(PartialTensorShape::BuildPartialTensorShape(
gtl::ArraySlice<int64_t>(dims_64),
&dtype_and_shape.shape));
ResourceHandle handle = MakeResourceHandle<Var>(
context, p.container, p.name,
std::vector<DtypeAndPartialTensorShape>{dtype_and_shape});
test->AddTestResource("my_handle", handle);
Var* resource = new Var(dtype);
TF_EXPECT_OK(CreateResource(context, handle, resource));
AllocatorAttributes attr_value;
attr_value.set_gpu_compatible(true);
attr_value.set_nic_compatible(true);
TF_EXPECT_OK(
context->allocate_temp(dtype, shape, resource->tensor(), attr_value));
auto tensor_flat = resource->tensor()->flat<CType>();
CHECK(tensor_flat.data());
auto ret = cudaMemcpy(tensor_flat.data(), expected_value.data(),
expected_value.size() * sizeof(CType),
cudaMemcpyHostToDevice);
CHECK_EQ(ret, 0);
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_var", &output));
EXPECT_THAT(output.weights(),
ShapedWeightsHasDimsAndValues<CType>(p.dims, expected_value));
}
}
TEST_F(VariableOpConverterTest, ConvertReadVariableOp) {
TestConvertReadVariableOp<DT_FLOAT, float>(this);
TestConvertReadVariableOp<DT_HALF, Eigen::half>(this);
}
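// Helper that exercises conversion of Const nodes. Each test tensor is
// serialized into the NodeDef both via the packed tensor_content field and
// via the typed value fields, and the resulting TRT weights are checked for
// the expected dims and values.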
template <DataType dtype, typename InputCType, typename OutputCType>
void TestConvertConst(OpConverterTest* test) {
NodeDef node_def;
node_def.set_name("my_const");
node_def.set_op("Const");
auto reset_and_test = [&node_def, test](
const Tensor& tensor, const bool as_tensor_content,
const std::vector<int>& expected_dims,
const std::vector<OutputCType>& expected_value) {
test->Reset();
TensorProto* tensor_attr =
(*node_def.mutable_attr())["value"].mutable_tensor();
tensor_attr->Clear();
if (as_tensor_content) {
tensor.AsProtoTensorContent(tensor_attr);
} else {
tensor.shape().AsProto(tensor_attr->mutable_tensor_shape());
tensor_attr->set_dtype(tensor.dtype());
if (tensor.dtype() == DT_FLOAT) {
CopyTensorElements<float>(tensor, tensor_attr->mutable_float_val());
} else if (tensor.dtype() == DT_INT32) {
CopyTensorElements<int32>(tensor, tensor_attr->mutable_int_val());
} else {
tensor.AsProtoField(tensor_attr);
}
}
test->RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(test->GetTensorOrWeights("my_const", &output));
EXPECT_THAT(output.weights(), ShapedWeightsHasDimsAndValues<OutputCType>(
expected_dims, expected_value));
};
auto& attr = *node_def.mutable_attr();
attr["dtype"].set_type(dtype);
{
Tensor t(dtype);
reset_and_test(t, false, {}, {});
}
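  // A scalar is represented as a rank-0 tensor, so the converted weights are
  // expected to have empty dims.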
{
Tensor t = test::AsScalar<InputCType>(12);
std::vector<int> expected_dims{1};
expected_dims.clear();
reset_and_test(t, false, expected_dims, {12});
reset_and_test(t, true, expected_dims, {12});
}
{
Tensor t = test->AsTensor<InputCType>({1, 2});
reset_and_test(t, false, {2}, {1, 2});
reset_and_test(t, true, {2}, {1, 2});
}
{
Tensor t =
test->AsTensor<InputCType>({1, 2, 3, 4, 5, 6}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {1, 2, 3, 4, 5, 6});
reset_and_test(t, true, {2, 3}, {1, 2, 3, 4, 5, 6});
}
{
Tensor t =
test->AsTensor<InputCType>({1, 1, 1, 1, 1, 1}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {1, 1, 1, 1, 1, 1});
reset_and_test(t, true, {2, 3}, {1, 1, 1, 1, 1, 1});
}
{
Tensor t =
test->AsTensor<InputCType>({2, 2, 1, 1, 1, 1}, TensorShape({2, 3}));
reset_and_test(t, false, {2, 3}, {2, 2, 1, 1, 1, 1});
reset_and_test(t, true, {2, 3}, {2, 2, 1, 1, 1, 1});
}
}
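// Checks error handling for unsupported (double) and out-of-int32-range
// (int64) constants, then runs the per-dtype conversion tests above.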
TEST_F(OpConverterTest, ConvertConst) {
{
Reset();
NodeDef node_def = MakeConstNodeDef<double>("my_const", {});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Unsupported tensorflow data type double");
}
{
Reset();
Tensor tensor =
AsTensor<int64_t>({1, std::numeric_limits<int64_t>::max(), 1, 1, 1,
std::numeric_limits<int64_t>::lowest()},
TensorShape({2, 3}));
NodeDef node_def;
node_def.set_name("my_const");
node_def.set_op("Const");
(*node_def.mutable_attr())["dtype"].set_type(DT_INT64);
TensorProto* tensor_attr =
(*node_def.mutable_attr())["value"].mutable_tensor();
tensor_attr->Clear();
tensor.AsProtoTensorContent(tensor_attr);
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"outside the range of int32");
}
TestConvertConst<DT_FLOAT, float, float>(this);
TestConvertConst<DT_INT8, int8, int32>(this);
TestConvertConst<DT_UINT8, uint8, int32>(this);
TestConvertConst<DT_INT16, int16, int32>(this);
TestConvertConst<DT_UINT16, uint16, int32>(this);
TestConvertConst<DT_INT32, int32, int32>(this);
TestConvertConst<DT_UINT32, uint32, int32>(this);
TestConvertConst<DT_INT64, int64, int32>(this);
TestConvertConst<DT_UINT64, uint64, int32>(this);
}
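// Builds a NodeDef for the batch norm op given by T (FusedBatchNorm,
// FusedBatchNormV2 or FusedBatchNormV3) with placeholder inputs and the
// requested data format, training flag and epsilon (epsilon is only set when
// it is positive).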
template <typename T>
NodeDef CreateFusedBatchNormOp(DataType tf_type, std::string data_format,
bool is_training, float epsilon) {
Scope s = Scope::NewRootScope();
auto x = ops::Placeholder(s.WithOpName("x"), tf_type);
auto scale = ops::Placeholder(s.WithOpName("scale"), tf_type);
auto offset = ops::Placeholder(s.WithOpName("offset"), tf_type);
auto mean = ops::Placeholder(s.WithOpName("mean"), tf_type);
auto variance = ops::Placeholder(s.WithOpName("variance"), tf_type);
typename T::Attrs attrs;
attrs.data_format_ = data_format;
attrs.is_training_ = is_training;
if (epsilon > 0) {
attrs.epsilon_ = epsilon;
} else {
EXPECT_GE(epsilon, 0);
}
return T(s.WithOpName("my_batchnorm"), x, scale, offset, mean, variance,
attrs)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_Test, ConvertFusedBatchNorm) {
using OpFunc = std::function<NodeDef(DataType, std::string, bool, float)>;
std::vector<OpFunc> get_node_def_vec{
CreateFusedBatchNormOp<ops::FusedBatchNorm>,
CreateFusedBatchNormOp<ops::FusedBatchNormV2>,
CreateFusedBatchNormOp<ops::FusedBatchNormV3>};
struct TestParam {
std::string data_format;
int tensor_input_idx;
bool is_training;
float epsilon;
Status conversion_status;
bool keep_channel_unknown;
};
struct NodeInput {
std::string name;
std::vector<int> dims;
std::vector<float> val;
};
std::vector<NodeInput> node_input_nchw{
{"x", {2, 3, 2, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}},
{"scale", {3}, {7, 8, 9}},
{"offset", {3}, {10, 20, 30}},
{"mean", {3}, {1, 2, 3}},
{"variance", {3}, {4, 5, 6}}};
std::vector<NodeInput> node_input_nhwc{
{"x", {2, 2, 1, 3}, {1, 3, 5, 2, 4, 6, 7, 9, 11, 8, 10, 12}},
{"scale", {3}, {7, 8, 9}},
{"offset", {3}, {10, 20, 30}},
{"mean", {3}, {1, 2, 3}},
{"variance", {3}, {4, 5, 6}}};
std::vector<float> expected_output_nchw{
10.0, 13.495633, 23.574135, 27.148273, 37.342354, 41.013527,
30.9738, 34.469433, 45.018955, 48.59309, 59.369415, 63.04059};
std::vector<float> expected_output_nhwc{
10.0, 23.574135, 37.342354, 13.495633, 27.148273, 41.013527,
30.9738, 45.018955, 59.369415, 34.469433, 48.59309, 63.04059};
for (auto get_node_def : get_node_def_vec) {
NodeDef tmp_node_def = get_node_def(tf_type_, "NCHW", true, 0);
std::string op_name = tmp_node_def.op();
std::vector<TestParam> test_param{
{"NCHW", 0, true, 0,
errors::Unimplemented(
StrCat(op_name, " only supports is_training=false"))},
{"NCHW", 1, false, 0,
errors::Unimplemented(StrCat("The input \"scale\" for ", op_name,
" must be a constant"))},
{"NCHW", 2, false, 0,
errors::Unimplemented(StrCat("The input \"offset\" for ", op_name,
" must be a constant"))},
{"NCHW", 3, false, 0,
errors::Unimplemented(StrCat("The input \"mean\" for ", op_name,
" must be a constant"))},
{"NCHW", 4, false, 0,
errors::Unimplemented(StrCat("The input \"variance\" for ", op_name,
" must be a constant"))},
{"NCHW", 0, false, 0.01},
{"NHWC", 0, false, 0.01}};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
test_param.push_back(
{"NCHW", 0, false, 0.01,
errors::InvalidArgument("Channel dimension must be static"), true});
test_param.push_back(
{"NHWC", 0, false, 0.01,
errors::InvalidArgument("Channel dimension must be static"), true});
}
for (auto p : test_param) {
Reset();
NodeDef node_def =
get_node_def(tf_type_, p.data_format, p.is_training, p.epsilon);
std::vector<NodeInput> node_input =
p.data_format == "NCHW" ? node_input_nchw : node_input_nhwc;
std::vector<float> expected_output =
p.data_format == "NCHW" ? expected_output_nchw : expected_output_nhwc;
for (int i = 0; i < node_input.size(); i++) {
if (i == 0 || i == p.tensor_input_idx) {
Status expected_status =
(i != 0 && trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::InvalidArgument(
batch_size_error(node_input[i].name,
"Provided batch size does not match "
"converter batch size: 3 vs 2"))
: OkStatus();
std::vector<int> partial_input_shape;
if (i == 0 && trt_mode_ == TrtTestMode::kDynamicShape &&
!p.keep_channel_unknown) {
partial_input_shape.resize(4, -1);
int channel_dim = (p.data_format == "NCHW" ? 1 : 3);
partial_input_shape[channel_dim] = node_input[i].dims[channel_dim];
}
AddTestTensor(node_input[i].name, node_input[i].dims, tf_type_,
node_input[i].val, partial_input_shape,
expected_status);
} else {
AddTestWeights(node_input[i].name, node_input[i].dims,
node_input[i].val, tf_type_);
}
}
TestOpConverter(node_def, node_input[0].dims, p.conversion_status,
OkStatus(), ArrayFloatNear(expected_output));
}
}
}
TEST_P(OpConverter_FP32_Test, ConvertTranspose) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto transpose = ops::Transpose(s.WithOpName("my_transpose"), input, weights);
const NodeDef& node_def = transpose.operation.node()->def();
std::vector<TestParamBase> test_params = {
TestParamBase{{3, 1, 2, 1},
{},
{},
{},
Status(absl::StatusCode::kUnimplemented,
"The input \"perm\" for Transpose must be a "
"constant")},
TestParamBase{{1, 1, 2, 3},
{},
{},
{0, 1, 2},
Status(absl::StatusCode::kInvalidArgument,
"Rank of perm for transpose does not match with "
"that of the input.")},
TestParamBase{
{1, 1, 2, 3},
{},
{3, 2, 1, 1},
{3, 2, 1, 0},
(trt_mode_ == TrtTestMode::kImplicitBatch)
? Status(absl::StatusCode::kUnimplemented,
"Transpose at batch dimension is not supported")
: OkStatus()},
TestParamBase{{1, 1, 2, 3}, {}, {1, 3, 1, 2}, {0, 3, 1, 2}},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
test_params.push_back(TestParamBase{
{1, 1, 2, 3}, {-1, 1, 2, -1}, {1, 3, 1, 2}, {0, 3, 1, 2}});
}
std::vector<float> expected_values{1, 4, 2, 5, 3, 6};
for (auto p : test_params) {
SCOPED_TRACE(p);
Reset();
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6},
p.partial_input_dims);
if (p.param.empty()) {
AddTestTensor("weights", {3});
} else {
AddTestWeights<int32>("weights", {static_cast<int>(p.param.size())},
p.param);
}
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray(expected_values));
}
}
TEST_P(OpConverter_FP32_Test, ConvertTile) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto tile = ops::Tile(s.WithOpName("my_tile"), input, weights);
const NodeDef& node_def = tile.operation.node()->def();
struct TileParam {
std::vector<int> input_dims;
std::vector<int> multiplier;
std::vector<float> tensor;
std::vector<int> expected_output_dims;
std::vector<int> expected_results;
int test_ID;
Status status;
};
std::vector<TileParam> test_params = {
TileParam{{1, 2, 3},
{1, -2, 1},
{},
{},
{},
1,
Status(absl::StatusCode::kInvalidArgument,
"All replications of the Tile operation in "
"'my_tile' should be positive, got (1, -2, 1).")},
TileParam{{1, 2, 3},
{1, 2, 1, 3},
{0, 1, 2, 3, 4, 5},
{},
{},
2,
Status(absl::StatusCode::kInvalidArgument,
"The length of the replication vector (4) of the "
"Tile operation in 'my_tile' is expected to be equal "
"to the rank of the input vector (3).")},
TileParam{{1, 2},
{1, 3},
{2, 3},
{1, 6},
{2, 3, 2, 3, 2, 3}},
TileParam{{1, 2, 3},
{1, 2, 1},
{0, 1, 2, 3, 4, 5},
{1, 4, 3},
{0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5}},
TileParam{{1, 2, 3},
{1, 1, 2},
{0, 1, 2, 3, 4, 5},
{1, 2, 6},
{0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5}},
TileParam{{1, 2, 3},
{1, 2, 2},
{0, 1, 2, 3, 4, 5},
{1, 4, 6},
{0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5,
0, 1, 2, 0, 1, 2, 3, 4, 5, 3, 4, 5}},
TileParam{{1, 2},
{2, 3},
{2, 3},
{2, 6},
{2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3}},
TileParam{{1, 2, 3},
{2, 2, 1},
{0, 1, 2, 3, 4, 5},
{2, 4, 3},
{0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5}},
};
for (bool multiplier_is_tensor : {true, false}) {
for (bool input_is_tensor : {true, false}) {
for (auto p : test_params) {
std::vector<int> num_mults = {static_cast<int>(p.multiplier.size())};
std::vector<int> partial_input_dims = {};
if (multiplier_is_tensor) {
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
p.status =
Status(absl::StatusCode::kInvalidArgument,
"Conversion for Tile is not implemented for multipliers "
"passed as a tensor in implicit batch mode");
num_mults = {1, static_cast<int>(p.multiplier.size())};
} else {
if (p.test_ID == 1) {
continue;
}
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_input_dims = num_mults;
p.status = OkStatus();
}
if (p.test_ID == 2) {
p.status = Status(absl::StatusCode::kInvalidArgument,
"When replications are defined as a tensor, "
"the number of its elements (4) must be equal "
"to the rank of the input tensor (3).");
}
}
} else {
if (trt_mode_ == TrtTestMode::kImplicitBatch && p.multiplier[0] > 1) {
p.status =
Status(absl::StatusCode::kUnimplemented,
"The Tile operation along "
"the batch dimension in 'my_tile' is not implemented.");
}
}
Reset();
if (input_is_tensor) {
AddTestTensor("input", p.input_dims, p.tensor);
} else {
AddTestWeights("input", p.input_dims, p.tensor, tf_type_);
}
if (multiplier_is_tensor) {
AddTestTensor<int>("weights", num_mults, DT_INT32, p.multiplier,
partial_input_dims);
} else {
AddTestWeights<int32>("weights", num_mults, p.multiplier);
}
TestOpConverter(node_def, p.expected_output_dims, p.status, OkStatus(),
ElementsAreArray(p.expected_results));
}
}
}
}
TEST_P(OpConverter_FP32_Test, ConvertReshape) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto reshape = ops::Reshape(s.WithOpName("my_reshape"), input, weights);
const NodeDef& node_def = reshape.operation.node()->def();
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"The input \"shape\" for Reshape must be a constant in implicit batch "
"mode");
} else if (!IS_TRT_VERSION_GE(7, 1, 3, 0)) {
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Non constant shape input tensor for Reshape requires minimum TRT "
"7.1.3");
}
Status reshape_from_scalar_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Internal(
"Failed to convert at least one input to a TRT_TensorOrWeights:"
" Scalar input tensor is not supported since the first "
"dimension is treated as batch dimension by TRT")
: OkStatus();
Status add_scalar_tensor_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument(
"removing first dim requires explicit batch dimension")
: OkStatus();
Status reshape_to_scalar_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("Reshape to shape=[] is not supported")
: OkStatus();
Status reshape_batch_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("Reshape on batch dimension is not supported")
: OkStatus();
struct TestParams {
std::vector<int> tensor_dims;
std::vector<int> shape;
std::vector<int> expected_shape;
Status conversion_status;
Status runtime_status;
std::vector<int> shape_prof;
Status add_test_tensor_status;
};
std::vector<TestParams> params = {
TestParams{{},
{1, 1},
{},
reshape_from_scalar_status,
{},
{},
add_scalar_tensor_status},
TestParams{{1, 1}, {}, {}, reshape_to_scalar_status},
TestParams{{1, 1, 2, 3}, {3, 1, 1, 2}, {}, reshape_batch_status},
TestParams{{2, 1, 2, 3}, {-1, 1, 4}, {3, 1, 4}, reshape_batch_status},
TestParams{{1, 1, 2, 3}, {-1, 1, 3, 2}, {1, 1, 3, 2}},
TestParams{{1, 1, 2, 3}, {1, 1, -1}, {1, 1, 6}},
TestParams{{1, 1, 2, 3}, {1, 1, 3, 2}},
TestParams{{2, 1, 2, 3}, {2, 1, 3, 2}},
TestParams{{1, 1, 1}, {1}},
TestParams{{1}, {1, 1}},
TestParams{{2, 1, 1}, {2}},
TestParams{{2}, {2, 1}},
};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
params.push_back(TestParams{{},
{},
{},
reshape_from_scalar_status,
{},
{},
add_scalar_tensor_status});
}
std::vector<bool> shape_input_options(1, true);
if (trt_mode_ != TrtTestMode::kImplicitBatch &&
IS_TRT_VERSION_GE(7, 1, 3, 0)) {
shape_input_options.push_back(false);
}
for (auto p : params) {
for (auto shape_as_weight : shape_input_options) {
std::ostringstream oss;
oss << "shape " << PrintToString(p.shape);
SCOPED_TRACE(StrCat(oss.str(), shape_as_weight ? " weight" : " tensor"));
if (!shape_as_weight && p.shape.empty()) {
p.conversion_status = errors::Unimplemented(
"Reshape with dynamic input requires 1D input tensor");
}
Reset();
const int n_elements =
std::accumulate(p.tensor_dims.begin(), p.tensor_dims.end(), 1,
std::multiplies<int>());
std::vector<float> input_vec(n_elements);
std::iota(input_vec.begin(), input_vec.end(), 1);
AddTestTensor("input", p.tensor_dims, tf_type_, input_vec, {},
p.add_test_tensor_status);
if (shape_as_weight) {
AddTestWeights<int32>("weights", {static_cast<int>(p.shape.size())},
p.shape);
} else {
std::vector<int32> dims;
std::vector<int32> values{p.shape};
if (!p.shape.empty()) {
dims.push_back(p.shape.size());
} else {
values.push_back(1);
}
AddTestTensor("weights", dims, DT_INT32, values, dims);
}
std::vector<int> expected_shape =
p.expected_shape.empty() ? p.shape : p.expected_shape;
VLOG(2) << "Calling TestOpConverter";
TestOpConverter(node_def, expected_shape, p.conversion_status,
p.runtime_status, ElementsAreArray(input_vec));
}
}
}
TEST_P(OpConverter_FP32_Test, ConvertShape) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto shape = ops::Shape(s.WithOpName("my_shape"), input);
const NodeDef& node_def = shape.operation.node()->def();
Status conversion_status =
(trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::Unimplemented(
"Shape is only supported for explicit batch mode.")
: OkStatus();
std::vector<TestParamBase> test_params = {
#if !IS_TRT_VERSION_GE(7, 1, 3, 0)
TestParamBase{{1, 2, 3}, {}, {3}, {}, conversion_status},
#endif
TestParamBase{{1, 2, 3}, {}, {3}, {1}, conversion_status},
};
auto input_is_weight = [](const TestParamBase p) { return !p.param.empty(); };
for (auto p : test_params) {
SCOPED_TRACE(p);
Reset();
int n_elements = 0;
if (input_is_weight(p) || trt_mode_ != TrtTestMode::kExplicitBatch) {
n_elements = std::accumulate(p.input_dims.begin(), p.input_dims.end(), 1,
std::multiplies<int>());
}
std::vector<float> input_val(n_elements, 1);
if (!input_is_weight(p)) {
AddTestTensor("input", p.input_dims, input_val);
} else {
AddTestWeights("input", p.input_dims, input_val, tf_type_);
}
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray(p.input_dims),
{DT_INT32});
}
}
struct MatMulTestParams {
std::vector<int> shape_a;
std::vector<int> values_a;
bool transpose_a;
std::vector<int> shape_b;
std::vector<int> values_b;
bool transpose_b;
std::vector<int> expected_shape;
std::vector<int> expected_output;
};
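// Runs the given MatMul/BatchMatMul test cases for every tensor/weight
// combination of the two operands (the weight-weight case is skipped),
// optionally using a partial input shape for operand A in dynamic shape mode,
// after first checking that unsupported integer inputs are rejected. Both the
// conversion status and the computed output are verified.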
void TestMatMulHelper(
ParameterizedOpConverterTestBase* test,
const std::function<NodeDef(DataType, bool, bool)>& get_matmul,
const std::vector<MatMulTestParams>& params) {
{
test->Reset();
NodeDef node_def = get_matmul(DT_INT32, false, false);
test->AddTestTensor("input", {1, 2}, DT_INT32, {});
test->AddTestWeights<int32>("weights", {2, 1}, {3, 5});
const std::vector<DataType> allowed_types{DT_FLOAT, DT_HALF};
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
convert_not_supported_dtype_msg(allowed_types, DT_INT32, node_def));
}
std::vector<bool> a_test_partial_shape_values{false};
if (test->get_trt_mode() == TrtTestMode::kDynamicShape) {
a_test_partial_shape_values.push_back(true);
}
for (auto p : params) {
for (bool a_is_tensor : {true, false}) {
for (bool b_is_tensor : {true, false}) {
for (bool a_partial_shape : a_test_partial_shape_values) {
if (a_partial_shape && !a_is_tensor) {
continue;
}
if (!a_is_tensor && !b_is_tensor) {
continue;
}
SCOPED_TRACE(StrCat("A", p.transpose_a ? ".T" : "", " is ",
a_is_tensor ? "tensor" : "weight", ", B",
p.transpose_b ? ".T" : "", " is ",
b_is_tensor ? "tensor " : "weight, rank A ",
p.shape_a.size(), ", rank B ", p.shape_b.size()));
test->Reset();
NodeDef node_def =
get_matmul(test->get_tf_type(), p.transpose_a, p.transpose_b);
const bool is_batch_matmul = node_def.op() == "BatchMatMul";
if (a_is_tensor) {
if (a_partial_shape) {
std::vector<int> partial_shape(p.shape_a.size(), -1);
int k = p.shape_a.size() - 1;
partial_shape.at(k) = p.shape_a.at(k);
test->AddTestTensor("input", p.shape_a, test->get_tf_type(),
p.values_a, partial_shape);
} else {
test->AddTestTensor("input", p.shape_a, p.values_a);
}
} else {
test->AddTestWeights("input", p.shape_a, p.values_a,
test->get_tf_type());
}
if (b_is_tensor) {
if (a_is_tensor && p.shape_a[0] != p.shape_b[0] &&
test->get_trt_mode() == TrtTestMode::kImplicitBatch) {
VLOG(2) << "Skipping test with inpcompatible batch dimensions";
continue;
}
test->AddTestTensor("weights", p.shape_b, p.values_b);
} else {
test->AddTestWeights("weights", p.shape_b, p.values_b,
test->get_tf_type());
}
Status conversion_status = OkStatus();
if (test->get_trt_mode() == TrtTestMode::kImplicitBatch) {
if (is_batch_matmul) {
if (a_is_tensor && p.shape_a.size() < p.shape_b.size()) {
conversion_status = errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims ",
p.shape_a.size(), " vs broadcast #dims ", p.shape_b.size(),
")");
}
if (b_is_tensor && p.shape_b.size() < p.shape_a.size()) {
conversion_status = errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported "
"(tensor #dims ",
p.shape_b.size(), " vs broadcast #dims ", p.shape_a.size(),
")");
}
if ((!a_is_tensor || !b_is_tensor) && p.shape_a[0] != 1) {
conversion_status = errors::Unimplemented(
"TensorRT does not support batched constants in implicit "
"batch mode.");
}
} else if ((a_is_tensor && p.shape_a.size() <= 2 &&
(p.transpose_a || b_is_tensor)) ||
(b_is_tensor && p.shape_b.size() <= 2)) {
conversion_status = errors::InvalidArgument(
"MatMul with 2D tensors requires explicit batch mode, or that"
" tensor A is not transposed and B is a constant tensor.");
}
}
test->TestOpConverter(node_def, p.expected_shape, conversion_status,
OkStatus(),
ElementsAreArray(p.expected_output));
if (!conversion_status.ok()) {
VLOG(2) << "Converted with status " << conversion_status;
}
VLOG(2) << "== Finished test iteration ==";
}
}
}
}
}
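// Scans the TRT network built by the converter and checks whether a layer of
// type LayerType was (or was not) added.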
template <typename LayerType>
void CheckAddedLayers(OpConverterTest* test, bool expect_found) {
bool layer_found = false;
for (int i = 0; i < test->converter_->network()->getNbLayers(); i++) {
nvinfer1::ILayer* layer = test->converter_->network()->getLayer(i);
if (dynamic_cast<LayerType*>(layer)) {
layer_found = true;
}
}
EXPECT_EQ(expect_found, layer_found);
}
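// 2D MatMul test cases covering all four transpose_a/transpose_b combinations
// for square inputs as well as non-square shapes.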
std::vector<MatMulTestParams> GetMatMulTestParams() {
std::vector<MatMulTestParams> params{
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {2, 3, 6, 11}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {1, 3, 3, 13}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {0, 1, 2, 3}, false,
{2, 2}, {4, 6, 6, 10}},
MatMulTestParams{{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {0, 1, 2, 3}, true,
{2, 2}, {2, 6, 3, 11}},
MatMulTestParams{{2, 3}, {0, 1, 2, 3, 4, 5}, false,
{2, 3}, {1, 2, 3, 4, 5, 6}, true,
{2, 2}, {8, 17, 26, 62}},
MatMulTestParams{{2, 3}, {0, 1, 2, 3, 4, 5}, true,
{2, 3}, {1, 2, 3, 4, 5, 6}, false,
{3, 3}, {12, 15, 18, 17, 22, 27, 22, 29, 36}},
MatMulTestParams{{3, 2}, {0, 1, 2, 3, 4, 5}, false,
{2, 3}, {1, 2, 3, 4, 5, 6}, false,
{3, 3}, {4, 5, 6, 14, 19, 24, 24, 33, 42}},
MatMulTestParams{{3, 2}, {0, 1, 2, 3, 4, 5}, true,
{2, 3}, {1, 2, 3, 4, 5, 6}, true,
{2, 2}, {16, 34, 22, 49}},
};
return params;
}
TEST_P(OpConverter_FP32_Test, ConvertMatMul) {
auto get_matmul_nodedef = [](DataType dtype, bool transpose_a,
bool transpose_b) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto weights = ops::Placeholder(s.WithOpName("weights"), dtype);
const auto matmul_attrs =
ops::MatMul::TransposeA(transpose_a).TransposeB(transpose_b);
auto matmul =
ops::MatMul(s.WithOpName("my_matmul"), input, weights, matmul_attrs);
return matmul.operation.node()->def();
};
TestMatMulHelper(this, get_matmul_nodedef, GetMatMulTestParams());
}
TEST_P(OpConverter_FP32_Test, ConvertBatchMatMul) {
auto get_batch_matmul_nodedef = [](DataType dtype, bool transpose_a,
bool transpose_b) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto weights = ops::Placeholder(s.WithOpName("weights"), dtype);
const auto matmul_attrs =
ops::BatchMatMul::AdjX(transpose_a).AdjY(transpose_b);
auto matmul = ops::BatchMatMul(s.WithOpName("my_matmul"), input, weights,
matmul_attrs);
return matmul.operation.node()->def();
};
std::vector<MatMulTestParams> params_2d = GetMatMulTestParams();
std::vector<MatMulTestParams> params;
params.reserve(params_2d.size() * 3 + 1);
auto insert_ones = [](std::vector<int> v, int n) {
std::vector<int> ones(n, 1);
ones.insert(ones.end(), v.begin(), v.end());
return ones;
};
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[](MatMulTestParams p) {
p.shape_a.insert(p.shape_a.begin(), 1);
p.shape_b.insert(p.shape_b.begin(), 1);
p.expected_shape.insert(p.expected_shape.begin(), 1);
return p;
});
params.push_back(
MatMulTestParams{{2, 2, 2}, {0, 1, 2, 3, 0, 1, 2, 3}, false,
{2, 2, 2}, {0, 1, 2, 3, 0, 1, 2, 3}, false,
{2, 2, 2}, {2, 3, 6, 11, 2, 3, 6, 11}}
);
params.push_back(
MatMulTestParams{{2, 2, 3}, {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5},
false,
{2, 2, 3}, {1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6}, true,
{2, 2, 2}, {8, 17, 26, 62, 8, 17, 26, 62}});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 2);
p.shape_b = insert_ones(p.shape_b, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a = insert_ones(p.shape_a, 1);
p.shape_b = insert_ones(p.shape_b, 2);
p.expected_shape = insert_ones(p.expected_shape, 2);
return p;
});
std::transform(params_2d.begin(), params_2d.end(), std::back_inserter(params),
[insert_ones](MatMulTestParams p) {
p.shape_a.insert(p.shape_a.begin(), 2);
p.values_a.reserve(p.values_a.size() * 2);
p.values_a.insert(p.values_a.end(), p.values_a.begin(),
p.values_a.end());
p.shape_b.insert(p.shape_b.begin(), 2);
p.values_b.reserve(p.values_b.size() * 2);
p.values_b.insert(p.values_b.end(), p.values_b.begin(),
p.values_b.end());
p.expected_shape.insert(p.expected_shape.begin(), 2);
p.expected_output.reserve(p.expected_output.size() * 2);
p.expected_output.insert(p.expected_output.end(),
p.expected_output.begin(),
p.expected_output.end());
return p;
});
params.push_back(MatMulTestParams{
{1, 2, 4, 5},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39},
false,
{1, 2, 3, 5},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30},
true,
{1, 2, 4, 3},
{40, 90, 140, 115, 290, 465, 190, 490,
790, 265, 690, 1115, 1990, 2540, 3090, 2440,
3115, 3790, 2890, 3690, 4490, 3340, 4265, 5190}});
TestMatMulHelper(this, get_batch_matmul_nodedef, params);
}
#if IS_TRT_VERSION_GE(7, 1, 3, 0)
TEST_P(OpConverter_FP32_Test, ConvertEinsum) {
auto get_einsum_nodedef = [](DataType dtype, std::string eq,
int n_inputs = 2) -> NodeDef {
Scope s = Scope::NewRootScope();
auto a = ops::Placeholder(s.WithOpName("input_a"), dtype);
std::vector<Input> input_vec{a};
if (n_inputs > 1) {
auto b = ops::Placeholder(s.WithOpName("input_b"), dtype);
input_vec.push_back(b);
}
InputList inputs(input_vec);
auto einsum = ops::Einsum(s.WithOpName("my_einsum"), inputs, eq);
return einsum.operation.node()->def();
};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
NodeDef node = get_einsum_nodedef(tf_type_, "ab,cb->ac");
AddTestTensor("input_a", {2, 3});
AddTestTensor("input_b", {2, 3});
const auto& err = convert_not_supported_implicit(node.op(), node.name());
TestOpConverter(node, {2, 2}, errors::Unimplemented(err), OkStatus(),
ElementsAreArray({13, 16, 40, 52}));
return;
}
struct TestParams {
std::string equation;
std::vector<int> shape_a;
std::vector<int> values_a;
std::vector<int> shape_b;
std::vector<int> values_b;
std::vector<int> expected_shape;
std::vector<int> expected_output;
Status conv_status;
};
Status unimplemented_eq = errors::Unimplemented("");
Status internal_err = errors::Internal("");
Status internal_err_before_TRT82 =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? OkStatus() : internal_err;
Status unimplemented_before_TRT82 =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? OkStatus() : unimplemented_eq;
Status diagonal_error = unimplemented_eq;
Status diagonal_error_1_input =
IS_TRT_VERSION_GE(8, 2, 0, 0) ? unimplemented_eq : internal_err;
std::vector<TestParams> params{
TestParams{"i,i->", {2}, {2, 3}, {2}, {1, 2}, {}, {8}, unimplemented_eq},
TestParams{"ik,ik->",
{2, 2},
{2, 3, 4, 1},
{2, 2},
{1, 2, 1, 3},
{},
{15},
unimplemented_eq},
TestParams{"i,k->ik",
{2},
{1, 2},
{3},
{1, 2, 3},
{2, 3},
{1, 2, 3, 2, 4, 6},
unimplemented_eq},
TestParams{"ij,kl->ijkl",
{2, 1},
{1, 2},
{3, 1},
{1, 2, 3},
{2, 1, 3, 1},
{1, 2, 3, 2, 4, 6},
unimplemented_before_TRT82},
TestParams{"ik->ki",
{2, 3},
{0, 1, 2, 3, 4, 5},
{},
{},
{3, 2},
{0, 3, 1, 4, 2, 5},
internal_err_before_TRT82},
TestParams{"ii->i",
{3, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{},
{},
{3},
{0, 4, 8},
diagonal_error_1_input},
TestParams{"ii->",
{3, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8},
{},
{},
{},
{12},
diagonal_error_1_input},
TestParams{"abbc,dc->ad",
{1, 2, 2, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{2, 3},
{1, 2, 3, 4, 5, 6},
{2, 3},
{1, 2, 3, 2, 4, 6},
diagonal_error},
TestParams{"...ik,...jk->...ij",
{1, 3, 1, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 4},
{1, 2, 3, 4, 5, 6, 7, 8},
{2, 3, 1, 1},
{20, 60, 100, 44, 148, 252},
unimplemented_eq},
TestParams{"ab,bc->ac",
{2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2},
{1, 2, 3, 4, 5, 6},
{2, 2},
{13, 16, 40, 52}},
TestParams{"abc,cde->abde",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2, 2},
{23, 26, 29, 32, 68, 80, 92, 104}},
TestParams{"abcd,cde->abe",
{1, 2, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2},
{125, 140, 341, 392}},
TestParams{"aBAE,AEe->aBe",
{1, 2, 2, 3},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 3, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2, 2},
{125, 140, 341, 392}},
TestParams{"abc,cd->abd",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2},
{1, 2, 3, 4, 5, 6},
{1, 2, 2},
{13, 16, 40, 52}},
TestParams{"acbe,aecd->abcd",
{1, 2, 3, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{1, 4, 2, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
{1, 3, 2, 3},
{90, 96, 102, 732, 786, 840, 250, 272, 294, 940, 1010, 1080,
410, 448, 486, 1148, 1234, 1320}},
TestParams{"aecd,abcd->acbe",
{1, 2, 3, 4},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23},
{1, 2, 3, 4},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24},
{1, 3, 2, 2},
{20, 140, 92, 788, 148, 460, 412, 1300, 404, 908, 860, 1940}},
TestParams{"acd,dce->ae",
{1, 2, 3},
{0, 1, 2, 3, 4, 5},
{3, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{1, 2},
{115, 130}},
TestParams{"abcd,bace->bade",
{2, 3, 2, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{3, 2, 2, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
{3, 2, 1, 1},
{2, 46, 28, 128, 86, 242}},
TestParams{
"cebfad,fageb->abcdg",
{1, 1, 3, 3, 2, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35},
{3, 2, 2, 1, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36},
{2, 3, 1, 2, 2},
{252, 288, 291, 336, 768, 912, 810, 963, 1356, 1608, 1401, 1662,
438, 492, 495, 558, 1176, 1338, 1236, 1407, 1986, 2256, 2049, 2328}},
};
for (auto p : params) {
for (bool a_is_tensor : {true, false}) {
for (bool b_is_tensor : {true, false}) {
if (!a_is_tensor && !b_is_tensor) {
continue;
}
Reset();
int n_inputs = p.shape_b.empty() ? 1 : 2;
NodeDef node_def = get_einsum_nodedef(tf_type_, p.equation, n_inputs);
if (a_is_tensor) {
AddTestTensor("input_a", p.shape_a, p.values_a);
} else {
AddTestWeights("input_a", p.shape_a, p.values_a, tf_type_);
}
if (!p.shape_b.empty()) {
if (b_is_tensor) {
AddTestTensor("input_b", p.shape_b, p.values_b);
} else {
AddTestWeights("input_b", p.shape_b, p.values_b, tf_type_);
}
}
TestOpConverter(node_def, p.expected_shape, p.conv_status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
}
}
#endif
TEST_P(OpConverter_FP32_FP16_Test, ConvertBiasAdd) {
auto get_biasadd_nodedef = [](const string& data_format,
DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto weights = ops::Placeholder(s.WithOpName("weights"), tf_type);
const auto biasadd_attrs = ops::BiasAdd::DataFormat(data_format);
auto biasadd =
ops::BiasAdd(s.WithOpName("my_biasadd"), input, weights, biasadd_attrs);
return biasadd.operation.node()->def();
};
for (const string& data_format : {"NHWC", "NCHW"}) {
for (const int trt_input_rank : {1, 2, 3, 4}) {
Reset();
NodeDef node_def = get_biasadd_nodedef(data_format, tf_type_);
std::vector<int32> dims_array(trt_input_rank + 1, 1);
if (trt_input_rank == 1) {
dims_array[1] = (data_format == "NHWC" ? 3 : 2);
} else {
dims_array[1] = 2;
dims_array[trt_input_rank] = 3;
}
const int64_t num_input = DimsAdapter(dims_array).Volume();
ASSERT_EQ(trt_input_rank > 1 ? 6 : (data_format == "NHWC" ? 3 : 2),
num_input);
std::vector<float> input_data(num_input, 0);
AddTestTensor("input", dims_array, input_data);
const int channel_size = (data_format == "NHWC" ? 3 : 2);
std::vector<float> bias(channel_size);
for (int i = 0; i < channel_size; ++i) {
bias[i] = i + 1;
}
AddTestWeights("weights", {channel_size}, bias, tf_type_);
std::vector<float> output_data;
if (trt_input_rank == 1) {
if (data_format == "NHWC") {
output_data = {1, 2, 3};
} else {
output_data = {1, 2};
}
} else {
if (data_format == "NHWC") {
output_data = {1, 2, 3, 1, 2, 3};
} else {
output_data = {1, 1, 1, 2, 2, 2};
}
}
TestOpConverter(node_def, dims_array, OkStatus(), OkStatus(),
ElementsAreArray(output_data));
}
}
}
template <typename OpType>
NodeDef GetBinaryOpNodeDef(DataType dtype) {
Scope s = Scope::NewRootScope();
auto input_l = ops::Placeholder(s.WithOpName("input1"), dtype);
auto input_r = ops::Placeholder(s.WithOpName("input2"), dtype);
auto op = OpType(s.WithOpName("my_binary"), input_l, input_r);
return op.operation.node()->def();
}
TEST_P(OpConverter_FP32_FP16_BinaryTest, ConvertBinary) {
using OpFunc = std::function<NodeDef(DataType)>;
std::map<std::string, std::pair<OpFunc, std::vector<float>>> op_test_info;
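  // The braced list of expected values is split by the preprocessor across
  // the v1..v8 macro parameters (braces do not protect against splitting on
  // commas) and is reassembled into an initializer list inside the
  // std::vector<float>(...) constructor call.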
#define ADD_OP(name, op, v1, v2, v3, v4, v5, v6, v7, v8) \
op_test_info[name] = \
std::make_pair(GetBinaryOpNodeDef<op>, \
std::vector<float>(v1, v2, v3, v4, v5, v6, v7, v8))
ADD_OP("Add", ops::Add, {5, 8, 6, 9, 5, 8, 6, 9});
ADD_OP("AddV2", ops::AddV2, {5, 8, 6, 9, 5, 8, 6, 9});
ADD_OP("Sub", ops::Sub, {1, 4, 0, 3, 1, 4, 0, 3});
ADD_OP("Mul", ops::Mul, {6, 12, 9, 18, 6, 12, 9, 18});
ADD_OP("Div", ops::Div, {1.5, 3, 1, 2, 1.5, 3, 1, 2});
ADD_OP("RealDiv", ops::RealDiv, {1.5, 3, 1, 2, 1.5, 3, 1, 2});
ADD_OP("FloorDiv", ops::FloorDiv, {1, 3, 1, 2, 1, 3, 1, 2});
ADD_OP("Minimum", ops::Minimum, {2, 2, 3, 3, 2, 2, 3, 3});
ADD_OP("Maximum", ops::Maximum, {3, 6, 3, 6, 3, 6, 3, 6});
ADD_OP("Pow", ops::Pow, {9, 36, 27, 216, 9, 36, 27, 216});
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_OP("Greater", ops::Greater, {1, 1, 0, 1, 1, 1, 0, 1});
ADD_OP("Less", ops::Less, {0, 0, 0, 0, 0, 0, 0, 0});
ADD_OP("Equal", ops::Equal, {0, 0, 1, 0, 0, 0, 1, 0});
ADD_OP("GreaterEqual", ops::Less, {1, 1, 1, 1, 1, 1, 1, 1});
ADD_OP("LessEqual", ops::Greater, {0, 0, 1, 0, 0, 0, 1, 0});
#endif
#undef ADD_OP
std::vector<std::vector<float>> data = {
{3, 6, 3, 6}, {3, 6}, {2, 3, 2, 3}, {2, 3}};
RunTests(*BinaryOperationMap(), op_test_info, data);
}
TEST_P(OpConverter_BOOL_BinaryTest, ConvertBooleanBinary) {
using OpFunc = std::function<NodeDef(DataType)>;
std::map<std::string, std::pair<OpFunc, std::vector<int>>> op_test_info;
#define ADD_OP(name, op, v1, v2, v3, v4, v5, v6, v7, v8) \
op_test_info[name] = \
std::make_pair(GetBinaryOpNodeDef<op>, \
std::vector<int>(v1, v2, v3, v4, v5, v6, v7, v8))
ADD_OP("LogicalOr", ops::LogicalOr, {1, 1, 0, 1, 1, 1, 0, 1});
ADD_OP("LogicalAnd", ops::LogicalAnd, {0, 1, 0, 0, 0, 1, 0, 0});
#undef ADD_OP
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
std::vector<std::vector<int>> data = {
{0, 1, 0, 1}, {0, 1}, {1, 0, 1, 0}, {1, 0}};
RunTests(*BinaryBooleanOperationMap(), op_test_info, data);
#endif
}
NodeDef GetAddNNodeDef(const std::vector<string>& input_names, DataType dtype) {
Scope s = Scope::NewRootScope();
OutputList inputs;
for (const string& name : input_names) {
inputs.push_back(ops::Placeholder(s.WithOpName(name), dtype));
}
auto op = ops::AddN(s.WithOpName("my_addn"), inputs);
return op.operation.node()->def();
}
struct AddNTestParams {
std::vector<float> input_values;
std::vector<string> input_names;
std::vector<int> dimensions;
std::vector<float> expected_output;
Status status;
};
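// Splits p.input_values evenly across p.input_names, adds each slice as a
// test tensor with the given dimensions, and checks that AddN conversion
// produces the expected elementwise sum.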
void TestAddN(ParameterizedOpConverterTestBase* test, AddNTestParams& p) {
test->Reset();
const NodeDef node_def = GetAddNNodeDef(p.input_names, test->get_tf_type());
if (p.input_values.size() % p.input_names.size() != 0) {
LOG(ERROR) << "The number of input values: `" << p.input_values.size()
<< "` is not a multiple of the number of inputs: `"
<< p.input_names.size() << "`";
ASSERT_TRUE(false);
}
DataVec input_data;
int input_offset = 0;
const int window_size = p.input_values.size() / p.input_names.size();
for (const string& name : p.input_names) {
std::vector<float>::const_iterator start_pos =
p.input_values.begin() + input_offset;
std::vector<float>::const_iterator end_pos = start_pos + window_size;
std::vector<float> sub_input_val(start_pos, end_pos);
input_offset += window_size;
test->AddTestTensor(name, p.dimensions, test->get_tf_type(), sub_input_val);
}
test->TestOpConverter(node_def, p.dimensions,
p.status,
p.status,
ElementsAreArray(p.expected_output),
{test->get_tf_type()});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertAddN) {
{
Reset();
const NodeDef node_def = GetAddNNodeDef({"tensor", "weights"}, tf_type_);
AddTestTensor("tensor", {1, 2});
AddTestWeights<float>("weights", {2, 1, 2}, {0, 1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Weights input to AddN is required to have batch dimension 1.");
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<AddNTestParams> params = {
{common_input,
{"inp1", "inp2", "inp3"},
{1, 1, 2, 1, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3, 1, 1},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{1, 2, 1, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3, 1},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{1, 2, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 1, 3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{2, 1},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{1, 3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3"},
{2},
{6, 9},
OkStatus()},
{common_input,
{"inp1", "inp2"},
{3},
{3, 5, 7},
OkStatus()},
{common_input,
{"inp1", "inp2", "inp3", "inp4", "inp5", "inp6"},
{1},
{15},
OkStatus()},
};
for (auto p : params) {
TestAddN(this, p);
}
}
TEST_P(OpConverter_FP32_Test, ConvertQDQDynamicRangeMode) {
{
Reset(TrtPrecisionMode::INT8);
NodeDef node_def =
MakeNodeDef("my_quantize", "FakeQuantWithMinMaxArgs", {"input"});
AddTestTensor("input", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kNotFound,
"No attr named 'min'");
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto quantize_attrs = ops::FakeQuantWithMinMaxArgs::Min(-6.0f).Max(6.0f);
auto quantize = ops::FakeQuantWithMinMaxArgs(s.WithOpName("my_quantize"),
input, quantize_attrs);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::FakeQuantWithMinMaxVars(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::QuantizeAndDequantizeV2(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto quantize = ops::QuantizeAndDequantizeV2(
s.WithOpName("my_quantize"), input, weights_min, weights_max);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestTensor("weights_min", {1});
AddTestTensor("weights_max", {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input_min\" for "
"QuantizeAndDequantizeV2 must be a constant");
}
{
Reset(TrtPrecisionMode::INT8);
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto weights_min = ops::Placeholder(s.WithOpName("weights_min"), DT_FLOAT);
auto weights_max = ops::Placeholder(s.WithOpName("weights_max"), DT_FLOAT);
auto num_bits = ops::Placeholder(s.WithOpName("num_bits"), DT_INT32);
auto quantize = ops::QuantizeAndDequantizeV3(
s.WithOpName("my_quantize"), input, weights_min, weights_max, num_bits);
const NodeDef& node_def = quantize.operation.node()->def();
AddTestTensor("input", {1, 2, 3});
AddTestWeights<float>("weights_min", {1}, {-6.0f});
AddTestWeights<float>("weights_max", {1}, {6.0f});
AddTestWeights<int>("num_bits", {1}, {8});
RunValidationAndConversion(node_def);
TRT_TensorOrWeights output;
TF_EXPECT_OK(GetTensorOrWeights("my_quantize", &output));
ASSERT_TRUE(output.is_tensor());
auto ranges = quantization_ranges();
EXPECT_EQ(1, ranges.count(output.tensor()->trt_tensor()));
EXPECT_EQ(6.0f, ranges[output.tensor()->trt_tensor()]);
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertSquare) {
{
Reset();
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto square = ops::Square(s.WithOpName("my_square"), input);
NodeDef node_def = square.operation.node()->def();
AddTestWeights("input", {1, 2, 3}, {1, 2, 3, 4, -5, 6}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"x\" for Square must be a tensor");
}
Reset();
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto square = ops::Square(s.WithOpName("my_square"), input);
NodeDef node_def = square.operation.node()->def();
const int num_inputs = 20;
std::vector<float> inputs(num_inputs);
std::vector<float> expected_outputs(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const float value = (i - 9);
inputs[i] = value;
expected_outputs[i] = value * value;
}
AddTestTensor("input", {1, 1, 20}, tf_type_, inputs);
TestOpConverter(node_def, {1, 1, 20}, OkStatus(), OkStatus(),
ArrayFloatNear(expected_outputs, 0));
}
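// Treats `config` as a binary counter (last element is the least significant
// bit) and advances it to the next combination; returns false once every
// combination has been produced and the vector wraps back to all zeros. Used
// to sweep every tensor-vs-weight assignment of an op's inputs.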
bool nextTensorWeightConfiguration(std::vector<int>& config) {
for (int i = config.size(); i-- > 0;) {
if ((config[i] = 1 - config[i])) return true;
}
return false;
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertFill) {
Scope s = Scope::NewRootScope();
auto dims = ops::Placeholder(s.WithOpName("dims"), DT_INT32);
auto value = ops::Placeholder(s.WithOpName("value"), tf_type_);
auto fill = ops::Fill(s.WithOpName("my_fill"), dims, value);
const NodeDef& node_def = fill.operation.node()->def();
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
Reset();
AddTestWeights("dims", {2}, {2, 2}, DT_INT32);
AddTestWeights("value", {1}, {42}, tf_type_);
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
convert_not_supported_implicit(node_def.op(), node_def.name()));
return;
}
std::vector<std::vector<int>> output_dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
std::vector<std::vector<int>> value_dims_params = {{}, {1}};
float val = 42.0;
Status status = OkStatus();
for (bool dims_is_tensor : {true, false}) {
for (bool value_is_tensor : {true, false}) {
for (auto output_dims : output_dims_params) {
for (auto value_dims : value_dims_params) {
Reset();
std::vector<int32_t> dims_dims = {
static_cast<int32_t>(output_dims.size())};
if (dims_is_tensor) {
AddTestTensor("dims", dims_dims, DT_INT32, output_dims, dims_dims);
} else {
AddTestWeights("dims", dims_dims, output_dims, DT_INT32);
}
if (value_is_tensor) {
AddTestTensor("value", value_dims, tf_type_,
{static_cast<int>(val)});
} else {
AddTestWeights("value", value_dims, {static_cast<int>(val)},
tf_type_);
}
size_t nb_el = 1;
for (auto d : output_dims) {
nb_el *= d;
}
std::vector<float> expected_output(nb_el, val);
TestOpConverter(node_def, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertRange) {
auto get_casted_value = [this](const float value, const DataType dtype) {
return dtype == DT_INT32 ? static_cast<int32>(value) : value;
};
auto set_parameters = [this](const std::array<const char*, 3>& name,
const std::array<std::vector<float>, 3>& value,
const std::array<DataType, 3>& type,
const std::vector<int>& config,
int shape_idx = -1) {
Reset();
for (int i = 0; i < 3; i++) {
if (config[i]) {
std::vector<int32> partial_shape_dims = {};
if (shape_idx > 3 || (shape_idx >= 0 && shape_idx != i)) {
partial_shape_dims = {1};
}
AddTestTensor(name[i], {1}, type[i], value[i], partial_shape_dims);
} else {
AddTestWeights(name[i], {1}, value[i], type[i]);
}
}
};
const float start = 1.0;
const float limit = 43.0;
const float delta = 2.0;
const std::array<const char*, 3> param_name = {"start", "limit", "delta"};
std::array<std::vector<float>, 3> param_value;
param_value[0] = {start};
param_value[1] = {limit};
param_value[2] = {delta};
const auto start_type = tf_type_;
std::array<DataType, 3> param_type = {tf_type_, tf_type_, tf_type_};
Scope s = Scope::NewRootScope();
const auto range =
ops::Range(s.WithOpName("my_range"),
ops::Placeholder(s.WithOpName(param_name[0]), param_type[0]),
ops::Placeholder(s.WithOpName(param_name[1]), param_type[1]),
ops::Placeholder(s.WithOpName(param_name[2]), param_type[2]));
const NodeDef& ndef = range.operation.node()->def();
const std::vector<DataType> param_types{DT_FLOAT, DT_HALF, DT_INT32};
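  // config[i] selects whether start/limit/delta is added as a tensor (1) or a
  // weight (0); nextTensorWeightConfiguration iterates over all combinations.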
std::vector<int> config(3, 0);
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
const auto& err = convert_not_supported_implicit(ndef.op(), ndef.name());
do {
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(ndef, absl::StatusCode::kUnimplemented, err);
} while (nextTensorWeightConfiguration(config));
return;
}
const auto& expect_msg = convert_range_expected_msg(ndef);
bool all_weights = true;
do {
for (auto limit_type : param_types) {
param_type[1] = limit_type;
for (auto delta_type : param_types) {
param_type[2] = delta_type;
const auto all_integers = start_type == DT_INT32 &&
limit_type == DT_INT32 &&
delta_type == DT_INT32;
if (all_weights || (all_integers && !config[2])) {
param_value[2] = {0};
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(
ndef, absl::StatusCode::kInvalidArgument,
"The delta parameter of Range operation cannot be equal to 0");
if (!all_weights && !config[2]) {
param_value[2] = {-1};
set_parameters(param_name, param_value, param_type, config);
const string err = StrCat(
"The delta parameter of Range operation "
"cannot be negative, when one of (start, limit) is passed as "
"a tensor, but got ",
param_value[2][0]);
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
err);
}
}
if (all_weights) {
for (int j = 0; j <= 1; j++) {
param_value[j] = {get_casted_value(start, tf_type_)};
param_value[1 - j] = {get_casted_value(limit, limit_type)};
param_value[2] = {(2 * j - 1) *
get_casted_value(delta, delta_type)};
set_parameters(param_name, param_value, param_type, config);
const auto error = convert_range_error_msg(
param_value[0][0], param_value[1][0], param_value[2][0]);
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
error);
}
}
param_value[0] = {start};
param_value[2] = {delta};
if (all_integers) {
if (trt_mode_ == TrtTestMode::kDynamicShape) {
for (int j = 0; j < 3; j++) {
if (!config[j]) continue;
const string err =
StrCat("Dimension for '", param_name[j],
"' of Range operator should be equal to 1");
set_parameters(param_name, param_value, param_type, config, j);
RunValidationAndConversion(
ndef, absl::StatusCode::kInvalidArgument, err);
}
}
} else {
if (!all_weights) {
set_parameters(param_name, param_value, param_type, config);
RunValidationAndConversion(ndef, absl::StatusCode::kUnimplemented,
expect_msg);
}
}
}
}
all_weights = false;
} while (nextTensorWeightConfiguration(config));
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(DT_BOOL, &trt_type));
const std::string error_msg =
"Unsupported data type " + DebugString(trt_type) + " used for '";
do {
for (auto limit_type : param_types) {
param_type[1] = limit_type;
for (auto delta_type : param_types) {
param_type[2] = delta_type;
for (int i = 0; i < 3; i++) {
if (!config[i]) {
const auto saved_type = param_type[i];
param_type[i] = DT_BOOL;
set_parameters(param_name, param_value, param_type, config);
param_type[i] = saved_type;
RunValidationAndConversion(ndef, absl::StatusCode::kInvalidArgument,
error_msg + param_name[i] + "'");
}
}
}
}
} while (nextTensorWeightConfiguration(config));
const Status status = OkStatus();
const std::vector<DataType> int_type{DT_INT32};
int partial_shape_idx = -1;
all_weights = true;
do {
const auto& types = all_weights ? param_types : int_type;
const auto jEnd = all_weights ? 1 : 0;
for (auto limit_type : types) {
param_type[1] = limit_type;
for (auto delta_type : types) {
param_type[2] = delta_type;
for (int j = 0; j <= jEnd; j++) {
const int mult = (1 - 2 * j);
param_value[j] = {get_casted_value(start, tf_type_)};
param_value[1 - j] = {get_casted_value(limit, limit_type)};
param_value[2] = {mult * get_casted_value(delta, delta_type)};
std::vector<float> expected_output;
const float limit_curr = param_value[1][0];
const float delta_curr = param_value[2][0];
float value = param_value[0][0];
int num_values = 0;
while (mult * (limit_curr - value) > 0) {
num_values++;
expected_output.push_back(value);
value += delta_curr;
}
set_parameters(param_name, param_value, param_type, config,
partial_shape_idx);
const std::vector<int> output_dims = {num_values};
TestOpConverter(ndef, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
if (all_weights) {
if (start_type != DT_INT32) break;
if (trt_mode_ == TrtTestMode::kDynamicShape) partial_shape_idx = 3;
all_weights = false;
}
} while (nextTensorWeightConfiguration(config));
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertLikeOps) {
auto get_node = [&](int value) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
if (value == 0) {
auto zeros_like = ops::ZerosLike(s.WithOpName("Zeros"), input);
return zeros_like.operation.node()->def();
}
auto ones_like = ops::OnesLike(s.WithOpName("Ones"), input);
return ones_like.operation.node()->def();
};
for (int value : {0, 1}) {
Reset();
const NodeDef& node_def = get_node(value);
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
std::vector<float> input_data(8, 42.0f);
AddTestTensor("input", {8}, tf_type_, input_data);
const auto& err = convert_not_supported_implicit(node_def.name() + "Like",
node_def.name());
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
err);
continue;
}
std::vector<std::vector<int>> output_dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
float val = 42.0;
Status status = OkStatus();
for (bool input_is_tensor : {true, false}) {
for (auto output_dims : output_dims_params) {
Reset();
size_t nb_el = 1;
for (auto d : output_dims) {
nb_el *= d;
}
std::vector<float> input_data(nb_el, val);
if (input_is_tensor) {
AddTestTensor("input", output_dims, tf_type_, input_data);
} else {
AddTestWeights("input", output_dims, input_data, tf_type_);
}
std::vector<float> expected_output(nb_el, value);
TestOpConverter(node_def, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
}
#endif
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
TEST_P(OpConverter_FP32_Test, ConvertCombinedNMS) {
auto get_nms_nodedef = [](DataType tf_type, bool clip_boxes = true,
bool pad_per_class = false) -> NodeDef {
Scope s = Scope::NewRootScope();
auto boxes_tensor = ops::Placeholder(s.WithOpName("boxes"), tf_type);
auto scores_tensor = ops::Placeholder(s.WithOpName("scores"), tf_type);
auto max_output_size_per_class =
ops::Placeholder(s.WithOpName("max_output_size_per_class"), DT_INT32);
auto max_total_size =
ops::Placeholder(s.WithOpName("max_total_size"), DT_INT32);
auto iou_threshold =
ops::Placeholder(s.WithOpName("iou_threshold"), tf_type);
auto score_threshold =
ops::Placeholder(s.WithOpName("score_threshold"), tf_type);
auto nms_attrs = ops::CombinedNonMaxSuppression::Attrs()
.PadPerClass(pad_per_class)
.ClipBoxes(clip_boxes);
auto nms_op = ops::CombinedNonMaxSuppression(
s.WithOpName("my_nms"), boxes_tensor, scores_tensor,
max_output_size_per_class, max_total_size, iou_threshold,
score_threshold, nms_attrs);
return nms_op.operation.node()->def();
};
struct TestParams {
const std::string description;
const std::vector<int32> boxes_tensor_dims;
const std::vector<int32> scores_tensor_dims;
const std::vector<float> boxes_values;
const std::vector<float> scores_values;
const int32 max_output_size_per_class;
const int32 max_total_size;
const float iou_threshold;
const float score_threshold;
const bool pad_per_class;
const bool clip_boxes;
const std::vector<std::vector<int32>> expected_output_dims;
const std::vector<float> exp_boxes;
const std::vector<float> exp_scores;
const std::vector<float> exp_classes;
const std::vector<float> exp_num_detections;
Status conversion_status;
Status runtime_status;
};
#if IS_TRT_VERSION_GE(8, 2, 1, 6) || defined(TF_TRT_USE_EFFICIENT_NMS_PLUGIN)
Status conv_status =
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(convert_not_supported_implicit(
"CombinedNonMaxSuppression", "my_nms"))
: OkStatus();
std::vector<TestParams> params = {
TestParams{"Test 1: clip boxes",
{1, 1, 3, 4},
{1, 1, 3},
{0, 0, 0.3, 1.4, 0, 0, 0.3, 1.4, 0, 0, 0.3, 1.4},
{0.4, 0.7, 0.3},
3,
2,
0.1,
0,
false,
true,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 0.3, 1.0, 0, 0, 0.3, 1.0},
{0.7, 0.4},
{1, 0},
{2},
conv_status},
TestParams{
"Test 2: iou threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.7,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 3, 2, 1},
{0, 0, 0, 0},
{4},
conv_status},
TestParams{
"Test 3: score threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
2,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 0, 0, 0, 0, 0, 0, 0, 0},
{5, 3, 0, 0},
{0, 0, 0, 0},
{2},
conv_status},
TestParams{
"Test 4: per class size and pad",
{1, 5, 1, 4},
{1, 5, 2},
{0, 0, 5, 10, 0, 1, 5, 11, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 0, 0, 4, 3, 0, 2, 0, 1, 0},
1,
4,
0.1,
0,
true,
false,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 5, 10, 0, 1, 5, 11},
{5, 4},
{0, 1},
{2},
conv_status},
TestParams{
"Test 5: different box coordinate order",
{1, 5, 1, 4},
{1, 5, 2},
{5, 10, 0, 0, 5, 11, 0, 1, 12, 4, 8, 0, 10, 6, 6, 2, 11, 12, 8, 9},
{5, 0, 0, 4, 3, 0, 2, 0, 1, 0},
1,
4,
0.1,
0,
true,
false,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{5, 10, 0, 0, 5, 11, 0, 1},
{5, 4},
{0, 1},
{2},
conv_status},
};
#else
Status conv_status =
trt_mode_ == TrtTestMode::kDynamicShape
? errors::Unimplemented(
"TensorRT BatchedNMS Plugin requires input with static shape")
: OkStatus();
std::vector<TestParams> params = {
TestParams{
"Test 1: Original test",
{1, 1, 3, 4},
{1, 1, 3},
{0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4},
{0.4, 0.7, 0.3},
3,
2,
.5f,
0,
false,
true,
{{1, 2, 4},
{1, 2},
{1, 2},
{1}},
{0, 0, 0.3, 0.4, 0, 0, 0.3, 0.4},
{0.7, 0.4},
{1, 0},
{2},
conv_status},
TestParams{
"Test 2: clip_boxes",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 4, 5, 14, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 8, 9, 11, 12, 0, 0, 0, 0},
{5, 3, 1, 0},
{0, 0, 0, -1},
{3},
conv_status},
TestParams{
"Test 3: score threshold",
{1, 5, 1, 4},
{1, 5, 1},
{0, 0, 5, 10, 0, 4, 5, 14, 8, 0, 12, 4, 6, 2, 10, 6, 8, 9, 11, 12},
{5, 4, 3, 2, 1},
4,
4,
0.1,
2,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{0, 0, 5, 10, 8, 0, 12, 4, 0, 0, 0, 0, 0, 0, 0, 0},
{5, 3, 0, 0},
{0, 0, -1, -1},
{2},
conv_status},
TestParams{
"Test 4: max coord first",
{1, 5, 1, 4},
{1, 5, 1},
{5, 10, 0, 0, 5, 14, 0, 4, 12, 4, 8, 0, 10, 6, 6, 2, 11, 12, 8, 9},
{5, 4, 3, 2, 1},
4,
4,
0.1,
0,
false,
false,
{{1, 4, 4},
{1, 4},
{1, 4},
{1}},
{5, 10, 0, 0, 12, 4, 8, 0, 11, 12, 8, 9, 0, 0, 0, 0},
{5, 3, 1, 0},
{0, 0, 0, -1},
{3},
conv_status},
TestParams{"Test 5: TopK error",
{1, 5000, 1, 4},
{1, 5000, 1},
{},
{},
4,
4,
0.1,
0,
false,
false,
{},
{},
{},
{},
{},
conv_status.ok()
? errors::InvalidArgument(
"TRT NMS plugin allow top_k<=4096, where top_k = "
"max(num_boxes, max_total_size). You can override "
"this by setting TF_TRT_ALLOW_NMS_TOPK_OVERRIDE=1 "
"environment variable, but this can result in a "
"loss of accuracy.")
: conv_status},
};
#endif
for (auto p : params) {
Reset();
SCOPED_TRACE(p.description);
AddTestTensor("boxes", p.boxes_tensor_dims, p.boxes_values);
AddTestTensor("scores", p.scores_tensor_dims, p.scores_values);
AddTestWeights<int32>("max_output_size_per_class", {1},
{p.max_output_size_per_class});
AddTestWeights<int32>("max_total_size", {1}, {p.max_total_size});
AddTestWeights<float>("iou_threshold", {1}, {p.iou_threshold}, tf_type_);
AddTestWeights<float>("score_threshold", {1}, {p.score_threshold},
tf_type_);
auto node_def = get_nms_nodedef(tf_type_, p.clip_boxes, p.pad_per_class);
TestOpConverterMultiOut(node_def, p.expected_output_dims,
p.conversion_status, p.runtime_status,
{
ElementsAreArray(p.exp_boxes),
ElementsAreArray(p.exp_scores),
ElementsAreArray(p.exp_classes),
ElementsAreArray(p.exp_num_detections),
},
{tf_type_, tf_type_, tf_type_, DT_INT32});
}
}
#endif
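// Builds a NodeDef that applies the unary op T to a single placeholder input.
// Specialized below for LeakyRelu, which takes an explicit alpha attribute.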
template <typename T>
NodeDef CreateUnaryOp(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
return T(s.WithOpName("my_unary"), input).operation.node()->def();
}
constexpr float kLeakyReluAlpha = 0.2f;
template <>
NodeDef CreateUnaryOp<ops::internal::LeakyRelu>(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
return ops::internal::LeakyRelu(
s.WithOpName("my_unary"), input,
ops::internal::LeakyRelu::Alpha(kLeakyReluAlpha))
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_UnaryTest, ConvertActivation) {
constexpr float kSeluAlpha = 1.7580993408473768599402175208123f;
constexpr float kSeluScale = 1.0507009873554804934193349852946f;
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = float (*)(float);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
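// ADD_OP registers an activation: a NodeDef factory plus a reference function
// used to compute the expected output values.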
#define ADD_OP(name, op, compute) \
op_map[name] = std::make_pair(CreateUnaryOp<op>, compute)
ADD_OP("LeakyRelu", ops::internal::LeakyRelu,
[](float x) { return (x > 0.0f) ? x : x * kLeakyReluAlpha; });
ADD_OP("Relu", ops::Relu, [](float x) { return (x > 0.0f) ? x : 0.0f; });
ADD_OP("Relu6", ops::Relu6,
[](float x) { return std::min(std::max(x, 0.0f), 6.0f); });
ADD_OP("Sigmoid", ops::Sigmoid,
[](float x) { return 1.0f / (1.0f + std::exp(-x)); });
ADD_OP("Tanh", ops::Tanh, static_cast<ValFunc>(std::tanh));
ADD_OP("Elu", ops::Elu,
[](float x) { return (x > 0.0f) ? x : std::exp(x) - 1; });
ADD_OP("Selu", ops::Selu, [](float x) {
return (x > 0.0f) ? kSeluScale * x
: kSeluScale * kSeluAlpha * (std::exp(x) - 1);
});
ADD_OP("Softsign", ops::Softsign,
[](float x) { return x / (std::abs(x) + 1); });
ADD_OP("Softplus", ops::Softplus,
[](float x) { return std::log(std::exp(x) + 1); });
#undef ADD_OP
const std::vector<float> input = {-100, -2, -1, 0, 1, 88};
const bool nan_sensitive = false;
#if IS_TRT_VERSION_GE(8, 0, 0, 0)
const float max_abs_error = 1e-4;
#else
const float max_abs_error = 0.;
#endif
RunTests("Activation", *ActivationTypeMap(), op_map, input, "input",
max_abs_error, nan_sensitive);
}
TEST_P(OpConverter_FP32_Test, ConvertExpandDims) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto expanddims =
ops::ExpandDims(s.WithOpName("my_expanddims"), input, weights);
const NodeDef& node_def = expanddims.operation.node()->def();
{
Reset();
AddTestWeights<int32>("input", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("weights", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for ExpandDims must be a "
"tensor");
}
{
Reset();
AddTestTensor("input", {3, 2, 1});
AddTestTensor("weights", {3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for ExpandDims must be a "
"constant");
}
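  // Each entry: {input dims, partial input dims, expected output dims, {axis},
  // expected status}. Implicit batch mode must reject axis values that would
  // touch the batch dimension.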
std::vector<TestParamBase> test_params = {
TestParamBase{{1, 1, 2, 3},
{},
{1, 1, 1, 2, 3},
{0},
trt_mode_ == TrtTestMode::kImplicitBatch
? Status(absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus()},
TestParamBase{{1, 1, 2, 3},
{},
{1, 1, 1, 2, 3},
{-5},
trt_mode_ == TrtTestMode::kImplicitBatch
? Status(absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus()},
TestParamBase{{1, 1, 2, 3},
{},
{},
{5},
Status(absl::StatusCode::kInvalidArgument,
"Axis value of 5 is out of bounds, must be in range"
" [-5, 5)")},
TestParamBase{{1, 1, 2, 3},
{},
{},
{-6},
Status(absl::StatusCode::kInvalidArgument,
"Axis value of -6 is out of bounds, must be in range"
" [-5, 5)")},
TestParamBase{{1, 2, 3}, {}, {1, 1, 2, 3}, {1}},
TestParamBase{{1, 2, 3}, {}, {1, 1, 2, 3}, {-3}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 3, 1}, {3}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 3, 1}, {-1}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 1, 3}, {2}},
TestParamBase{{1, 2, 3}, {}, {1, 2, 1, 3}, {-2}},
TestParamBase{{1, 6}, {}, {1, 1, 6}, {1}},
TestParamBase{{1, 6}, {}, {1, 6, 1}, {-1}},
};
for (auto p : test_params) {
Reset();
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("weights", {1}, {p.param[0]});
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertSoftmax) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("logits"), tf_type_);
auto softmax = ops::Softmax(s.WithOpName("my_softmax"), input);
const NodeDef& node_def = softmax.operation.node()->def();
struct TestParams {
std::vector<int> input_dims;
std::vector<float> expected_values;
};
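  // Softmax is computed over the last dimension, so reshaping the same six
  // logits changes the normalization groups and hence the expected values.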
std::vector<TestParams> test_params = {
TestParams{{2, 3},
{0.09003057, 0.24472848, 0.66524094,
0.09003057, 0.24472848, 0.66524094}},
TestParams{{6, 1},
{1, 1, 1, 1, 1, 1}},
TestParams{{1, 6},
{0.00426978, 0.01160646, 0.03154963,
0.08576079, 0.23312202, 0.6336913}}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
for (auto p : test_params) {
Reset();
AddTestTensor("logits", p.input_dims, input_values);
TestOpConverter(node_def, p.input_dims, OkStatus(), OkStatus(),
ArrayFloatNear(p.expected_values, 1e-3));
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertLogSoftmax) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("logits"), tf_type_);
auto logsoftmax = ops::LogSoftmax(s.WithOpName("my_logsoftmax"), input);
const NodeDef& node_def = logsoftmax.operation.node()->def();
struct TestParams {
std::vector<int> input_dims;
std::vector<float> expected_values;
};
std::vector<TestParams> test_params = {
TestParams{{2, 3},
{-2.4076061, -1.407606, -0.40760604,
-2.4076061, -1.407606, -0.40760604}},
TestParams{{1, 6},
{-5.4561934, -4.4561934, -3.4561934,
-2.4561934, -1.4561933, -0.45619333}},
TestParams{{6, 1},
{0, 0, 0, 0, 0, 0}}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
for (auto p : test_params) {
Reset();
AddTestTensor("logits", p.input_dims, input_values);
TestOpConverter(node_def, p.input_dims, OkStatus(), OkStatus(),
ArrayFloatNear(p.expected_values, 1e-3));
}
}
TEST_P(OpConverter_FP32_Test, ConvertSqueeze) {
const bool use_implicit_batch = (trt_mode_ == TrtTestMode::kImplicitBatch);
auto get_squeeze_nodedef = [](std::vector<int> axes,
DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
if (!axes.empty()) {
ops::Squeeze::Attrs squeeze_attrs;
squeeze_attrs.axis_ = gtl::ArraySlice<int>(axes);
auto squeeze =
ops::Squeeze(s.WithOpName("my_squeeze"), input, squeeze_attrs);
return squeeze.operation.node()->def();
} else {
auto squeeze = ops::Squeeze(s.WithOpName("my_squeeze"), input);
return squeeze.operation.node()->def();
}
};
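  // Each entry: {input dims, partial input dims, expected output dims,
  // squeeze axes, expected status}. Implicit batch mode must reject squeezing
  // the batch dimension, and dimensions of size != 1 cannot be squeezed.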
std::vector<TestParamBase> test_params = {
TestParamBase{
{1, 2, 1, 3},
{},
{2, 3},
{},
trt_mode_ == TrtTestMode::kExplicitBatch
? OkStatus()
: Status{absl::StatusCode::kUnimplemented,
"Squeeze is not implemented for empty squeeze_dims"}},
TestParamBase{{1, 2, 1, 3},
{},
{2, 1, 3},
{0},
use_implicit_batch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension"}
: OkStatus()},
TestParamBase{{1, 2, 1, 3},
{},
{2, 1, 3},
{-4},
use_implicit_batch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension"}
: OkStatus()},
TestParamBase{
{1, 1, 2, 3},
{},
{},
{4},
Status{absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in range [-4, 4)"}},
TestParamBase{
{1, 1, 2, 3},
{},
{},
{-5},
Status{
absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in range [-4, 4)"}},
TestParamBase{{1, 1, 2, 3}, {}, {1, 2, 3}, {1}},
TestParamBase{{1, 1, 2, 3}, {}, {1, 2, 3}, {-3}},
TestParamBase{{1, 2, 3, 1}, {}, {1, 2, 3}, {3}},
TestParamBase{{1, 2, 3, 1}, {}, {1, 2, 3}, {-1}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {1, 3, 5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {3, 1, 5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {-1, -3, -5}},
TestParamBase{{1, 1, 2, 1, 3, 1}, {}, {1, 2, 3}, {1, -3, 5}},
TestParamBase{{1, 1, 6}, {}, {1, 6}, {1}},
TestParamBase{{1, 6, 1}, {}, {1, 6}, {2}},
};
auto squeeze_non_singleton = TestParamBase{
{1, 1, 2, 3},
{},
{},
{2},
Status{absl::StatusCode::kInvalidArgument,
"Dimension 2 with size 2 cannot be squeezed because it must be "
"size 1"}};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
squeeze_non_singleton.status = OkStatus();
squeeze_non_singleton.runtime_status =
errors::InvalidArgument("Negative number of dimensions -1");
test_params.push_back(TestParamBase{{2, 1, 3}, {2, -1, 3}, {2, 3}, {1}});
test_params.push_back(TestParamBase{{2, 1, 3}, {2, 1, -1}, {2, 3}, {1}});
}
test_params.push_back(squeeze_non_singleton);
for (TestParamBase p : test_params) {
SCOPED_TRACE(p);
Reset();
NodeDef node_def = get_squeeze_nodedef(p.param, tf_type_);
AddTestTensor("input", p.input_dims, {1, 2, 3, 4, 5, 6},
p.partial_input_dims);
TestOpConverter(node_def, p.expected_output_dims, p.status,
p.runtime_status, ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertStridedSlice) {
auto get_strided_slice_nodedef =
[](DataType tf_type, int64 begin_mask = 0, int64 end_mask = 0,
int64 ellipsis_mask = 0, int64 new_axis_mask = 0,
int64 shrink_axis_mask = 0) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto begin = ops::Placeholder(s.WithOpName("begin"), DT_INT32);
auto end = ops::Placeholder(s.WithOpName("end"), DT_INT32);
auto strides = ops::Placeholder(s.WithOpName("strides"), DT_INT32);
ops::StridedSlice::Attrs attrs = ops::StridedSlice::Attrs()
.BeginMask(begin_mask)
.EndMask(end_mask)
.EllipsisMask(ellipsis_mask)
.NewAxisMask(new_axis_mask)
.ShrinkAxisMask(shrink_axis_mask);
auto strided_slice = ops::StridedSlice(s.WithOpName("my_strided_slice"),
input, begin, end, strides, attrs);
return strided_slice.operation.node()->def();
};
{
Reset();
NodeDef node_def = get_strided_slice_nodedef(tf_type_);
AddTestWeights<int32>("input", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<int32>("begin", {4}, {0, 0, 0, 0});
AddTestWeights<int32>("end", {4}, {1, 1, 2, 3});
AddTestWeights<int32>("strides", {4}, {1, 1, 1, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for StridedSlice must "
"be a tensor");
}
{
Reset();
NodeDef node_def = get_strided_slice_nodedef(tf_type_);
AddTestTensor("input", {4, 1, 1, 1});
AddTestTensor("begin", {4});
AddTestTensor("end", {4});
AddTestTensor("strides", {4});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"begin\" for StridedSlice must be a constant");
}
struct TestParams {
std::vector<int> input_dims;
std::vector<int> begin;
std::vector<int> end;
std::vector<int> strides;
int begin_mask;
int end_mask;
int ellipsis_mask;
int new_axis_mask;
int shrink_axis_mask;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
Status runtime_status;
std::vector<int> partial_input_dims;
};
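  // Packs a per-dimension 0/1 vector into the bitmask encoding used by the
  // StridedSlice begin/end/ellipsis/new_axis/shrink_axis mask attributes.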
auto get_mask = [](const std::vector<int>& mask) {
int result = 0;
for (int i = 0; i < mask.size(); i++) {
if (mask[i]) result += (1 << i);
}
return result;
};
const std::vector<float> ok_input = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
Status modified_batch_dim_status =
(trt_mode_ == TrtTestMode::kImplicitBatch)
? errors::Unimplemented(
"TensorRT does not allow modifications to "
"the batch dimension")
: OkStatus();
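  // The reference input is the iota 1..12. Cases exercise begin/end masks,
  // negative strides, out-of-range bounds that must be clamped, ellipsis and
  // shrink-axis masks, and partial (dynamic) input shapes.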
std::vector<TestParams> params = {
TestParams{{2, 1, 1, 3},
{0, 0, 0, 0},
{1, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
modified_batch_dim_status,
OkStatus(),
{}},
TestParams{
{2, 1, 1, 3},
{0, 0, 0, 0},
{1, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
modified_batch_dim_status,
OkStatus(),
{-1, 1, 1, 3},
},
TestParams{
{2, 1, 1, 3},
{0, 0, 0, 0},
{0, 1, 1, 2},
{1, 1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{2, 1, 1, 2},
{1, 2, 4, 5},
OkStatus(),
OkStatus(),
{-1, 1, 1, 3},
},
TestParams{{1, 1, 2, 3},
{0, 0, 2, 0},
{1, 1, 0, 3},
{1, 1, 1, 1},
0,
0,
0,
0,
0,
{},
{},
errors::InvalidArgument("\"size\" cannot be negative for "
"StridedSlice"),
OkStatus(),
{}},
TestParams{
{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
OkStatus(),
OkStatus(),
{1, 1, -1, -1},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
OkStatus(),
OkStatus(),
{1, 1, -1, -1},
},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 1, 2, 3},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
},
TestParams{{1, 1, 2, 3},
{0, 0, 1, 2},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{6, 5},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 1, 1},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 2, 2},
{5, 4, 2, 1},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 0},
{1, 1, -1, -1},
get_mask({0, 0, 1, 1}),
get_mask({1, 1, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{6, 5},
OkStatus(),
OkStatus(),
{1, 1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 0},
{1, -1, -1, -1},
get_mask({1, 1, 1, 1}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 2, 3},
{6, 5, 4, 3, 2, 1},
OkStatus(),
OkStatus(),
{1, -1, -1, -1}},
TestParams{
{1, 2, 3, 1},
{0, 0, 0, 0},
{0, 1, 2, 1},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 1},
{1, 2},
},
TestParams{
{1, 2, 3, 1},
{0, 1, 1, 0},
{0, 2, 3, 1},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 1},
{5, 6},
},
TestParams{
{1, 2, 1, 3},
{0, 0, 0, 0},
{0, 1, 1, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{1, 2},
},
TestParams{
{1, 2, 1, 3},
{0, 1, 0, 1},
{0, 2, 1, 3},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 1, 2},
{5, 6},
},
TestParams{
{1, 2, 3},
{0, 0, 0},
{0, 1, 2},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 0}),
0,
0,
0,
{1, 1, 2},
{1, 2},
},
TestParams{{1, 2, 3},
{0, 1, 1},
{0, 0, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 1, 1}),
0,
0,
0,
{1, 1, 2},
{5, 6},
OkStatus(),
OkStatus(),
{-1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0},
{0, 0, 0, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 0}),
0,
0,
0,
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 1, 2, 3},
{0, 0, 1, 0},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 1, 3},
{4, 5, 6},
},
TestParams{{1, 2, 3, 1},
{0, 0, 0, 0},
{0, 1, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 1, 1}),
0,
0,
0,
{1, 1, 3, 1},
{1, 2, 3},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 2, 3, 1},
{0, 1, 0, 0},
{0, 0, 0, 0},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 1, 1, 1}),
0,
0,
0,
{1, 1, 3, 1},
{4, 5, 6},
},
TestParams{{1, 6},
{0, 0},
{0, 3},
{1, 1},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 2, 3},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{
{1, 1, 6},
{0, 0, 2},
{0, 0, 5},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 1, 0}),
0,
0,
0,
{1, 1, 3},
{3, 4, 5},
},
TestParams{
{1, 6, 1},
{0, 2, 0},
{0, 5, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 3, 1},
{3, 4, 5},
},
TestParams{
{1, 6, 1},
{0, -6, 0},
{0, -3, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 3, 1},
{1, 2, 3},
},
TestParams{
{1, 6, 1},
{0, 0, 0},
{0, -1, 0},
{1, 1, 1},
get_mask({0, 0, 0}),
get_mask({1, 0, 1}),
0,
0,
0,
{1, 5, 1},
{1, 2, 3, 4, 5},
},
TestParams{
{1, 1, 2, 3},
{0, 0, -9999, -9},
{0, 1, 1000, 4},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
0,
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
},
TestParams{{1, 6},
{0, 0},
{0, 5},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 3, 5},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 0},
{0, 6},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{1, 3, 5},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 1},
{0, 6},
{1, 2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{2, 4, 6},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 2},
{0, 6},
{1, 3},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 2},
{3, 6},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 0},
{1, -2},
get_mask({0, 0}),
get_mask({1, 1}),
0,
0,
0,
{1, 3},
{6, 4, 2},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 0},
{1, -2},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 3},
{6, 4, 2},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 6},
{0, 5},
{0, 1},
{1, -3},
get_mask({0, 0}),
get_mask({1, 0}),
0,
0,
0,
{1, 2},
{6, 3},
OkStatus(),
OkStatus(),
{-1, -1}},
TestParams{{1, 1, 2, 3},
{0, 1},
{0, 2},
{1, 1},
get_mask({0, 0}),
get_mask({0, 0}),
get_mask({1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{
{1, 1, 2, 3},
{0, 0, 1},
{0, 0, 2},
{1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
get_mask({0, 1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 1, 2, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 1, 0, 1},
{1, 1, 2, 2},
{1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({0, 1, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 0, 1},
{0, 1, 1, 2, 2},
{1, 1, 1, 1, 1},
get_mask({0, 0, 0, 0}),
get_mask({0, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
{1, 1, 2, 1},
{2, 5},
OkStatus(),
OkStatus(),
{-1, -1, -1, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 0, 0, 2},
{1, 1, 1, 1},
get_mask({1, 1, 1, 0}),
get_mask({1, 1, 1, 0}),
0,
0,
get_mask({0, 0, 0, 1}),
{1, 1, 2},
{2, 5},
OkStatus(),
OkStatus(),
{1, 1, 2, -1}},
TestParams{{1, 1, 2, 3},
{0, 0, 0, 1},
{0, 1, 2, 2},
{1, 1, 1, 1},
get_mask({1, 0, 0, 0}),
get_mask({1, 0, 0, 0}),
0,
0,
get_mask({0, 1, 0, 1}),
{1, 2},
{2, 5},
OkStatus(),
OkStatus(),
{1, 1, 2, -1}},
TestParams{{6, 1, 1},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
get_mask({1, 1, 1}),
get_mask({1, 1, 1}),
0,
0,
get_mask({0, 1, 1}),
{6},
{1, 2, 3, 4, 5, 6},
OkStatus(),
OkStatus(),
{-1, -1, -1}},
TestParams{{1, 6},
{0, 0, 0},
{0, 0, 0},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
get_mask({1, 0, 0}),
get_mask({0, 0, 0}),
{1, 1, 6},
{1, 1, 6},
errors::Unimplemented(
"new_axis_mask is not supported for StridedSlice"),
OkStatus(),
{1, 6}},
TestParams{{1, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
1,
{3, 2},
{1, 2, 3, 4, 5, 6},
modified_batch_dim_status, OkStatus(),
{-1, -1, -1}},
TestParams{{2, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
1,
{3, 2},
{1, 2, 3, 4, 5, 6},
modified_batch_dim_status, OkStatus(),
{-1, -1, 2}},
TestParams{{2, 3, 2},
{0, 0, 0},
{0, 0, 3},
{1, 1, 1},
get_mask({0, 1, 1}),
get_mask({0, 1, 1}),
0,
0,
3,
{2},
{1, 2},
modified_batch_dim_status, OkStatus(),
{-1, -1, 2}},
};
int i = 0;
for (auto p : params) {
Reset();
NodeDef node_def = get_strided_slice_nodedef(
tf_type_, p.begin_mask, p.end_mask, p.ellipsis_mask, p.new_axis_mask,
p.shrink_axis_mask);
VLOG(2) << "Preparing test case " << i++ << " with dims "
<< DebugString(p.input_dims);
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, ok_input);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, ok_input);
break;
}
case TrtTestMode::kDynamicShape: {
if (p.partial_input_dims.size() > 0) {
AddTestTensor("input", p.input_dims, tf_type_, ok_input,
p.partial_input_dims);
} else {
AddTestTensor("input", p.input_dims, tf_type_, ok_input,
p.input_dims);
}
break;
}
}
VLOG(2) << "Adding weights begin: " << DebugString(p.begin)
<< ", end: " << DebugString(p.end)
<< ", strides: " << DebugString(p.strides);
AddTestWeights<int32>("begin", {static_cast<int>(p.begin.size())}, p.begin);
AddTestWeights<int32>("end", {static_cast<int>(p.end.size())}, p.end);
AddTestWeights<int32>("strides", {static_cast<int>(p.strides.size())},
p.strides);
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertSlice) {
auto get_slice_nodedef = [](DataType tf_type) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto begin = ops::Placeholder(s.WithOpName("begin"), DT_INT32);
auto size = ops::Placeholder(s.WithOpName("size"), DT_INT32);
auto slice = ops::Slice(s.WithOpName("my_slice"), input, begin, size);
return slice.operation.node()->def();
};
struct TestParams {
std::vector<int> input_dims;
    // Symbolic (partially known) input shape used in dynamic-shape mode.
    std::vector<int> partial_input_dims;
std::vector<int> begin;
std::vector<int> size;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
Status conversion_status;
Status runtime_status;
};
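  // Cases cover in-range slices, -1 sizes (take everything up to the end of
  // the dimension), and begin/size combinations that are rejected at
  // conversion time or, in dynamic-shape mode, only fail at engine build time.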
std::vector<TestParams> params = {
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, -1, 0},
{1, 1, 2, 3},
{},
{},
errors::InvalidArgument("\"begin\" in Slice "
"is out of range")},
TestParams{{2, 1, 1, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 1, 3},
{1, 1, 1, 3},
{1, 2, 3},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"TensorRT does not allow modifications to the batch "
"dimension in implicit batch mode")
: OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{-1, 1, 2, 2},
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{-1, -1, -1, -1},
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
OkStatus()},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 2, 3},
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6}},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, -1, 2, 2},
{1, 1, 2, 2},
{1, 2, 4, 5},
OkStatus()},
TestParams{{1, 6},
{-1, -1},
{0, 1},
{1, 5},
{1, 5},
{2, 3, 4, 5, 6}},
TestParams{{1, 6},
{-1, -1},
{0, 1},
{-1, 3},
{1, 3},
{2, 3, 4}, OkStatus()},
TestParams{
{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 3, 0},
{1, 1, 2, 3},
{},
{},
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("\"begin\" + \"size\" for dimension "
"2 in Slice is out of range"),
errors::Internal("Internal: Failed to build TensorRT engine")},
TestParams{{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 2, -2},
{},
{},
errors::InvalidArgument("\"size\" in Slice is out of range")},
TestParams{
{1, 1, 2, 3},
{-1, -1, -1, -1},
{0, 0, 0, 0},
{1, 1, 3, 2},
{},
{},
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("\"begin\" + \"size\" for dimension "
"2 in Slice is out of range"),
errors::Internal("Internal: Failed to build TensorRT engine")},
};
logger_.unsuppressAllLoggerMsgs();
int i = 0;
for (auto p : params) {
Reset();
NodeDef node_def = get_slice_nodedef(tf_type_);
VLOG(2) << "Preparing test case " << i++ << " with dims "
<< DebugString(p.input_dims);
std::vector<int> input_vals = {1, 2, 3, 4, 5, 6};
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, input_vals);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, input_vals);
break;
}
case TrtTestMode::kDynamicShape: {
if (p.partial_input_dims.size() > 0) {
AddTestTensor("input", p.input_dims, tf_type_, input_vals,
p.partial_input_dims);
} else {
AddTestTensor("input", p.input_dims, tf_type_, input_vals,
p.input_dims);
}
break;
}
}
AddTestWeights<int32>("begin", {static_cast<int>(p.begin.size())}, p.begin);
AddTestWeights<int32>("size", {static_cast<int>(p.size.size())}, p.size);
    // Silence the expected TensorRT build errors for the dynamic-shape cases
    // that are meant to fail only at engine-build time (i == 9 and i == 11).
    const bool suppress_trt_errors =
        trt_mode_ == TrtTestMode::kDynamicShape && (i == 9 || i == 11);
    if (suppress_trt_errors)
      logger_.suppressLoggerMsgs(nvinfer1::ILogger::Severity::kERROR);
    TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
                    p.runtime_status, ElementsAreArray(p.expected_output));
    if (suppress_trt_errors)
      logger_.unsuppressLoggerMsgs(nvinfer1::ILogger::Severity::kERROR);
}
}
TEST_P(OpConverter_FP32_Test, ConvertConv2D) {
DataType tf_type = tf_type_;
auto get_conv2d_nodedef =
[tf_type](std::vector<int> strides = {1, 1, 1, 1},
string padding = "SAME", string data_format = "NCHW",
std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type);
ops::Conv2D::Attrs attrs =
ops::Conv2D::Attrs().DataFormat(data_format).Dilations(dilations);
auto conv2d = ops::Conv2D(s.WithOpName("my_conv2d"), input, filter, strides,
padding, attrs);
return conv2d.operation.node()->def();
};
{
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestWeights<float>("input", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Conv2D must be a tensor");
}
{
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestTensor("input", {3, 1, 2, 1});
AddTestTensor("weights", {3, 3, 1, 1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"filter\" for Conv2D must be a constant");
}
{
Reset();
NodeDef node_def = get_conv2d_nodedef();
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Conv2D expects kernel of dimension 4");
}
{
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NCHW", {1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution dilations field must specify 4 dimensions");
}
{
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NCHW", {1, 2, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1, 1}, "SAME", "NHWC", {1, 1, 1, 2});
AddTestTensor("input", {1, 2, 3, 1});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 1, 1}, "SAME", "NCHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution strides field must specify 4 dimensions");
}
{
Reset();
NodeDef node_def =
get_conv2d_nodedef({1, 2, 1, 1}, "SAME", "NCHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3});
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Stride must be 1 for batch and channel dimensions");
}
if (trt_mode_ == TrtTestMode::kDynamicShape) {
Reset();
NodeDef node_def = get_conv2d_nodedef();
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(tf_type_, &trt_type));
AddTestTensorWithTFDims("input", {-1, -1, -1, -1}, trt_type);
AddTestWeights<float>("weights", {1, 2, 1, 1}, {-1, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Channel dimension must be static");
}
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
};
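  // Valid convolutions over small inputs. For example, the {-1, 1} filter
  // computes differences of horizontally adjacent elements, so the VALID case
  // maps {0, 1, 2, 3, 3, 4} to {1, 1, 0, 1}.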
std::vector<TestParams> ok_params = {
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 2},
{1, 1, 0, 1}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 3},
{1, 1, -2, 0, 1, -4}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 3, 1, 1},
{-1, 0, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 3},
{1, 2, -1, 3, 1, -3}},
TestParams{{1, 2, 3, 1},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NHWC",
{1, 1, 1, 1},
{1, 2, 2, 1},
{1, 1, 0, 1}},
TestParams{{1, 1, 2, 3},
{0, 1, 2, 3, 3, 4},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"VALID",
"NCHW",
{1, 1, 1, 2},
{1, 1, 2, 1},
{2, 1}},
TestParams{{1, 1, 2, 4},
{0, 1, 2, 2, 3, 4, 4, 7},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"VALID",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 2},
{1, 0, 1, 3}},
};
for (int i = 0; i < ok_params.size(); i++) {
Reset();
NodeDef node_def =
get_conv2d_nodedef(ok_params[i].strides, ok_params[i].padding,
ok_params[i].data_format, ok_params[i].dilations);
std::vector<int> partial_input_shape;
if (trt_mode_ == TrtTestMode::kDynamicShape) {
partial_input_shape.resize(ok_params[i].input_dims.size(), -1);
int channel_id = (ok_params[i].data_format == "NCHW") ? 1 : 3;
partial_input_shape[channel_id] = ok_params[i].input_dims[channel_id];
}
AddTestTensor("input", ok_params[i].input_dims, tf_type_,
ok_params[i].input, partial_input_shape);
AddTestWeights<float>("weights", ok_params[i].filter_dims,
ok_params[i].filter);
TestOpConverter(node_def, ok_params[i].expected_output_dims, OkStatus(),
OkStatus(), ElementsAreArray(ok_params[i].expected_output));
}
}
TEST_P(OpConverter_FP32_Test, ConvertConv2DBackpropInput) {
auto get_conv2d_backprop_input_nodedef =
[](DataType tf_type, std::vector<int> strides = {1, 1, 1, 1},
string padding = "SAME", string data_format = "NCHW",
std::vector<int> dilations = {1, 1, 1, 1}) -> NodeDef {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto filter = ops::Placeholder(s.WithOpName("weights"), tf_type);
auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
ops::Conv2DBackpropInput::Attrs attrs = ops::Conv2DBackpropInput::Attrs()
.DataFormat(data_format)
.Dilations(dilations);
auto conv2d = ops::Conv2DBackpropInput(
s.WithOpName("my_conv2d_backprop_input"), input_sizes, filter, input,
strides, padding, attrs);
return conv2d.operation.node()->def();
};
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
std::vector<int> partial_input_dims;
};
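  // Conv2DBackpropInput (conv2d_transpose) cases: `input` holds the gradient
  // values and `input_sizes` the forward-pass input shape. EXPLICIT padding
  // and dilation are expected to be rejected.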
std::vector<TestParams> params = {
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3}},
TestParams{{1, 2, 2, 1},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 2, 1},
"SAME",
"NHWC",
{1, 1, 1, 1},
{1, 2, 4, 1},
{0, 0, -1, 1, -2, 2, -3, 3}},
TestParams{{1, 3, 1, 1},
{0, 1, 2},
{2, 1, 1, 1},
{-1, 1},
{1, 2, 1, 1},
"VALID",
"NHWC",
{1, 1, 1, 1},
{1, 7, 1, 1},
{0, 0, -1, 1, -2, 2, 0}},
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"EXPLICIT",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::Unimplemented("EXPLICIT padding type not "
"implemented, only VALID and SAME are"
" supported")},
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 1},
"SAME",
"NCHW",
{1, 1, 1, 2},
{1, 1, 2, 2},
{},
errors::Unimplemented("Dilation with Conv2DBackpropInput "
"(conv2d_transpose) is not supported")},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::InvalidArgument("Channel dimension must be static"),
{1, -1, 2, 2}});
params.push_back(TestParams{{2, 1, 2, 2},
{0, 1, 2, 3,
3, 2, 1, 0},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{2, 1, 2, 4},
{ 0, 0, -1, 1, -2, 2, -3, 3,
-3, 3, -2, 2, -1, 1, 0, 0},
OkStatus(),
{-1, 1, 2, 2}});
params.push_back(TestParams{
{1, 1, 2, 2},
{0, 1, 2, 3},
{1, 2, 1, 1},
{-1, 1},
{1, 1, 1, 2},
"SAME",
"NCHW",
{1, 1, 1, 1},
{1, 1, 2, 4},
{0, 0, -1, 1, -2, 2, -3, 3},
errors::Unimplemented(
"Conv2dBackpropInput does not support input with unknown spatial "
"shape"),
{1, 1, -1, -1}});
}
for (auto p : params) {
for (int input_sizes_length : {2, 4}) {
Reset();
NodeDef node_def = get_conv2d_backprop_input_nodedef(
tf_type_, p.strides, p.padding, p.data_format, p.dilations);
switch (trt_mode_) {
case TrtTestMode::kImplicitBatch: {
AddTestTensor("input", p.input_dims, p.input);
break;
}
case TrtTestMode::kExplicitBatch: {
AddTestTensor("input", p.input_dims, p.input);
break;
}
case TrtTestMode::kDynamicShape: {
AddTestTensor("input", p.input_dims, tf_type_, p.input,
p.partial_input_dims.size() > 0 ? p.partial_input_dims
: p.input_dims);
break;
}
default: {
ASSERT_TRUE(false) << "unknown test mode";
}
}
AddTestWeights<float>("weights", p.filter_dims, p.filter, tf_type_);
if (input_sizes_length == 4) {
AddTestWeights<int>("input_sizes", {4}, p.expected_output_dims);
} else {
std::vector<int> tf_input_sizes(2);
if (p.data_format == "NHWC") {
std::copy(p.expected_output_dims.begin() + 1,
p.expected_output_dims.end() - 1, tf_input_sizes.begin());
} else {
std::copy(p.expected_output_dims.begin() + 2,
p.expected_output_dims.end(), tf_input_sizes.begin());
}
QCHECK_EQ(2, tf_input_sizes.size());
AddTestWeights<int>("input_sizes", {2}, tf_input_sizes);
}
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
OkStatus(), ElementsAreArray(p.expected_output));
}
}
}
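// Builds a Conv3D NodeDef, or a Conv3DBackpropInputV2 NodeDef when
// is_conv3d_backprop_input is true.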
NodeDef GetConv3DNodeDef(std::vector<int> strides = {1, 1, 1, 1, 1},
string padding = "SAME", string data_format = "NCDHW",
std::vector<int> dilations = {1, 1, 1, 1, 1},
bool is_conv3d_backprop_input = false) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_FLOAT);
auto filter = ops::Placeholder(s.WithOpName("weights"), DT_FLOAT);
if (is_conv3d_backprop_input) {
auto input_sizes = ops::Placeholder(s.WithOpName("input_sizes"), DT_INT32);
ops::Conv3DBackpropInputV2::Attrs attrs =
ops::Conv3DBackpropInputV2::Attrs()
.DataFormat(data_format)
.Dilations(dilations);
auto conv3d =
ops::Conv3DBackpropInputV2(s.WithOpName("my_conv3d"), input_sizes,
filter, input, strides, padding, attrs);
return conv3d.operation.node()->def();
} else {
ops::Conv3D::Attrs attrs =
ops::Conv3D::Attrs().DataFormat(data_format).Dilations(dilations);
auto conv3d = ops::Conv3D(s.WithOpName("my_conv3d"), input, filter, strides,
padding, attrs);
return conv3d.operation.node()->def();
}
}
struct Conv3DTestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> filter_dims;
std::vector<float> filter;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> dilations;
bool is_conv3d_backprop;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
bool allow_dynamic_channel_dim;
Status validation_status;
};
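// Runs a single Conv3D test case. In dynamic-shape mode the channel dimension
// stays static unless the case explicitly allows a dynamic channel, which is
// used to trigger the "Channel dimension must be static" error.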
void TestConv3D(ParameterizedOpConverterTestBase* test, Conv3DTestParams& p) {
test->Reset();
NodeDef node_def = GetConv3DNodeDef(p.strides, p.padding, p.data_format,
p.dilations, p.is_conv3d_backprop);
std::vector<int> partial_input_shape;
if (!p.allow_dynamic_channel_dim &&
test->get_trt_mode() == TrtTestMode::kDynamicShape) {
partial_input_shape.resize(p.input_dims.size(), -1);
int channel_id = (p.data_format == "NCDHW") ? 1 : 4;
partial_input_shape[channel_id] = p.input_dims[channel_id];
}
test->AddTestTensor("input", p.input_dims, test->get_tf_type(), p.input,
partial_input_shape);
test->AddTestWeights<float>("weights", p.filter_dims, p.filter);
if (p.is_conv3d_backprop) {
test->AddTestWeights<float>("input_sizes",
{static_cast<int>(p.expected_output.size())},
p.expected_output);
}
test->TestOpConverter(node_def, p.expected_output_dims,
p.validation_status,
OkStatus(),
ElementsAreArray(p.expected_output),
{test->get_tf_type()});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertConv3D) {
{
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestWeights<float>("input", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestWeights<float>("weights", {1, 3, 3, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Conv3D must be a tensor");
}
{
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestTensor("weights", {1, 3, 3, 1}, tf_type_,
CreateVectorIota<float>(9));
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"filter\" for Conv3D must be a constant");
}
{
Reset();
NodeDef node_def = GetConv3DNodeDef();
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1}, {1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Conv3D expects kernel of dimension 5");
}
{
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>(
"weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution dilations field must specify 5 dimensions");
}
{
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 2, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC", {1, 1, 1, 1, 2});
AddTestTensor("input", {1, 2, 3, 1}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation rate must be 1 for batch and channel "
"dimensions");
}
{
Reset();
NodeDef node_def = GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC",
{1, 1, 2, 1, 1}, true);
AddTestTensor("input", {1, 2, 3, 1}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddTestWeights<int>("input_sizes", {4}, {1, 2, 3, 1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Dilation with Conv3DBackpropInputV2 "
"(conv3d_transpose) is not supported");
}
{
Reset();
NodeDef node_def = GetConv3DNodeDef({1, 1, 1, 1, 1}, "SAME", "NDHWC",
{1, 1, 1, 1, 1}, true);
AddTestTensor("input", {1, 2, 2, 2}, tf_type_, CreateVectorIota<float>(8));
AddTestWeights<float>("weights", {1, 1, 2, 1, 1}, {1, 1});
AddTestWeights<int>("input_sizes", {8}, {1, 2, 3, 4, 5, 6, 7, 8});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Asymmetric padding with Conv3DBackpropInputV2 "
"(conv3d_transpose) is not supported");
}
{
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 1, 1, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1, 1});
AddTestTensor("input", {1, 2, 2, 2}, tf_type_, CreateVectorIota<float>(8));
AddTestWeights<float>("weights", {1, 1, 2, 1, 1}, {1, 1});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Convolution strides field must specify 5 dimensions");
}
{
Reset();
NodeDef node_def =
GetConv3DNodeDef({1, 2, 1, 1, 1}, "SAME", "NCDHW", {1, 1, 1, 1, 1});
AddTestTensor("input", {1, 1, 2, 3}, tf_type_, CreateVectorIota<float>(6));
AddTestWeights<float>("weights", {3, 3, 1, 1, 1},
{1, 2, 3, 4, 5, 6, 7, 8, 9});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Stride must be 1 for batch and channel dimensions");
}
std::vector<Conv3DTestParams> ok_params = {
{{1, 1, 3, 3, 3},
{1, 2, 15, 3, 6, -3, 22, 1, 88, 56, 36, 1, 1, 105,
1, 16, -28, 1, 42, 9, 3, 1, 7, 1, 11, 61, 5},
{1, 1, 1, 1, 1},
{1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 3, 3, 3},
{1, 2, 15, 3, 6, -3, 22, 1, 88,
56, 36, 1, 1, 105, 1, 16, -28, 1,
42, 9, 3, 1, 7, 1, 11, 61, 5},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 6},
{2, 1, 1, 1, 1},
{1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 3},
{2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 7},
false,
OkStatus()},
{{1, 1, 2, 3, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 1, 1},
{-1, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 2},
{6, 6, 6, 6, 6, 6, -6, -7, -8, -9, -10, -11},
false,
OkStatus()},
{{1, 1, 2, 3, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{3, 1, 1, 1, 1},
{-1, 0, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 3, 2},
{6, 7, 8, 9, 10, 11, 0, -1, -2, -3, -4, -5},
false,
OkStatus()
},
{{1, 2, 3, 2, 2},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},
{2, 1, 1, 2, 1},
{-1, 1, 1, -1},
{1, 1, 1, 1, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
false,
{1, 1, 3, 2, 1},
{0, 0, 0, 0, 0, 0},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1, 1, 1, 1, 1, -10, -10, -10, -10, -10,
-10, -10, -10, -10, 7, 7, 7, 7, 7, 7, 7, 7, 7},
{2, 1, 1, 1, 1},
{1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 2, 1, 1},
false,
{1, 1, 1, 3, 3},
{8, 8, 8, 8, 8, 8, 8, 8, 8},
false,
OkStatus()},
{{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 5, 0, 6, 0, 0, 0, 7, 0, 8},
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{1, 1, 2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
false,
OkStatus()},
{{1, 1, 2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
true,
{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4,
0, 0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 6, 0, 0, 0, 7, 0, 8},
false,
OkStatus()},
};
if (trt_mode_ == TrtTestMode::kDynamicShape) {
ok_params.reserve(ok_params.size() + 2);
const std::vector<float> common_input = CreateVectorIota<float>(3 * 3 * 3);
ok_params.push_back(Conv3DTestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
false,
{},
{},
true,
Status{absl::StatusCode::kInvalidArgument,
"Channel dimension must be static"}});
ok_params.push_back(Conv3DTestParams{
{1, 3, 3, 3, 1},
common_input,
{1, 1, 1, 1, 1},
{1},
{1, 2, 2, 2, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
false,
{},
{},
true,
Status{absl::StatusCode::kInvalidArgument,
"Channel dimension must be static"}});
}
for (auto p : ok_params) {
TestConv3D(this, p);
}
}
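// Builds a MaxPool/AvgPool (2D) or MaxPool3D/AvgPool3D NodeDef with the given
// window size, strides, padding, and data format.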
template <typename T>
NodeDef CreatePoolOp(DataType tf_type, std::vector<int> ksize,
std::vector<int> strides, string padding,
string data_format) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
typename T::Attrs attrs;
attrs.data_format_ = data_format;
return T(s.WithOpName("my_pool"), input, ksize, strides, padding, attrs)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_Test, ConvertPool) {
auto get_pool_nodedef =
[](DataType tf_type, int nDim, std::vector<int> ksize = {},
std::vector<int> strides = {}, string padding = "SAME",
string data_format = "", const bool is_max_pooling = true) -> NodeDef {
if (ksize.empty()) {
ksize = nDim == 2 ? std::vector<int>{1, 1, 1, 1}
: std::vector<int>{1, 1, 1, 1, 1};
}
if (strides.empty()) {
strides = nDim == 2 ? std::vector<int>{1, 1, 1, 1}
: std::vector<int>{1, 1, 1, 1, 1};
}
if (data_format == "") {
data_format = nDim == 2 ? "NCHW" : "NCDHW";
}
if (is_max_pooling) {
if (nDim == 3) {
return CreatePoolOp<ops::MaxPool3D>(tf_type, ksize, strides, padding,
data_format);
} else {
return CreatePoolOp<ops::MaxPool>(tf_type, ksize, strides, padding,
data_format);
}
} else {
if (nDim == 3) {
return CreatePoolOp<ops::AvgPool3D>(tf_type, ksize, strides, padding,
data_format);
} else {
return CreatePoolOp<ops::AvgPool>(tf_type, ksize, strides, padding,
data_format);
}
}
};
std::vector<int> test_nDims{2, 3};
for (int nDim : test_nDims) {
Reset();
NodeDef node_def = get_pool_nodedef(tf_type_, nDim);
AddTestWeights<float>("input", {1, 1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
StrCat("The input \"input\" for ", node_def.op(), " must be a tensor"));
}
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input;
std::vector<int> ksize;
std::vector<int> strides;
string padding;
string data_format;
std::vector<int> expected_output_dims;
std::vector<std::vector<float>> expected_outputs;
Status status;
std::set<int> skip_dims;
};
const std::vector<float> common_input{-4, 2, 15, 3, 6, -3, 22, 1, 88,
56, 36, 1, 1, 105, 1, 16, -28, 1,
42, 9, 3, 1, 7, 1, 11, 61, 5};
const std::vector<float> common_2d_output{-4, 2, 15, 3, 6, -3, 22, 1, 88};
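  // Each case runs for both 2D and 3D pooling and for max and average pooling;
  // expected_outputs is ordered {max 2D, avg 2D, max 3D, avg 3D}. For the 2D
  // variants the depth entry is dropped from dims/ksize/strides and the input
  // is truncated to its first 3x3 slice.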
std::vector<TestParams> test_params = {
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1000, 1000, 1000},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, -1, 1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds"),
{2}},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, -1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, -1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input},
Status(absl::StatusCode::kInvalidArgument,
"Window dimensions are not within bounds")},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input}},
TestParams{
{1, 1, 3, 3, 3},
common_input,
{1, 1, 1, 1, 1},
{1, 1, 1, 1, 1},
"SAME",
"NCDHW",
{1, 1, 3, 3, 3},
{common_2d_output, common_2d_output, common_input, common_input}},
TestParams{{1, 1, 3, 3, 3},
common_input,
{1, 1, 3, 3, 3},
{1, 1, 1, 1, 1},
"VALID",
"NCDHW",
{1, 1, 1, 1, 1},
{{88}, {14.444445}, {105}, {17}}},
TestParams{{1, 3, 3, 3, 1},
common_input,
{1, 3, 3, 3, 1},
{1, 1, 1, 1, 1},
"VALID",
"NDHWC",
{1, 1, 1, 1, 1},
{{88}, {14.444445}, {105}, {17}}},
TestParams{{1, 1, 3, 3, 3},
{1, 0, 2, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0,
0, 0, 0, 0, 5, 0, 6, 0, 0, 0, 7, 0, 8},
{1, 1, 1, 1, 1},
{1, 1, 2, 2, 2},
"VALID",
"NCDHW",
{1, 1, 2, 2, 2},
{{1, 2, 3, 4},
{1, 2, 3, 4},
{1, 2, 3, 4, 5, 6, 7, 8},
{1, 2, 3, 4, 5, 6, 7, 8}}},
};
for (auto p : test_params) {
int test_counter = 0;
for (int nDim : test_nDims) {
if (p.skip_dims.find(nDim) != p.skip_dims.end()) {
continue;
}
auto input = p.input;
auto input_dims = p.input_dims;
auto ksize = p.ksize;
auto strides = p.strides;
auto expected_output_dims = p.expected_output_dims;
std::string data_format = p.data_format;
if (nDim == 2) {
input.resize(9);
data_format = p.data_format == "NDHWC" ? "NHWC" : "NCHW";
input_dims.erase(input_dims.begin() + 2);
ksize.erase(ksize.begin() + 2);
strides.erase(strides.begin() + 2);
expected_output_dims.erase(expected_output_dims.begin() + 2);
}
for (bool is_max_pooling : {true, false}) {
Reset();
NodeDef node = get_pool_nodedef(tf_type_, nDim, ksize, strides,
p.padding, data_format, is_max_pooling);
AddTestTensor("input", input_dims, input);
TestOpConverter(node, expected_output_dims, p.status, OkStatus(),
ElementsAreArray(p.expected_outputs.at(test_counter)));
test_counter++;
}
}
}
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertTopK) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type_);
auto weights = ops::Placeholder(s.WithOpName("weights"), DT_INT32);
auto topk = ops::TopK(s.WithOpName("my_topk"), input, weights);
const NodeDef& node_def = topk.operation.node()->def();
{
Reset();
AddTestTensor("input", {1, 1, 2, 3});
AddTestTensor("weights", {1}, DT_INT32, {});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"k\" for TopKV2 must be a constant");
}
{
Reset();
AddTestTensor("input", {1, 1, 2, 5}, {-9, 3, 5, 1, 6, -5, 7, 1, 0, -1});
AddTestWeights<int32>("weights", {1}, {2});
std::vector<std::vector<int>> expected_output_dims{{1, 1, 2, 2},
{1, 1, 2, 2}};
TestOpConverterMultiOut(node_def, expected_output_dims, OkStatus(),
OkStatus(),
{ElementsAre(6, 5, 7, 1), ElementsAre(4, 2, 1, 2)},
{tf_type_, DT_INT32});
}
}
struct DataFormatVecPermuteTestParams {
string dst_format;
string src_format;
std::vector<int> x_shape;
std::vector<int> x;
bool x_is_tensor;
std::vector<int> expected_output;
Status conversion_status;
};
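// Builds a DataFormatVecPermute NodeDef that reorders a vector (or Nx2 matrix)
// of dimension values from src_format order to dst_format order.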
NodeDef GetDataFormatVecPermuteNodeDef(string dst_format, string src_format,
std::vector<int>& x_shape) {
Scope s = Scope::NewRootScope();
PartialTensorShape tensor_shape;
auto x = ops::Placeholder(s.WithOpName("x"), DT_INT32);
const auto attrs = ops::DataFormatVecPermute::Attrs()
.DstFormat(dst_format)
.SrcFormat(src_format);
auto dfvp = ops::DataFormatVecPermute(s.WithOpName("my_dfvp"), x, attrs);
return dfvp.operation.node()->def();
}
TEST_P(OpConverter_INT32_Test, ConvertDataFormatVecPermute) {
const auto& error = convert_not_supported_implicit(
string("DataFormatVecPermute"), string("my_dfvp"));
const Status implicit_error = Status{absl::StatusCode::kUnimplemented, error};
const auto conversion_status =
trt_mode_ == TrtTestMode::kImplicitBatch ? implicit_error : OkStatus();
std::vector<DataFormatVecPermuteTestParams> test_params = {
DataFormatVecPermuteTestParams{"NCHW",
"NHWC",
{4},
{1, 2, 3, 4},
true,
{1, 4, 2, 3},
conversion_status},
DataFormatVecPermuteTestParams{"NCHW",
"NHWC",
{4},
{1, 2, 3, 4},
false,
{1, 4, 2, 3},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{1, 2, 7, 8, 3, 4, 5, 6},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
false,
{1, 2, 7, 8, 3, 4, 5, 6},
conversion_status},
DataFormatVecPermuteTestParams{
"NCDHW",
"NDHWC",
{5, 2},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
true,
{1, 2, 9, 10, 3, 4, 5, 6, 7, 8},
conversion_status},
DataFormatVecPermuteTestParams{"NCWH",
"NHWC",
{2, 2},
{1, 2, 3, 4},
true,
{3, 4, 1, 2},
conversion_status},
DataFormatVecPermuteTestParams{"NCHWD",
"NDHWC",
{3},
{1, 2, 3},
true,
{2, 3, 1},
conversion_status},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{2, 2, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"Input must be a vector or matrix, but got rank 3, at "
"my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{3},
{1, 2, 3},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"1D input must be of size 2 or 4, but got size 3, at "
"my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCDHW",
"NDHWC",
{4, 2},
{1, 2, 3, 4, 5, 6, 7, 8},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"First dimension of 2D input must be of size 3 or 5, "
"but got shape (4, 2), at my_dfvp"}},
DataFormatVecPermuteTestParams{
"NCHW",
"NHWC",
{4, 3},
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12},
true,
{},
trt_mode_ == TrtTestMode::kImplicitBatch
? implicit_error
: Status{absl::StatusCode::kInvalidArgument,
"Second dimension of 2D input must be of size 2, but "
"got shape (4, 3), at my_dfvp"}},
};
for (auto p : test_params) {
Reset();
const NodeDef node_def =
GetDataFormatVecPermuteNodeDef(p.dst_format, p.src_format, p.x_shape);
if (p.x_is_tensor) {
AddTestTensor("x", p.x_shape, DT_INT32, p.x, p.x_shape);
} else {
AddTestWeights("x", p.x_shape, p.x, DT_INT32);
}
TestOpConverter(node_def, p.x_shape, p.conversion_status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
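// Builds a GatherV2 NodeDef with params/indices/axis placeholders and the
// given batch_dims attribute.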
NodeDef CreateGatherOp(DataType tf_type, int batch_dims) {
Scope s = Scope::NewRootScope();
auto params = ops::Placeholder(s.WithOpName("params"), tf_type);
auto indices = ops::Placeholder(s.WithOpName("indices"), DT_INT32);
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
ops::GatherV2::Attrs op_attrs;
op_attrs.batch_dims_ = batch_dims;
auto gather =
ops::GatherV2(s.WithOpName("my_gather"), params, indices, axis, op_attrs);
const NodeDef& node_def = gather.operation.node()->def();
return node_def;
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertGather) {
auto node_def = CreateGatherOp(tf_type_, 0);
{
Reset();
AddTestTensor("params", {1, 1, 2, 3}, tf_type_, {});
AddTestTensor("indices", {1, 2}, DT_INT32, {});
AddTestTensor("axis", {1}, DT_INT32, {});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for GatherV2 must be a constant");
}
{
Reset();
AddTestTensor("params", {1, 1, 2, 3});
AddTestTensor("indices", {1, 2}, DT_INT32, {});
AddTestWeights<int32>("axis", {1}, {4});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
struct TestParams {
std::vector<int> params_shape;
std::vector<int> indices_shape;
std::vector<int> indices;
int axis;
int batch_dims;
std::vector<int> expected_output_shape;
std::vector<int> expected_output;
bool params_is_tensor;
bool indices_is_tensor;
Status conversion_status;
Status runtime_status;
Status add_index_status;
};
const std::vector<int> params_input = {1, 2, 3, 4, 5, 6};
std::vector<TestParams> test_params = {
TestParams{{2, 1, 1, 3},
{2},
{1, 0},
0,
0,
{2, 1, 1, 3},
{4, 5, 6, 1, 2, 3},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow "
"manipulation of the batch dimension"}
: OkStatus()},
TestParams{{2, 1, 3},
{2, 1},
{2, 0},
2,
0,
{2, 1, 2, 1},
{3, 1, 6, 4},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{{2, 1, 3},
{2, 1},
{2, 0},
2,
0,
{2, 1, 2, 1},
{3, 1, 6, 4},
true,
false,
OkStatus()},
TestParams{{2, 1, 3},
{2},
{1, 2},
2,
0,
{2, 1, 2},
{2, 3, 5, 6},
false,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input axis must be zero when "
"params is a weight."}
: OkStatus()},
TestParams{
{6},
{2},
{1, 3},
0,
0,
{2},
{2, 4},
true,
true,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow "
"manipulation of the batch dimension"}
: OkStatus(),
OkStatus(),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kInvalidArgument,
batch_size_error("indices",
"Provided batch size does not match "
"converter batch size: 2 vs 6")}
: OkStatus()},
TestParams{
{1, 1, 2, 3},
{1},
{0},
3,
0,
{1, 1, 2, 1},
{1, 4},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1},
{1},
2,
0,
{1, 1, 1, 3},
{4, 5, 6},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{0},
3,
0,
{1, 1, 2, 1, 1},
{1, 4},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{1},
3,
0,
{1, 1, 2, 1, 1},
{2, 5},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{2},
-1,
0,
{1, 1, 2, 1, 1},
{3, 6},
true,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 3},
{2, 0, 1},
3,
0,
{1, 1, 2, 1, 3},
{3, 1, 2, 6, 4, 5},
true,
true,
},
TestParams{
{1, 3, 2},
{1, 2, 2},
{0, 0, 1, 0},
2,
0,
{1, 3, 1, 2, 2},
{1, 1, 2, 1, 3, 3, 4, 3, 5, 5, 6, 5},
true,
true,
},
TestParams{
{1, 2, 3},
{1},
{0},
0,
0,
{1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
true,
},
TestParams{
{3, 2},
{1, 2},
{0, 1},
0,
0,
{1, 2, 2},
{1, 2, 3, 4},
false,
true,
},
TestParams{
{2, 3},
{1, 1, 2},
{0, 1},
0,
0,
{1, 1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
true,
},
TestParams{
{3, 2},
{2, 2},
{0, 2, 1, 0},
0,
0,
{2, 2, 2},
{1, 2, 5, 6, 3, 4, 1, 2},
false,
true,
},
TestParams{
{1, 1, 2, 3},
{1, 1},
{0},
3,
0,
{1, 1, 2, 1, 1},
{1, 4},
true,
false,
},
TestParams{{1, 2, 3},
{1},
{0},
0,
0,
{1, 2, 3},
{1, 2, 3, 4, 5, 6},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{{3, 2},
{2, 2},
{0, 2, 1, 0},
0,
0,
{2, 2, 2},
{1, 2, 5, 6, 3, 4, 1, 2},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"Params and indices must have a"
" batch size of 1 when params and indices are "
"both tensors or both"
" constants."}
: OkStatus()},
TestParams{
{2, 3},
{2, 2},
{0, 1, 1, 2},
1,
1,
{2, 2},
{1, 2, 5, 6},
false,
false,
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input axis must be zero when params is a weight."}
: OkStatus()},
};
for (auto p : test_params) {
Reset();
auto node_def = CreateGatherOp(tf_type_, p.batch_dims);
if (p.params_is_tensor) {
AddTestTensor("params", p.params_shape, params_input);
} else {
AddTestWeights("params", p.params_shape, params_input, tf_type_);
}
if (p.indices_is_tensor) {
AddTestTensor("indices", p.indices_shape, DT_INT32, p.indices, {},
p.add_index_status);
} else {
std::vector<int> indices_shape(p.indices_shape);
AddTestWeights("indices", indices_shape, p.indices, DT_INT32);
}
AddTestWeights<int32>("axis", {1}, {p.axis});
TestOpConverter(node_def, p.expected_output_shape, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
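// Builds a reduction NodeDef (Sum, Prod, Mean, Min or Max) with the given
// keep_dims attribute.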
template <typename OpType>
NodeDef CreateReduceOp(DataType tf_type, bool keep_dims) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), tf_type);
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
typename OpType::Attrs op_attrs;
op_attrs.keep_dims_ = keep_dims;
auto op = OpType(s.WithOpName("my_reduce"), input, axis, op_attrs);
return op.operation.node()->def();
}
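// Reference reduction: folds each consecutive group of m input elements with
// `op` starting from `init`, dividing by m for Mean.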
std::vector<float> CalcReduce(string op_name, std::vector<float> input, int m,
float (*op)(float, float), float init) {
std::vector<float> output(input.size() / m);
for (int i = 0; i < output.size(); i++) {
auto begin = input.begin() + i * m;
auto end = input.begin() + (i + 1) * m;
output[i] = std::accumulate(begin, end, init, op);
if (op_name == "Mean") {
output[i] /= m;
}
}
return output;
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertReduce) {
{
Reset();
const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
AddTestWeights<float>("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
AddTestWeights<int32>("axis", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for Sum must be a tensor");
}
{
Reset();
const NodeDef node_def = CreateReduceOp<ops::Sum>(tf_type_, false);
AddTestTensor("input", {1, 2, 3}, {-3, -2, -1, 0, 1, 2});
AddTestTensor("axis", {1}, DT_INT32, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for Sum must be a constant");
}
using OpFunc = std::function<NodeDef(DataType, bool)>;
using ValFunc = float (*)(float, float);
struct ReduceTestDescriptor {
string name;
OpFunc get_node;
ValFunc val_func;
float init_val;
};
std::vector<ReduceTestDescriptor> op_test_info{
{"Sum", CreateReduceOp<ops::Sum>, [](float x, float y) { return x + y; },
0},
{"Prod", CreateReduceOp<ops::Prod>,
[](float x, float y) { return x * y; }, 1},
{"Mean", CreateReduceOp<ops::Mean>,
[](float x, float y) { return x + y; }, 0},
{"Min", CreateReduceOp<ops::Min>,
[](float x, float y) { return y < x ? y : x; }, 1000},
{"Max", CreateReduceOp<ops::Max>,
[](float x, float y) { return x < y ? y : x; }, -1000}};
std::vector<float> input_values{1, 2, 3, 4, 5, 6};
struct TestParams {
std::vector<int> input_dims;
std::vector<float> input_values;
std::vector<float> helper_array;
std::vector<int> axis;
int stride;
Status conversion_status;
};
std::vector<TestParams> params{
TestParams{{2, 3, 1}, input_values, input_values, {3}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {-4}, 3},
TestParams{{2, 3, 1}, input_values, {1, 4, 2, 5, 3, 6}, {0}, 2},
TestParams{{2, 3, 1}, input_values, input_values, {1}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {2}, 1},
TestParams{{2, 3, 1}, input_values, input_values, {0, 1}, 6},
TestParams{{2, 3, 1}, input_values, {1, 4, 2, 5, 3, 6}, {-3}, 2},
TestParams{{2, 3, 1}, input_values, input_values, {-2}, 3},
TestParams{{2, 3, 1}, input_values, input_values, {-1}, 1},
TestParams{{2, 3, 1}, input_values, input_values, {-3, 1}, 6},
};
for (bool keep_dims : {false, true}) {
for (auto& op : op_test_info) {
VLOG(2) << "Processing " << op.name << " with keep_dims=" << keep_dims;
for (auto p : params) {
SCOPED_TRACE(StrCat(op.name, keep_dims ? " & keep_dims" : ""));
Reset();
NodeDef node_def = op.get_node(tf_type_, keep_dims);
AddTestTensor("input", p.input_dims, p.input_values);
AddTestWeights<int32>("axis", {static_cast<int>(p.axis.size())},
p.axis);
std::vector<int> expected_output_dims(p.input_dims);
for (int ax : p.axis) {
int rank = p.input_dims.size();
if (ax >= rank || ax < -rank) {
p.conversion_status =
errors::InvalidArgument("Axis value of ", ax,
" is out of bounds, must be in "
"range [",
-rank, ", ", rank, ")");
} else {
int ax_positive = ax >= 0 ? ax : ax + rank;
expected_output_dims[ax_positive] = keep_dims ? 1 : 0;
if (trt_mode_ == TrtTestMode::kImplicitBatch &&
(ax == 0 || ax == -rank)) {
p.conversion_status = errors::Unimplemented(
"TensorRT does not allow manipulation of the batch "
"dimension");
}
}
}
expected_output_dims.erase(std::remove(expected_output_dims.begin(),
expected_output_dims.end(), 0),
expected_output_dims.end());
VLOG(2) << "out dims "
<< absl::StrCat("[", absl::StrJoin(expected_output_dims, ","),
"]");
std::vector<float> expected_values = CalcReduce(
op.name, p.helper_array, p.stride, op.val_func, op.init_val);
if (tf_type_ == DT_INT32) {
std::for_each(expected_values.begin(), expected_values.end(),
[](float& _n) { _n = std::floor(_n); });
}
TestOpConverter(node_def, expected_output_dims, p.conversion_status,
OkStatus(), ArrayFloatNear(expected_values));
}
}
}
}
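// Builds a Cast NodeDef from a DT_HALF input to DT_FLOAT. The tf_type
// argument is unused; it only matches the OpFunc signature of the unary test.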
NodeDef CreateCastOp(DataType tf_type) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), DT_HALF);
return ops::Cast(s.WithOpName("my_unary"), input, DT_FLOAT)
.operation.node()
->def();
}
TEST_P(OpConverter_FP32_UnaryTest, ConvertUnary) {
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = float (*)(float);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
#define ADD_OP(name, op, compute) \
op_map[name] = \
std::make_pair(CreateUnaryOp<op>, static_cast<ValFunc>(compute))
ADD_OP("Abs", ops::Abs, std::abs);
ADD_OP("Acos", ops::Acos, std::acos);
ADD_OP("Acosh", ops::Acosh, std::acosh);
ADD_OP("Asin", ops::Asin, std::asin);
ADD_OP("Asinh", ops::Asinh, std::asinh);
ADD_OP("Atan", ops::Atan, std::atan);
ADD_OP("Atanh", ops::Atanh, std::atanh);
op_map["Cast"] = std::make_pair(CreateCastOp, [](float x) { return x; });
ADD_OP("Ceil", ops::Ceil, std::ceil);
ADD_OP("Cos", ops::Cos, std::cos);
ADD_OP("Cosh", ops::Cosh, std::cosh);
ADD_OP("Exp", ops::Exp, std::exp);
ADD_OP("Erf", ops::Erf, std::erf);
ADD_OP("Floor", ops::Floor, std::floor);
ADD_OP("Log", ops::Log, std::log);
ADD_OP("Neg", ops::Neg, [](float x) { return -x; });
ADD_OP("Reciprocal", ops::Reciprocal, [](float x) { return 1.0f / x; });
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
ADD_OP("Round", ops::Round, [](float x) { return (float)std::round(x); });
ADD_OP("Sign", ops::Sign,
[](float x) { return x > 0 ? 1.0f : (x < 0 ? -1.0f : 0.0f); });
#endif
ADD_OP("Rsqrt", ops::Rsqrt, [](float x) { return 1.0f / std::sqrt(x); });
ADD_OP("Sin", ops::Sin, std::sin);
ADD_OP("Sinh", ops::Sinh, std::sinh);
ADD_OP("Sqrt", ops::Sqrt, std::sqrt);
ADD_OP("Tan", ops::Tan, std::tan);
#undef ADD_OP
std::vector<float> input_values{-0.9f, 0.6f, 0.0f, -3.5f, 100.0f, 2.9f};
RunTests("Unary", *UnaryOperationMap(), op_map, input_values, "x");
}
TEST_P(OpConverter_BOOL_Test, ConvertBoolean) {
std::vector<int> input_values{1, 0, 1, 0, 0, 1};
using OpFunc = std::function<NodeDef(DataType)>;
using ValFunc = int (*)(int);
std::map<std::string, std::pair<OpFunc, ValFunc>> op_map;
#define ADD_OP(name, op, compute) \
op_map[name] = \
std::make_pair(CreateUnaryOp<op>, static_cast<ValFunc>(compute))
ADD_OP("LogicalNot", ops::LogicalNot, [](int x) { return 1 - x; });
#undef ADD_OP
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
RunTests("LogicalUnary", *UnaryBooleanOperationMap(), op_map, input_values,
"x");
#endif
}
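// Builds a ConcatV2 NodeDef with `num_inputs` value placeholders and an axis
// placeholder.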
auto get_concat_nodedef = [](DataType dtype, int num_inputs) -> NodeDef {
Scope s = Scope::NewRootScope();
std::vector<Input> values;
values.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const string input_name = StrCat("values_", i);
values.push_back(ops::Placeholder(s.WithOpName(input_name), dtype));
}
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
auto concat = ops::Concat(s.WithOpName("my_concat"),
absl::Span<const Input>(values), axis);
return concat.operation.node()->def();
};
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertConcat) {
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 2, 3});
AddTestTensor("axis", {1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for ConcatV2 must be a constant");
}
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 2, 3});
AddTestWeights<int32>("axis", {1}, {4});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_concat_nodedef(tf_type_, 2);
AddTestTensor("values_0", {1, 1, 2, 3});
AddTestTensor("values_1", {1, 1, 6});
AddTestWeights<int32>("axis", {1}, {1});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent rank");
}
struct TestParams {
std::vector<std::vector<int>> input_shapes;
std::vector<std::vector<int>> input_values;
std::vector<bool> inputs_are_tensors;
int axis;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
Status conversion_status;
Status run_status;
};
const std::vector<std::vector<int>> common_input{CreateVectorIota<int>(6),
CreateVectorIota<int>(6, 6)};
std::vector<TestParams> params = {
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
2,
{1, 1, 4, 3},
CreateVectorIota<int>(12),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
3,
{1, 1, 2, 6},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11},
},
{
{{1, 1}, {1, 2}, {1, 3}, {1, 1}, {1, 1}, {1, 2}},
{{1}, {2, 3}, {4, 5, 6}, {7}, {8}, {9, 10}},
{true, true, true, true, true, true},
1,
{1, 10},
CreateVectorIota<int>(10, 1),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, false},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"The input \"values_1\" for ConcatV2 must be a tensor")
: OkStatus(),
OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{false, false},
1,
{1, 2, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"The input \"values_0\" for ConcatV2 must be a tensor")
: OkStatus(),
OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 2, 3}},
common_input,
{true, true},
0,
{2, 1, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented(
"TensorRT does not allow manipulation of the "
"batch dimension")
: OkStatus(),
},
{
{{1, 1, 2, 3}, {1, 1, 3, 2}},
common_input,
{true, true},
1,
{2, 1, 2, 3},
CreateVectorIota<int>(12),
trt_mode_ != TrtTestMode::kDynamicShape
? errors::InvalidArgument(
"Received inputs with inconsistent shape")
: OkStatus(),
errors::InvalidArgument(""),
}};
for (auto p : params) {
Reset();
const int num_inputs = p.input_shapes.size();
EXPECT_EQ(num_inputs, p.input_values.size());
NodeDef node_def = get_concat_nodedef(tf_type_, num_inputs);
for (int j = 0; j < num_inputs; ++j) {
string name = StrCat("values_", j);
if (!p.inputs_are_tensors[j]) {
AddTestWeights(name, p.input_shapes[j], p.input_values[j], tf_type_);
} else {
AddTestTensor(name, p.input_shapes[j], p.input_values[j]);
}
}
AddTestWeights<int32>("axis", {1}, {p.axis});
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.run_status, ElementsAreArray(p.expected_output));
}
}
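// Builds a Split NodeDef that splits `value` into `num_split` outputs along
// `axis`.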
auto get_split_nodedef = [](DataType dtype, int num_split) -> NodeDef {
Scope s = Scope::NewRootScope();
auto axis = ops::Placeholder(s.WithOpName("axis"), DT_INT32);
auto value = ops::Placeholder(s.WithOpName("value"), dtype);
auto split = ops::Split(s.WithOpName("my_split"), axis, value, num_split);
return split.operation.node()->def();
};
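// Converts Split for a set of valid parameters and checks the dimensions and
// values of every output tensor.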
template <DataType dtype>
void TestConvertSplit(OpConverterTest* test) {
typedef typename EnumToDataType<dtype>::Type CType;
struct TestParams {
std::vector<int> input_shape;
std::vector<CType> value;
int axis;
int num_split;
std::vector<int> expected_output_dims;
std::vector<std::vector<CType>> expected_outputs;
};
const std::vector<CType> common_input = CreateVectorIota<CType>(6);
std::vector<TestParams> ok_params = {
{{1, 2, 3}, common_input, 1,
1, {1, 2, 3},
{CreateVectorIota<CType>(6)}},
{{1, 2, 3},
common_input,
3,
3,
{1, 2, 1},
{{CType(0), CType(3)}, {CType(1), CType(4)}, {CType(2), CType(5)}}},
{{1, 6},
common_input,
2,
6,
{1, 1},
{{CType(0)},
{CType(1)},
{CType(2)},
{CType(3)},
{CType(4)},
{CType(5)}}},
{{1, 6},
common_input,
-1,
2,
{1, 3},
{CreateVectorIota<CType>(3), CreateVectorIota<CType>(3, CType(3))}},
};
for (int i = 0; i < ok_params.size(); ++i) {
test->Reset();
NodeDef node_def = get_split_nodedef(dtype, ok_params[i].num_split);
test->AddTestWeights<int32>("axis", {1}, {ok_params[i].axis});
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(dtype, &trt_type));
test->AddTestTensor("value", ok_params[i].input_shape, 1, trt_type);
test->RunValidationAndConversion(node_def);
EXPECT_EQ(ok_params[i].expected_outputs.size(), ok_params[i].num_split);
std::vector<TRT_TensorOrWeights> outputs(ok_params[i].num_split);
DataVec output_data;
for (int j = 0; j < outputs.size(); ++j) {
const string name = j == 0 ? StrCat("my_split") : StrCat("my_split:", j);
TF_EXPECT_OK(test->GetTensorOrWeights(name, &outputs[j]));
EXPECT_TRUE(outputs[j].is_tensor());
EXPECT_THAT(outputs[j].tensor()->getDimensions(),
DimsAreArray(ok_params[i].expected_output_dims));
output_data.push_back(
{name, test->ConstructTensor<CType>(
ok_params[i].expected_outputs[j].size())});
}
const DataVec input_data{
{"value", test->AsTensor<CType>(ok_params[i].value)}};
TF_EXPECT_OK(test->BuildAndRun(input_data, &output_data));
for (int j = 0; j < outputs.size(); ++j) {
EXPECT_THAT(GetSpanForData<CType>(output_data[j]),
ElementsAreArray(ok_params[i].expected_outputs[j]));
}
}
}
TEST_F(OpConverterTest, ConvertSplit) {
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestTensor("axis", {1});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"axis\" for Split must be a constant");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {4});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {-5});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {0});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the "
"batch dimension");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 1);
AddTestWeights<int32>("axis", {1}, {1});
AddTestWeights<float>("value", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"value\" for Split must be a tensor");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 2);
AddTestWeights<int32>("axis", {1}, {3});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 3 of size 3 is not evenly divisible by 2");
}
{
Reset();
NodeDef node_def = get_split_nodedef(DT_FLOAT, 4);
AddTestWeights<int32>("axis", {1}, {3});
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 3 of size 3 is not evenly divisible by 4");
}
TestConvertSplit<DT_FLOAT>(this);
TestConvertSplit<DT_HALF>(this);
TestConvertSplit<DT_INT32>(this);
}
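// Builds an Unpack (Unstack) NodeDef producing `num` outputs along `axis`.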
auto get_unpack_nodedef = [](DataType dtype, int num, int axis) -> NodeDef {
Scope s = Scope::NewRootScope();
auto value = ops::Placeholder(s.WithOpName("value"), dtype);
auto unstack_attrs = ops::Unstack::Axis(axis);
auto unstack =
ops::Unstack(s.WithOpName("my_unpack"), value, num, unstack_attrs);
return unstack.operation.node()->def();
};
struct UnpackTestParams {
std::vector<int> input_shape;
std::vector<float> input_value;
int axis;
int num;
std::vector<int> expected_output_dims;
std::vector<std::vector<float>> expected_outputs;
Status run_status;
};
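// Converts an Unpack node for the given parameters and verifies all unpacked
// outputs.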
void TestConvertUnpack(ParameterizedOpConverterTestBase* test,
UnpackTestParams& p) {
test->Reset();
NodeDef node_def = get_unpack_nodedef(test->get_tf_type(), p.num, p.axis);
test->AddTestTensor("value", p.input_shape, test->get_tf_type(),
p.input_value);
std::vector<Matcher<std::vector<float>>> matcher_vec;
std::vector<DataType> datatype_vec;
std::vector<std::vector<int>> expected_output_dims;
for (int j = 0; j < p.expected_outputs.size(); ++j) {
matcher_vec.push_back(ElementsAreArray(p.expected_outputs[j]));
datatype_vec.push_back(test->get_tf_type());
expected_output_dims.push_back(p.expected_output_dims);
}
test->TestOpConverterMultiOut(node_def,
expected_output_dims,
p.run_status,
p.run_status,
matcher_vec,
datatype_vec);
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertUnpack) {
if (trt_mode_ != TrtTestMode::kDynamicShape) {
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 3, 3);
AddTestWeights<float>("value", {1, 1, 2, 3}, {1, 2, 3, 4, 5, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"value\" for Unpack must be a tensor");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 4);
AddTestTensor("value", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of 4 is out of bounds, must be in "
"range [-4, 4)");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, -5);
AddTestTensor("value", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be "
"in range [-4, 4)");
}
{
if (trt_mode_ != TrtTestMode::kExplicitBatch) {
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 0);
AddTestTensor("value", {1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of "
"the batch dimension");
}
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 5, 2);
AddTestTensor("value", {1, 1, 6});
RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
"Dimension 2 has size 6 which is not equal to num of 5");
}
{
Reset();
NodeDef node_def = get_unpack_nodedef(tf_type_, 1, 0);
AddTestTensor(
"value", {}, tf_type_, {}, {},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument(
"removing first dim requires explicit batch dimension")
: OkStatus());
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
RunValidationAndConversion(
node_def, absl::StatusCode::kInternal,
"Failed to convert at least one input to a TRT_TensorOrWeights: "
"Scalar input tensor is not supported since the first dimension is "
"treated as batch dimension by TRT");
} else {
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Input \"value\" for Unpack must be rank 2 "
"or greater");
}
}
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
Status run_status =
trt_mode_ == TrtTestMode::kDynamicShape
? errors::InvalidArgument(
"The argument `strided_slice_spec` is "
"`std::nullopt` with `dynamic_input_size_indices` non empty.")
: OkStatus();
std::vector<UnpackTestParams> params = {
{{1, 1, 2, 1, 3, 1},
common_input,
4,
3,
{1, 1, 2, 1, 1},
{{0, 3}, {1, 4}, {2, 5}},
run_status},
{{1, 1, 2, 1, 3},
common_input,
4,
3,
{1, 1, 2, 1},
{{0, 3}, {1, 4}, {2, 5}},
run_status},
{{1, 1, 2, 3},
common_input,
1,
1,
{1, 2, 3},
{CreateVectorIota<float>(6)},
run_status},
{{1, 6, 1},
common_input,
-2,
6,
{1, 1},
{{0}, {1}, {2}, {3}, {4}, {5}},
run_status},
{{1, 6},
common_input,
1,
6,
{1},
{{0}, {1}, {2}, {3}, {4}, {5}},
run_status},
};
for (auto p : params) {
TestConvertUnpack(this, p);
}
}
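// Builds a Pack (Stack) NodeDef stacking `num_inputs` placeholders along
// `axis`.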
NodeDef GetPackNodeDef(DataType dtype, int num_inputs, int axis) {
Scope s = Scope::NewRootScope();
std::vector<Input> values;
values.reserve(num_inputs);
for (int i = 0; i < num_inputs; ++i) {
const string input_name = StrCat("values_", i);
values.push_back(ops::Placeholder(s.WithOpName(input_name), dtype));
}
auto pack =
ops::Stack(s.WithOpName("my_pack"), absl::Span<const Input>(values),
ops::Stack::Axis(axis));
return pack.operation.node()->def();
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertPack) {
struct TestParams {
std::vector<std::vector<int>> input_shapes;
std::vector<std::vector<int>> partial_input_shapes;
std::vector<std::vector<float>> input_values;
int axis;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status conversion_status;
Status runtime_status;
bool input_1_is_weight;
};
const std::vector<std::vector<float>> common_input{
CreateVectorIota<float>(6),
CreateVectorIota<float>(6, 6)};
std::vector<TestParams> params = {
{{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
1,
{1, 2, 2, 3},
CreateVectorIota<float>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"The input \"values_1\" for Pack must be a tensor"}
: OkStatus(),
OkStatus(),
true},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
-5,
{},
{},
Status{absl::StatusCode::kInvalidArgument,
"Axis value of -5 is out of bounds, must be in"
" range [-4, 4)"},
},
{{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
-4,
{2, 1, 2, 3},
CreateVectorIota<float>(12),
trt_mode_ == TrtTestMode::kImplicitBatch
? Status{absl::StatusCode::kUnimplemented,
"TensorRT does not allow manipulation of the batch "
"dimension"}
: OkStatus()},
{
{{1, 2, 3}, {1, 6}},
{{}, {}},
common_input,
1,
{},
{},
Status{absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent rank"},
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
1,
{1, 2, 2, 3},
CreateVectorIota<float>(12),
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
2,
{1, 2, 2, 3},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11},
},
{
{{1, 2, 3}, {1, 2, 3}},
{{}, {}},
common_input,
3,
{1, 2, 3, 2},
{0, 6, 1, 7, 2, 8, 3, 9, 4, 10, 5, 11},
},
{
{{1, 2, 3}},
{{}},
{CreateVectorIota<float>(6)},
1,
{1, 1, 2, 3},
CreateVectorIota<float>(6),
},
{
{{1, 2, 3}},
{{}},
{CreateVectorIota<float>(6)},
2,
{1, 2, 1, 3},
CreateVectorIota<float>(6),
},
};
if (trt_mode_ != TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{{1, 2, 3}, {1, 3, 2}},
{{}, {}},
common_input,
1,
{},
CreateVectorIota<float>(12),
Status{absl::StatusCode::kInvalidArgument,
"Received inputs with inconsistent shape"}});
  }
if (trt_mode_ == TrtTestMode::kDynamicShape) {
params.push_back(
TestParams{{{1, 2, 3}, {1, 2, 3}},
{{-1, -1, -1}, {1, 2, 3}},
common_input,
2,
{1, 2, 2, 3},
{0, 1, 2, 6, 7, 8, 3, 4, 5, 9, 10, 11}});
}
for (auto p : params) {
Reset();
const int num_inputs = p.input_shapes.size();
EXPECT_EQ(num_inputs, p.input_values.size());
NodeDef node_def = GetPackNodeDef(tf_type_, num_inputs, p.axis);
for (int j = 0; j < num_inputs; ++j) {
if (j == 1 && p.input_1_is_weight) {
AddTestWeights(StrCat("values_", j), p.input_shapes[j],
p.input_values[j], tf_type_);
} else {
AddTestTensor(StrCat("values_", j), p.input_shapes[j], tf_type_,
p.input_values[j], p.partial_input_shapes[j]);
}
}
TestOpConverter(node_def, p.expected_output_dims, p.conversion_status,
p.runtime_status, ElementsAreArray(p.expected_output));
}
}
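// Builds an ArgMin/ArgMax NodeDef with the given input dtype and output index
// dtype.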
template <typename OpType>
NodeDef GetArgMinMaxNodeDef(DataType input_dtype, DataType output_dtype) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), input_dtype);
auto dimension = ops::Placeholder(s.WithOpName("dimension"), DT_INT32);
auto attrs = OpType::OutputType(output_dtype);
auto arg = OpType(s.WithOpName("my_arg"), input, dimension, attrs);
return arg.operation.node()->def();
}
struct ArgMinMaxTestParams {
std::vector<int> input_shape;
std::vector<float> input_value;
int axis;
std::vector<int> expected_output_dims;
std::vector<int> expected_argmax_output;
std::vector<int> expected_argmin_output;
Status status;
};
template <typename OpType>
void TestConvertArgMinMax(ParameterizedOpConverterTestBase* test,
DataType _tf_type, ArgMinMaxTestParams& p) {
test->Reset();
NodeDef node_def = GetArgMinMaxNodeDef<OpType>(_tf_type,
DT_INT32);
std::vector<int> expected_out;
if (node_def.op() == "ArgMax") {
expected_out = p.expected_argmax_output;
} else if (node_def.op() == "ArgMin") {
expected_out = p.expected_argmin_output;
} else {
ASSERT_TRUE(false);
}
test->AddTestTensor("input", p.input_shape, _tf_type, p.input_value);
test->AddTestWeights("dimension", {1}, {p.axis}, DT_INT32);
test->TestOpConverter(node_def, p.expected_output_dims,
p.status,
OkStatus(),
ElementsAreArray(expected_out), {DT_INT32});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertArgMinMax) {
{
Reset();
NodeDef node_def =
GetArgMinMaxNodeDef<ops::ArgMax>(tf_type_,
DT_INT32);
AddTestTensor("input", {1, 2, 3});
AddTestTensor("dimension", {1});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"dimension\" for ArgMax must be a constant");
}
{
Reset();
NodeDef node_def =
GetArgMinMaxNodeDef<ops::ArgMax>(tf_type_,
DT_INT64);
AddTestTensor("input", {1, 2, 3});
AddTestWeights("dimension", {1}, {3}, DT_INT32);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"Output type int64 is not supported");
}
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<ArgMinMaxTestParams> params = {
{{2, 3},
common_input,
0,
{3},
{1, 1, 1},
{0, 0, 0},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::Unimplemented("TensorRT does not allow manipulation of "
"the batch dimension")
: OkStatus()},
{
{1, 6},
common_input,
1,
{1},
{5},
{0},
},
{
{1, 10},
{-5.0f, 3.0f, 5.0f, 1.0f, 6.0f, -9.0f, 7.0f, 1.0f, 0.0f, -1.0f},
-1,
{1},
{6},
{5},
},
{
{1, 2, 3},
common_input,
2,
{1, 2},
{2, 2},
{0, 0},
},
{
{1, 2, 3},
common_input,
-2,
{1, 3},
{1, 1, 1},
{0, 0, 0},
},
{
{1, 2, 1, 3},
common_input,
3,
{1, 2, 1},
{2, 2},
{0, 0},
},
{
{1, 2, 1, 3},
common_input,
-3,
{1, 1, 3},
{1, 1, 1},
{0, 0, 0},
},
{{1, 2, 1, 1, 3},
common_input,
4,
{1, 2, 1, 1},
{2, 2},
{0, 0},
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
errors::Unimplemented("op is not able to support tensors with 4+"
" dimensions (excluding batch size)")
#else
OkStatus()
#endif
},
{{1, 2, 1, 1, 3},
common_input,
-4,
{1, 1, 1, 3},
{1, 1, 1},
{0, 0, 0},
#if !IS_TRT_VERSION_GE(7, 0, 0, 11)
errors::Unimplemented("op is not able to support tensors with 4+"
" dimensions (excluding batch size)")
#else
OkStatus()
#endif
},
};
for (auto p : params) {
TestConvertArgMinMax<ops::ArgMin>(this, tf_type_, p);
TestConvertArgMinMax<ops::ArgMax>(this, tf_type_, p);
}
}
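// Builds a DepthToSpace or SpaceToDepth NodeDef with the given block size and
// data format.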
template <typename OpType>
NodeDef GetDepthSpaceShuffleNodeDef(DataType dtype, int block_size,
string data_format) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto attrs = OpType::DataFormat(data_format);
auto shuffle = OpType(s.WithOpName("my_shuffle"), input, block_size, attrs);
return shuffle.operation.node()->def();
}
struct DepthSpaceShuffleTestParams {
std::vector<int> input_dims;
std::vector<int> input_value;
int block_size;
string data_format;
std::vector<int> expected_output_dims;
std::vector<int> expected_output;
};
template <typename OpType>
void TestConvertDepthSpaceShuffle(
ParameterizedOpConverterTestBase* test,
const std::vector<DepthSpaceShuffleTestParams>& params) {
Status status = OkStatus();
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW");
test->AddTestWeights<float>("input", {1, 4, 1, 1}, {1, 2, 3, 4});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
StrCat("The input \"input\" for ", node_def.op(), " must be a tensor"));
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW");
test->AddTestTensor("input", {1, 16, 32});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kInvalidArgument,
StrCat("The input to ", node_def.op(), " must be rank 4"));
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 2, "NCHW_VECT_C");
test->AddTestTensor("input", {1, 16, 32, 32});
test->RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"Data format NCHW_VECT_C is not supported");
}
if (test->get_trt_mode() != TrtTestMode::kDynamicShape) {
if (std::is_same<OpType, ops::DepthToSpace>::value) {
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::DepthToSpace>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 32, 32});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Number of channels must be divisible by"
" block_size*block_size");
} else {
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::SpaceToDepth>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 9, 32});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Width and height must be divisible by"
" block_size");
}
{
test->Reset();
NodeDef node_def = GetDepthSpaceShuffleNodeDef<ops::SpaceToDepth>(
test->get_tf_type(), 3, "NCHW");
test->AddTestTensor("input", {1, 16, 32, 9});
test->RunValidationAndConversion(node_def,
absl::StatusCode::kInvalidArgument,
"Width and height must be divisible by"
" block_size");
}
}
}
for (auto p : params) {
test->Reset();
const NodeDef node = GetDepthSpaceShuffleNodeDef<OpType>(
test->get_tf_type(), p.block_size, p.data_format);
test->AddTestTensor("input", p.input_dims, p.input_value);
test->TestOpConverter(node, p.expected_output_dims, status, OkStatus(),
ElementsAreArray(p.expected_output));
}
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertDepthToSpace) {
const std::vector<int> common_input = CreateVectorIota<int>(16);
std::vector<DepthSpaceShuffleTestParams> params = {
{
{1, 4, 2, 2},
common_input,
2,
"NCHW",
{1, 1, 4, 4},
{0, 4, 1, 5, 8, 12, 9, 13, 2, 6, 3, 7, 10, 14, 11, 15},
},
{
{1, 2, 2, 4},
common_input,
2,
"NHWC",
{1, 4, 4, 1},
{0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15},
},
{
{1, 16, 1, 1},
common_input,
4,
"NCHW",
{1, 1, 4, 4},
CreateVectorIota<int>(16),
},
{
{1, 2, 2, 8},
CreateVectorIota<int>(32),
2,
"NHWC",
{1, 4, 4, 2},
{0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6,
7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25,
26, 27, 20, 21, 22, 23, 28, 29, 30, 31},
}};
TestConvertDepthSpaceShuffle<ops::DepthToSpace>(this, params);
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertSpaceToDepth) {
const std::vector<int> common_input = CreateVectorIota<int>(16);
std::vector<DepthSpaceShuffleTestParams> params = {
{
{1, 1, 4, 4},
common_input,
2,
"NCHW",
{1, 4, 2, 2},
{0, 2, 8, 10, 1, 3, 9, 11, 4, 6, 12, 14, 5, 7, 13, 15},
},
{
{1, 4, 4, 1},
common_input,
2,
"NHWC",
{1, 2, 2, 4},
{0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15},
},
{
{1, 1, 4, 4},
common_input,
4,
"NCHW",
{1, 16, 1, 1},
CreateVectorIota<int>(16),
},
{
{1, 4, 4, 2},
CreateVectorIota<int>(32),
2,
"NHWC",
{1, 2, 2, 8},
{0, 1, 2, 3, 8, 9, 10, 11, 4, 5, 6,
7, 12, 13, 14, 15, 16, 17, 18, 19, 24, 25,
26, 27, 20, 21, 22, 23, 28, 29, 30, 31},
},
};
TestConvertDepthSpaceShuffle<ops::SpaceToDepth>(this, params);
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertClipByValue) {
Scope s = Scope::NewRootScope();
auto t = ops::Placeholder(s.WithOpName("t"), tf_type_);
auto clip_value_min =
ops::Placeholder(s.WithOpName("clip_value_min"), tf_type_);
auto clip_value_max =
ops::Placeholder(s.WithOpName("clip_value_max"), tf_type_);
auto clip = ops::ClipByValue(s.WithOpName("my_clip"), t, clip_value_min,
clip_value_max);
const NodeDef& node_def = clip.operation.node()->def();
nvinfer1::DataType trt_type_;
TF_ASSERT_OK(TfTypeToTrtType(tf_type_, &trt_type_));
{
Reset();
AddTestWeights("t", {1, 2, 3}, {1, 2, 3, 4, 5, 6}, tf_type_);
AddTestWeights("clip_value_min", {1}, {1}, tf_type_);
AddTestWeights("clip_value_max", {1}, {5}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"t\" for ClipByValue must be a "
"tensor");
}
{
Reset();
AddTestTensor("t", {1, 2, 3});
AddTestTensor("clip_value_min", {1});
AddTestWeights("clip_value_max", {1}, {1}, tf_type_);
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"clip_value_min\" for ClipByValue "
"must be a constant");
}
{
Reset();
AddTestTensor("t", {1, 2, 3});
AddTestWeights("clip_value_min", {1}, {1}, tf_type_);
AddTestTensor("clip_value_max", {1});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"clip_value_max\" for ClipByValue "
"must be a constant");
}
struct TestParams {
std::vector<int> dims;
int clip_value_min;
int clip_value_max;
std::vector<float> expected_output;
};
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<TestParams> params = {{
{6},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 6},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 2, 3},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 2, 3, 1},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 1, 3, 1, 2},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{1, 1, 3, 1, 2, 1},
2,
4,
{2, 2, 2, 3, 4, 4},
},
{
{2, 1, 3},
-1,
8,
common_input,
}};
for (auto p : params) {
Reset();
AddTestTensor("t", p.dims, tf_type_, common_input);
AddTestWeights("clip_value_min", {1}, {p.clip_value_min}, tf_type_);
AddTestWeights("clip_value_max", {1}, {p.clip_value_max}, tf_type_);
TestOpConverter(node_def, p.dims,
OkStatus(),
OkStatus(),
ElementsAreArray(p.expected_output));
}
}
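// Builds a SquaredDifference NodeDef with two placeholder inputs.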
NodeDef GetSquaredDifferenceNodeDef(DataType dtype) {
Scope s = Scope::NewRootScope();
auto x = ops::Placeholder(s.WithOpName("x"), dtype);
auto y = ops::Placeholder(s.WithOpName("y"), dtype);
auto squared_diff =
ops::SquaredDifference(s.WithOpName("my_squared_diff"), x, y);
return squared_diff.operation.node()->def();
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertSquaredDifference) {
{
Reset();
NodeDef node_def = GetSquaredDifferenceNodeDef(tf_type_);
AddTestWeights<float>("x", {1, 2, 3}, {1, 2, 3, 4, 5, 6});
AddTestTensor("y", {1, 1, 2, 3});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"x\" for SquaredDifference must be "
"a tensor");
}
struct TestParams {
std::vector<int> dims_x;
std::vector<int> dims_y;
std::vector<float> value_x;
std::vector<float> value_y;
std::vector<int> expected_output_dims;
std::vector<float> expected_output;
Status status;
Status runtime_status;
};
const std::vector<float> common_input = CreateVectorIota<float>(6);
std::vector<TestParams> params = {
{{1, 2, 3},
{1, 7, 5},
common_input,
std::vector<float>(7 * 5, 0),
{1, 1, 2, 3},
common_input,
trt_mode_ == TrtTestMode::kDynamicShape
? OkStatus()
: errors::InvalidArgument("Infeasible broadcast scheme"),
errors::Internal(
"Binding index out of range. This can happen if profile is not set, "
"or the network is invalid for the current profile.")},
{
{1, 1, 2, 3},
{1, 1, 2, 3},
common_input,
{0, -1, 3, 0, 10, -7},
{1, 1, 2, 3},
{0, 4, 1, 9, 36, 144},
},
{
{1, 1, 2, 3},
{1, 1, 1, 3},
common_input,
{0, 1, 2},
{1, 1, 2, 3},
{0, 0, 0, 9, 9, 9},
},
};
for (auto p : params) {
Reset();
const NodeDef node = GetSquaredDifferenceNodeDef(tf_type_);
AddTestTensor("x", p.dims_x, p.value_x);
AddTestTensor("y", p.dims_y, p.value_y);
TestOpConverter(node, p.expected_output_dims, p.status, p.runtime_status,
ElementsAreArray(p.expected_output));
}
}
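// Builds a ResizeBilinear or ResizeNearestNeighbor NodeDef with the given
// align_corners attribute.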
template <typename OpType>
NodeDef MakeResizeNodeDef(DataType dtype, bool align_corners) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto size = ops::Placeholder(s.WithOpName("size"), DT_INT32);
auto attrs = typename OpType::Attrs().AlignCorners(align_corners);
auto resize = OpType(s.WithOpName("my_resize"), input, size, attrs);
return resize.operation.node()->def();
}
struct ResizeTestParams {
std::vector<int> input_dims;
std::vector<int> output_resize_dims;
std::vector<float> input_value;
bool size_as_tensor;
bool align_corners;
std::vector<int> expected_output_dims;
std::vector<float> expected_nearest_output_values;
std::vector<float> expected_bilinear_output_values;
Status status;
};
template <typename OpType>
void TestConvertResize(ParameterizedOpConverterTestBase* test,
ResizeTestParams& p) {
test->Reset();
NodeDef node_def =
MakeResizeNodeDef<OpType>(test->get_tf_type(), p.align_corners);
test->AddTestTensor("input", p.input_dims, test->get_tf_type(),
p.input_value);
if (p.size_as_tensor) {
std::vector<int32> size_dims{2};
std::vector<int32> size_values{p.output_resize_dims};
test->AddTestTensor("size", size_dims, DT_INT32, size_values, size_dims);
} else {
test->AddTestWeights("size", {2}, p.output_resize_dims, DT_INT32);
}
std::vector<float> expected_out;
if (node_def.op() == "ResizeBilinear") {
expected_out = p.expected_bilinear_output_values;
} else if (node_def.op() == "ResizeNearestNeighbor") {
expected_out = p.expected_nearest_output_values;
} else {
ASSERT_TRUE(false);
}
test->TestOpConverter(node_def, p.expected_output_dims,
p.status,
p.status,
ElementsAreArray(expected_out),
{DT_FLOAT});
}
TEST_P(OpConverter_FP32_FP16_Test, ConvertResize) {
{
Reset();
NodeDef node_def = MakeResizeNodeDef<ops::ResizeBilinear>(tf_type_,
true);
AddTestWeights<float>("input", {1, 2}, {1, 2});
AddTestWeights<int>("size", {1, 2}, {1, 2});
RunValidationAndConversion(
node_def, absl::StatusCode::kUnimplemented,
"The input \"input\" for ResizeBilinear must be a "
"tensor");
}
std::vector<ResizeTestParams> params{
{{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
false,
false,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.f, -1.0f, 2.0f, 0.f, -1.0f},
OkStatus()},
{{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
false,
true,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.5f, -1.0f, 2.0f, 0.5f, -1.0f},
OkStatus()}};
if (trt_mode_ != TrtTestMode::kImplicitBatch) {
params.push_back({{1, 1, 2, 1},
{2, 3},
{2.0f, -1.0f},
true,
true,
{1, 2, 3, 1},
{2.0f, 2.0f, -1.0f, 2.0f, 2.0f, -1.0f},
{2.0f, 0.5f, -1.0f, 2.0f, 0.5f, -1.0f},
OkStatus()});
}
for (auto p : params) {
TestConvertResize<ops::ResizeNearestNeighbor>(this, p);
#if IS_TRT_VERSION_GE(7, 1, 0, 0)
if (!p.align_corners) {
p.status = errors::InvalidArgument(
"Cannot Convert Bilinear Resize when align_corners=False");
}
#endif
TestConvertResize<ops::ResizeBilinear>(this, p);
}
}
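// Builds a Pad NodeDef with a tensor input and an int32 paddings placeholder.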
NodeDef MakePadNodeDef(std::string name, DataType dtype) {
Scope s = Scope::NewRootScope();
auto input = ops::Placeholder(s.WithOpName("input"), dtype);
auto padding = ops::Placeholder(s.WithOpName("padding"), DT_INT32);
auto pad = ops::Pad(s.WithOpName(name), input, padding);
return pad.operation.node()->def();
}
struct PadTestParams {
std::vector<int> input_dims;
std::vector<int> pad_dims;
std::vector<int> pad_values;
std::vector<float> input_values;
std::vector<int> expected_output_dims;
std::vector<float> expected_output_values;
Status status;
};
TEST_P(OpConverter_FP32_FP16_Test, ConvertPad) {
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestWeights("input", {1, 2}, {1, 2}, tf_type_);
AddTestWeights<int>("padding", {1, 2}, {1, 2});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"tensor\" for Pad must be a "
"tensor");
}
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", {1, 2});
AddTestTensor("padding", {1, 2});
RunValidationAndConversion(node_def, absl::StatusCode::kUnimplemented,
"The input \"paddings\" for Pad must be a "
"constant");
}
{
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", {1, 1, 2, 1});
AddTestWeights<int>("padding", {4, 2}, {0, 0, 1, 0, 0, 1, 0, 0});
TRT_TensorOrWeights input;
TRT_TensorOrWeights output;
RunValidationAndConversion(node_def);
TF_EXPECT_OK(GetTensorOrWeights("input", &input));
TF_EXPECT_OK(GetTensorOrWeights("my_pad", &output));
ITensorProxyPtr input_tensor = input.tensor();
converter_->ProvideQuantizationRange(&input_tensor, -5.0f, 5.0f);
auto ranges = quantization_ranges();
EXPECT_EQ(5.0f, ranges[input.tensor()->trt_tensor()]);
}
std::vector<PadTestParams> params{
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 0, 0, 0, 1, 0, 0},
{1, 2, 3, 4, 5, 6},
{1, 1, 4, 2},
{1, 2, 3, 4, 5, 6, 0, 0},
},
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 0, 0, 0, 0, 0, 1},
{1, 2, 3, 4, 5, 6},
{1, 1, 3, 3},
{1, 2, 0, 3, 4, 0, 5, 6, 0},
},
{
{1, 1, 3, 2},
{4, 2},
{0, 0, 1, 0, 0, 0, 0, 0},
{1, 2, 3, 4, 5, 6},
{1, 2, 3, 2},
{0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6},
},
{
{1, 1, 2, 1},
{4, 2},
{0, 0, 1, 0, 0, 1, 0, 0},
{2.0f, -1.0f},
{1, 2, 3, 1},
{0.0, 0.0, 0.0, 2.0f, -1.0f, 0.0},
},
PadTestParams{
{1, 1, 2, 2},
{4, 2},
{0, 0, 1, 0, 0, 1, 0, 0},
{2, -1, 3., 4},
{1, 2, 3, 2},
{0, 0, 0, 0, 0, 0, 2, -1, 3, 4, 0, 0},
},
PadTestParams{
{1, 1, 2, 1, 2},
{5, 2},
{0, 0, 1, 0, 0, 1, 0, 0, 0, 0},
{2, -1, 3., 4},
{1, 2, 3, 1, 2},
{0, 0, 0, 0, 0, 0, 2, -1, 3, 4, 0, 0},
},
PadTestParams{
{1, 1, 2, 1, 2},
{5, 2},
{0, 0, 0, 1, 0, 0, 1, 1, 0, 0},
{2, -1, 3., 4},
{1, 2, 2, 3, 2},
{0., 0., 2., -1., 0., 0., 0., 0., 3., 4., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0},
},
PadTestParams{
{1, 1, 2, 1},
{4, 2},
{1, 0, 0, 0, 0, 1, 0, 0},
{2.0f, -1.0f},
{2, 1, 3, 1},
{0.0, 0.0, 0.0, 2.0f, -1.0f, 0.0},
trt_mode_ == TrtTestMode::kImplicitBatch
? errors::InvalidArgument("Padding layer does not support "
"padding on batch dimension")
: OkStatus()},
PadTestParams{
{1, 1, 2, 1},
{4, 2},
{0, 0, 1, 0, 0, 1, 1, 1},
{2.0f, -1.0f},
{},
{},
errors::InvalidArgument("Padding layer does not support padding on "
"> 2")},
PadTestParams{
{1, 2, 2},
{3, 2},
{0, 0, 1, 0, 0, 1},
{2, -1, 3., 4},
{1, 3, 3},
{0., 0., 0., 2., -1., 0., 3., 4., 0.},
errors::InvalidArgument("Convertpad requires at least 4D input")}};
for (auto p : params) {
Reset();
NodeDef node_def = MakePadNodeDef("my_pad", tf_type_);
AddTestTensor("input", p.input_dims, p.input_values);
AddTestWeights<int32>("padding", p.pad_dims, p.pad_values);
TestOpConverter(node_def, p.expected_output_dims, p.status, p.status,
ElementsAreArray(p.expected_output_values));
}
}
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
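// Test fixture exercising conversion of Select and SelectV2 with mixed
// tensor/weight inputs and broadcasting.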
class OpConverter_Select : public ParameterizedOpConverterTestBase {
public:
void RunTest(const string& opName);
};
void OpConverter_Select::RunTest(const string& opName) {
const auto testing_SelectV2 = opName == "SelectV2";
const int maxVal = 32;
const std::array<const char*, 3> par_name = {"cond", "then", "else"};
std::array<DataType, 3> par_type = {DT_BOOL, tf_type_, tf_type_};
std::vector<int> config(3, 0);
std::array<const std::vector<int>*, 3> par_dims;
std::vector<float> data_then(1, 0), data_else(1, maxVal),
expected_output(1, maxVal);
std::array<std::vector<float>*, 3> par_value = {nullptr, &data_then,
&data_else};
std::vector<int> data_cond(1, 0);
auto set_parameters = [&](DataType cond_type = DT_BOOL) {
Reset();
if (config[0]) {
AddTestTensor(par_name[0], *par_dims[0], cond_type, data_cond);
} else {
AddTestWeights(par_name[0], {1}, data_cond, cond_type);
}
for (int i = 1; i < 3; i++) {
if (config[i]) {
AddTestTensor(par_name[i], *par_dims[i], par_type[i], *par_value[i]);
} else {
AddTestWeights(par_name[i], {1}, *par_value[i], par_type[i]);
}
}
};
auto set_dimension = [this](const nvinfer1::Dims* dims,
std::vector<int>& dims_param,
std::string* comment = nullptr) {
const auto nbDims = dims->nbDims;
if (comment) {
*comment = "batch_dim: " + std::to_string(nbDims + 1) + ", " +
DebugString(*dims);
}
dims_param.resize(nbDims);
for (int i = 0; i < nbDims; i++) dims_param[i] = dims->d[i];
};
auto adjust_comments = [this](const nvinfer1::Dims* p_dims,
std::string* p_comment) {
if (p_dims[0].nbDims == p_dims[1].nbDims) return;
const int idx = p_dims[0].nbDims < p_dims[1].nbDims ? 0 : 1;
nvinfer1::Dims dims;
dims.nbDims = p_dims[1 - idx].nbDims;
int i = 0;
for (; i < dims.nbDims - p_dims[idx].nbDims; i++) dims.d[i] = 1;
for (int j = i; i < dims.nbDims; i++) dims.d[i] = p_dims[idx].d[i - j];
*(p_comment + idx) =
"batch_dim: " + std::to_string(1) + ", " + DebugString(dims);
*(p_comment + 1 - idx) =
"batch_dim: " + std::to_string(p_dims[idx].nbDims + 1) + ", " +
DebugString(p_dims[1 - idx]);
};
auto assign_values = [this](
const std::array<const std::vector<int>*, 3>& dims,
std::array<std::vector<float>*, 3> par_value,
std::vector<int>& data_cond, int use_indices = 0,
const std::vector<float>* expected_out = nullptr,
std::vector<int>* expect_dims_pntr = nullptr) {
size_t rank[3];
const auto dim_len =
dims[0]->size() > dims[1]->size() ? dims[0]->size() : dims[1]->size();
std::vector<int> exp_dims;
if (!expect_dims_pntr) expect_dims_pntr = &exp_dims;
auto& expect_dims = *expect_dims_pntr;
expect_dims.resize(dim_len);
expect_dims.assign(dim_len, 0);
for (int i = 0; i < 3; i++) {
if (dims[i]) {
const auto& dim = *dims[i];
for (auto j = 0; j < dims[i]->size(); j++) {
if (expect_dims[j] < dim[j]) expect_dims[j] = dim[j];
}
rank[i] = std::accumulate(std::begin(dim), std::end(dim), 1,
std::multiplies<int>());
} else {
assert(i >= 2);
rank[i] = rank[i - 1];
}
}
for (int k = 1; k <= 2; k++) {
auto& data = *par_value[k];
data.resize(rank[k]);
if (use_indices) {
const int mult = k == 1 ? 1 : -1;
for (int i = 0; i < rank[k]; i++) {
data[i] = mult * (i + 1);
}
} else {
for (int i = 0; i < rank[k]; i++) {
data[i] = k == 1 ? data[i >> 1] + i % 2 : maxVal - (*par_value[1])[i];
}
}
}
data_cond.resize(rank[0]);
data_cond[0] = 0;
for (int i = 0; i < rank[0]; i++) {
data_cond[i] = i % 2 ? 1 - data_cond[i >> 1] : data_cond[i >> 1];
}
if (!expected_out || expected_out->size() > 0) {
auto& expected_output = *par_value[0];
const auto rank_out =
std::accumulate(std::begin(expect_dims), std::end(expect_dims), 1,
std::multiplies<int>());
assert(rank_out == (expected_out ? expected_out->size()
: rank[use_indices >= 0 ? 0 : 1]));
expected_output.resize(rank_out);
const auto& data_then = *par_value[1];
const auto& data_else = *par_value[2];
const auto div = use_indices >= 0 ? 1 : rank_out / rank[0];
for (int i = 0; i < rank_out; i++) {
expected_output[i] = expected_out ? (*expected_out)[i]
: data_cond[i / div] ? data_then[i]
: data_else[i];
}
}
};
auto shape_error_msg = [&](const NodeDef& node, bool same_then_else = true) {
nvinfer1::Dims shape[3];
const auto j = same_then_else ? 0 : 1;
if (trt_mode_ == TrtTestMode::kDynamicShape) {
for (int i = 0; i < 2; i++) {
for (int j = shape[i].nbDims = par_dims[i]->size(); j--;) {
shape[i].d[j] = -1;
}
}
} else {
for (int i = 0; i < 2; i++) {
DimsAdapter(*par_dims[i + j]).TrtDims(&shape[i + j]);
}
}
return input_shapes_error_msg(shape[j], shape[j + 1], node,
!same_then_else);
};
auto run_test = [&](const NodeDef& node, const std::vector<int>& exp_dims) {
const bool same_then_else_shapes = *par_dims[1] == *par_dims[2];
    const bool same_cond_shape = *par_dims[0] == *par_dims[1];
const auto nMax = testing_SelectV2 ? 2 : 1;
for (int n = 0; n < nMax; n++) {
set_parameters();
      if (testing_SelectV2 || (same_then_else_shapes && same_cond_shape)) {
TestOpConverter(node, exp_dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
const auto err_msg = shape_error_msg(node, same_then_else_shapes);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
if (!n) {
for (auto idx = data_cond.size(); idx--;)
data_cond[idx] = 1 - data_cond[idx];
if (!same_then_else_shapes) {
for (int p = 1; p <= 2; p++) {
auto& values = *par_value[p];
const auto val = p == 1 ? 1 : -1;
for (auto idx = values.size(); idx--;) values[idx] = val;
}
for (auto idx = expected_output.size(); idx--;)
expected_output[idx] = expected_output[idx] > 0 ? -1 : 1;
} else {
for (auto idx = expected_output.size(); idx--;)
expected_output[idx] = -expected_output[idx];
}
}
}
};
std::array<DataType, 3> data_types = {DT_FLOAT, DT_HALF, DT_INT32};
NodeDef node;
TF_CHECK_OK(NodeDefBuilder("op", opName)
.Input("cond", 0, DT_BOOL)
.Input("then", 0, tf_type_)
.Input("else", 0, tf_type_)
.Finalize(&node));
const std::vector<std::vector<int>> dims_params = {
{8}, {8, 2, 4}, {32, 32, 3200}};
par_dims = {&dims_params[0], &dims_params[0], &dims_params[0]};
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
const auto& err = convert_not_supported_implicit(node.op(), node.name());
do {
set_parameters();
RunValidationAndConversion(node, absl::StatusCode::kUnimplemented, err);
} while (nextTensorWeightConfiguration(config));
return;
}
do {
for (auto cond_type : {DT_INT32, DT_FLOAT, DT_HALF}) {
nvinfer1::DataType trt_type;
TF_ASSERT_OK(TfTypeToTrtType(cond_type, &trt_type));
const auto error_msg =
unexpected_type_error_msg(trt_type, nvinfer1::DataType::kBOOL, node);
set_parameters(cond_type);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
error_msg);
}
} while (nextTensorWeightConfiguration(config));
std::string err_msg = bool_weight_error_msg(node);
std::vector<int> dims_const = {1};
par_dims = {&dims_const, &dims_const, &dims_const};
for (int i = 0; i < 2; i++) {
do {
set_parameters();
if (config[0]) {
TestOpConverter(node, {1}, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
} while (nextTensorWeightConfiguration(config));
data_cond[0] = 1 - data_cond[0];
expected_output[0] = (*par_value[1 + i])[0];
}
for (int i = 0; i < 3; i++) {
config[i] = 1;
}
par_value[0] = &expected_output;
if (trt_mode_ == TrtTestMode::kExplicitBatch) {
std::string bc_comment[2];
std::vector<int> dims[4];
par_dims = {dims, dims + 1, dims + 1};
const nvinfer1::Dims infeasible_dims[] = {
{3, {4, 3, 2}}, {4, {4, 3, 2, 5}}, {3, {4, 1, 3}},
{3, {4, 3, 2}}, {3, {4, 3, 2}}, {5, {4, 3, 2, 5, 2}}};
auto iMax = sizeof(infeasible_dims) / sizeof(infeasible_dims[0]);
for (int i = 0; i < iMax; i += 2) {
for (int k = 0; k < 2; k++) {
for (int j = 0; j < 2; j++) {
set_dimension(infeasible_dims + i + (j + k) % 2, dims[j],
bc_comment + (j + k) % 2);
}
if (testing_SelectV2) {
adjust_comments(infeasible_dims + i, bc_comment);
err_msg = "Infeasible broadcast scheme (" + bc_comment[k] + " vs " +
bc_comment[1 - k];
} else {
err_msg = shape_error_msg(node);
}
set_parameters();
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
}
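    // Added note (not in the original source): broadcastable ("cond",
    // "then"/"else") shape pairs, followed by the expected output values for
    // each configuration.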
const nvinfer1::Dims feasible_dims_2[] = {
{3, {1, 3, 2}}, {3, {4, 3, 2}}, {3, {4, 1, 2}}, {3, {4, 3, 2}},
{3, {4, 3, 1}}, {3, {4, 3, 2}}, {3, {1, 1, 2}}, {3, {4, 3, 2}},
{3, {1, 3, 1}}, {3, {4, 3, 2}}, {3, {4, 1, 1}}, {3, {4, 3, 2}},
{3, {1, 1, 1}}, {3, {4, 3, 2}}, {3, {1, 3, 2}}, {3, {4, 1, 2}},
};
const std::vector<float> expected_val_2[] = {
{-1, 2, 3, -4, 5, -6, -7, 8, 9, -10, 11, -12,
-13, 14, 15, -16, 17, -18, -19, 20, 21, -22, 23, -24},
{-1, 2, 3, -4, 5, -6, -1, 2, 3, -4, -5, 6,
-1, 2, 3, -4, 5, -6, -1, 2, -3, 4, 5, -6},
{-1, 2, -3, 4, -5, 6, 7, -8, 9, -10, 11, -12,
13, -14, 15, -16, 17, -18, -19, 20, -21, 22, -23, 24},
{-1, 2, 1, -2, 1, -2, -3, 4, 3, -4, -3, 4,
-5, 6, 5, -6, 5, -6, -7, 8, -7, 8, 7, -8},
{-1, -2, 3, 4, 5, 6, -7, -8, 9, 10, -11, -12,
-13, -14, 15, 16, 17, 18, -19, -20, -21, -22, 23, 24},
{-1, 1, 2, -2, 3, -3, -4, 4, 5, -5, -6, 6,
-7, 7, 8, -8, 9, -9, -10, 10, -11, 11, 12, -12},
{-1, 2, -3, 4, -5, 6, -7, 8, -9, 10, -11, 12,
-13, 14, -15, 16, -17, 18, -19, 20, -21, 22, -23, 24},
{-1, 2, 1, -2, 1, -2, -1, 2, 1, -2, -1, 2,
-1, 2, 1, -2, 1, -2, -1, 2, -1, 2, 1, -2},
{-1, -2, 3, 4, 5, 6, -7, -8, 9, 10, 11, 12,
-13, -14, 15, 16, 17, 18, -19, -20, 21, 22, 23, 24},
{-1, 1, 2, -2, 3, -3, -1, 1, 2, -2, -3, 3,
-1, 1, 2, -2, 3, -3, -1, 1, -2, 2, 3, -3},
{-1, -2, -3, -4, -5, -6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, -19, -20, -21, -22, -23, -24},
{-1, 1, 1, -1, 1, -1, -2, 2, 2, -2, -2, 2,
-3, 3, 3, -3, 3, -3, -4, 4, -4, 4, 4, -4},
{-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12,
-13, -14, -15, -16, -17, -18, -19, -20, -21, -22, -23, -24},
{-1, 1, 1, -1, 1, -1, -1, 1, 1, -1, -1, 1,
-1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1},
{-1, 2, 1, -2, 1, -2, -3, 4, 3, -4, 3, -4,
-5, 6, 5, -6, 5, -6, -7, 8, 7, -8, 7, -8},
{-1, 2, -3, 4, -5, 6, 1, -2, 3, -4, 5, -6,
1, -2, 3, -4, 5, -6, -1, 2, -3, 4, -5, 6},
{-1, 2, 3, -4, 5, -6, -7, 2, 3, -10, -11, 6,
-13, 2, 3, -16, 5, -18, -19, 2, -21, 4, 5, -24},
{-1, 2, 3, -4, 5, -6, -1, 8, 9, -4, 11, -6,
-1, 14, 15, -4, 17, -6, -1, 20, 21, -4, 23, -6},
{-1, 2, 1, -4, 1, -6, -7, 4, 3, -10, -11, 4,
-13, 6, 5, -16, 5, -18, -19, 8, -21, 8, 7, -24},
{-1, 2, -1, 4, -1, 6, 7, -4, 9, -4, 11, -4,
13, -6, 15, -6, 17, -6, -7, 20, -7, 22, -7, 24},
{-1, 1, 2, -4, 3, -6, -7, 4, 5, -10, -11, 6,
-13, 7, 8, -16, 9, -18, -19, 10, -21, 11, 12, -24},
{-1, -1, 3, 4, 5, 6, -4, -4, 9, 10, -6, -6,
-7, -7, 15, 16, 17, 18, -10, -10, -11, -11, 23, 24},
{-1, 2, 1, -4, 1, -6, -7, 2, 1, -10, -11, 2,
-13, 2, 1, -16, 1, -18, -19, 2, -21, 2, 1, -24},
{-1, 2, -1, 4, -1, 6, -1, 8, -1, 10, -1, 12,
-1, 14, -1, 16, -1, 18, -1, 20, -1, 22, -1, 24},
{-1, 1, 2, -4, 3, -6, -7, 1, 2, -10, -11, 3,
-13, 1, 2, -16, 3, -18, -19, 1, -21, 2, 3, -24},
{-1, -1, 3, 4, 5, 6, -1, -1, 9, 10, 11, 12,
-1, -1, 15, 16, 17, 18, -1, -1, 21, 22, 23, 24},
{-1, 1, 1, -4, 1, -6, -7, 2, 2, -10, -11, 2,
-13, 3, 3, -16, 3, -18, -19, 4, -21, 4, 4, -24},
{-1, -1, -1, -1, -1, -1, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, -4, -4, -4, -4, -4, -4},
{-1, 1, 1, -4, 1, -6, -7, 1, 1, -10, -11, 1,
-13, 1, 1, -16, 1, -18, -19, 1, -21, 1, 1, -24},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, 2, -1, 4, -1, 6, 1, -4, 3, -4, 5, -4,
1, -6, 3, -6, 5, -6, -7, 2, -7, 4, -7, 6},
{-1, 2, 1, -4, 1, -6, -1, 4, 3, -4, 3, -6,
-1, 6, 5, -4, 5, -6, -1, 8, 7, -4, 7, -6}};
const auto exp_dims = dims + 3;
const int kMax2 = 2;
iMax = sizeof(feasible_dims_2) / sizeof(feasible_dims_2[0]);
    assert(kMax2 * iMax ==
           sizeof(expected_val_2) / sizeof(expected_val_2[0]));
for (int i = 0; i < iMax; i += 2) {
for (int k = 0; k < kMax2; k++) {
for (int j = 0; j < 2; j++)
set_dimension(feasible_dims_2 + i + (j + k) % 2, dims[j]);
const std::vector<float>* expect = expected_val_2 + i + k;
for (int m = 0; m < 2; m++) {
assign_values(par_dims, par_value, data_cond, 1, expect, exp_dims);
run_test(node, *exp_dims);
const auto tmp = par_dims[0];
par_dims[0] = par_dims[1];
par_dims[1] = tmp;
expect += iMax;
}
}
}
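    // Added note (not in the original source): three mutually broadcastable
    // shapes assigned to "cond", "then" and "else" in all permutations.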
const nvinfer1::Dims feasible_dims_3[] = {
{2, {3, 2}}, {2, {3, 1}}, {2, {1, 1}}, {3, {2, 2, 1}},
{3, {2, 1, 2}}, {3, {1, 2, 2}}, {3, {2, 1, 1}}, {3, {2, 1, 2}},
{3, {1, 2, 2}}, {3, {2, 1, 1}}, {3, {1, 1, 2}}, {3, {1, 2, 1}},
};
const std::vector<float> expected_val_3[] = {
{-1, 1, 2, -1, 3, -1}, {-1, 1, 1, -2, 1, -3},
{-1, -1, 3, 4, 5, 6}, {-1, -2, 1, 1, 1, 1},
{-1, -1, -2, -2, -3, -3}, {-1, -2, -3, -4, -5, -6},
{-1, -2, 1, 2, 3, 4, -3, -4}, {-1, -2, 3, 4, 1, 2, -3, -4},
{-1, 1, -3, 2, 3, -2, 4, -4}, {-1, 2, -2, 4, 1, -3, 3, -4},
{-1, 1, 2, -2, -3, 3, 4, -4}, {-1, 2, 1, -2, -3, 4, 3, -4},
{-1, -2, -3, -4, 3, 4, 3, 4}, {-1, -2, -1, -2, 1, 2, 3, 4},
{-1, 1, -3, 1, 2, -2, 2, -4}, {-1, 2, -1, 4, 1, -2, 3, -2},
{-1, 1, 1, -2, -3, 2, 2, -4}, {-1, 2, 1, -1, -2, 4, 3, -2},
{-1, -1, -2, -2, 1, 2, 1, 2}, {-1, -2, -1, -2, 1, 1, 2, 2},
{-1, 1, -2, 1, -1, 2, -2, 2}, {-1, 1, -1, 2, -2, 1, -2, 2},
{-1, -2, 1, 1, -1, -2, 2, 2}, {-1, -1, 1, 2, -2, -2, 1, 2},
};
const int kMax3 = 6;
const std::array<int, 3> perm[kMax3] = {{0, 1, 2}, {0, 2, 1}, {1, 0, 2},
{1, 2, 0}, {2, 0, 1}, {2, 1, 0}};
par_dims = {dims, dims + 1, dims + 2};
iMax = sizeof(feasible_dims_3) / sizeof(feasible_dims_3[0]);
assert(kMax3 * iMax / 3 ==
sizeof(expected_val_3) / sizeof(expected_val_3[0]));
for (int i = 0; i < iMax; i += 3) {
for (int k = 0; k < kMax3; k++) {
for (int j = 0; j < 3; j++)
set_dimension(feasible_dims_3 + i + perm[k][j], dims[j]);
const auto* expect = expected_val_3 + kMax3 * (i / 3) + k;
assign_values(par_dims, par_value, data_cond, 1, expect, exp_dims);
run_test(node, *exp_dims);
}
}
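    // Added note (not in the original source): legacy Select requires the
    // condition to be a 1-D vector whose length matches the first dimension
    // of "then"/"else".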
if (!testing_SelectV2) {
const nvinfer1::Dims vect_dim[] = {
{1, {4}}, {3, {5, 2, 3}}, {2, {5, 2}}, {3, {5, 2, 3}},
{1, {5}}, {3, {5, 2, 3}}, {1, {4}}, {4, {4, 3, 5, 2}},
};
std::vector<int> dims[4];
par_dims = {dims, dims + 1, dims + 1};
auto iMax = sizeof(vect_dim) / sizeof(vect_dim[0]);
for (int i = 0; i < iMax; i += 2) {
err_msg =
vect_dim[i].nbDims != 1 || vect_dim[i].d[0] != vect_dim[i + 1].d[0]
? input_shapes_error_msg(vect_dim[i], vect_dim[i + 1], node)
: "";
for (int j = 0; j < 2; j++) {
set_dimension(vect_dim + i + j, dims[j]);
}
assign_values(par_dims, par_value, data_cond, -1);
set_parameters();
if (err_msg.empty()) {
TestOpConverter(node, dims[1], OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
}
}
}
for (auto dims : dims_params) {
par_dims = {&dims, &dims, &dims};
assign_values(par_dims, par_value, data_cond);
for (const auto type_else : data_types) {
par_type[2] = type_else;
set_parameters();
if ((par_type[1] == DT_INT32 || par_type[2] == DT_INT32) &&
par_type[1] != par_type[2]) {
nvinfer1::DataType trt_type[2];
for (int i = 0; i < 2; i++) {
TF_ASSERT_OK(TfTypeToTrtType(par_type[i + 1], trt_type + i));
}
err_msg = then_else_dtypes_error_msg(trt_type[0], trt_type[1], node);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
} else {
TestOpConverter(node, dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
}
}
par_type[2] = tf_type_;
}
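  // Added note (not in the original source): in dynamic-shape mode a
  // lower-rank condition is accepted by SelectV2 but rejected by Select.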
if (trt_mode_ == TrtTestMode::kDynamicShape) {
std::vector<float> values_then{1, 2, 3, 4, 5, 6};
std::vector<float> values_else{-1, -2, -3, -4, -5, -6};
std::vector<float> expected_output{1, -2, 3, 4, -5, 6};
data_cond = std::vector<int>{1, 0, 1};
const std::vector<int> cond_dims{1, 3}, input_dims{1, 2, 3};
par_dims = {&cond_dims, &input_dims, &input_dims};
const auto len_cond = data_cond.size();
for (int i = 0; i < 2; i++) {
par_value[i + 1] = &values_then;
par_value[2 - i] = &values_else;
for (int j = 0; j < values_then.size(); j++) {
expected_output[j] = par_value[2 - data_cond[j % len_cond]]->at(j);
}
set_parameters();
if (testing_SelectV2) {
TestOpConverter(node, input_dims, OkStatus(), OkStatus(),
ElementsAreArray(expected_output));
} else {
const auto err_msg = shape_error_msg(node);
RunValidationAndConversion(node, absl::StatusCode::kInvalidArgument,
err_msg);
}
for (int j = len_cond; j--;) {
data_cond[j] = 1 - data_cond[j];
}
}
}
}
INSTANTIATE_TEST_CASE_P(
OpConvTestInstantiation, OpConverter_Select,
::testing::Combine(::testing::ValuesIn(ValidTrtModes),
::testing::Values(DT_FLOAT, DT_HALF, DT_INT32),
::testing::Values(TrtPrecisionMode::FP32)));
TEST_P(OpConverter_Select, ConvertSelectV2) { RunTest("SelectV2"); }
TEST_P(OpConverter_Select, Convert_Select) { RunTest("Select"); }
TEST_F(OpConverterTest, DuplicateSqueeze) {
auto op_converter = [](const OpConverterParams* params) -> Status {
if (params->validation_only) return OkStatus();
auto input = params->inputs.at(0).tensor();
ITensorProxyPtr output;
std::vector<int> new_dims = {0, 1, 2, 3};
TF_EXPECT_OK(params->converter->SqueezeTensor(
input, &new_dims, params,
&output, 0));
new_dims = {0, 2, 3};
TF_EXPECT_OK(params->converter->SqueezeTensor(
output, &new_dims, params,
&output, 1));
params->outputs->push_back(TRT_TensorOrWeights(output));
return OkStatus();
};
NodeDef node_def = CreateUnaryOp<ops::Abs>(DataType::DT_FLOAT);
AddTestTensor("input", {1, 1, 2, 3});
GetOpConverterRegistry()->Register("Abs", kDefaultConverterPriority + 1,
op_converter);
RunValidationAndConversion(node_def);
DataVec input_data;
DataVec output_data;
InputOutputData abs_input{
"input", ConstructTensor<float>(6, 0,
DataType::DT_FLOAT)};
InputOutputData abs_output{
"my_unary", ConstructTensor<float>(6, 0,
DataType::DT_FLOAT)};
input_data.push_back(abs_input);
output_data.push_back(abs_output);
TF_EXPECT_OK(BuildAndRun(input_data, &output_data));
}
#endif
}
}
}
int main(int argc, char** argv) {
#if IS_TRT_VERSION_GE(8, 2, 0, 0)
std::unique_ptr<nvinfer1::IBuilder> const holder{
nvinfer1::createInferBuilder(*tensorflow::tensorrt::Logger::GetLogger())};
#endif
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
#else
int main(int, char**) { return 0; }
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dfecc821-d7b7-48f6-9ad7-d9bbaf890ca2 | cpp | tensorflow/tensorflow | slice_op | tensorflow/compiler/tf2xla/kernels/slice_op.cc | tensorflow/core/kernels/slice_op_test.cc | #include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/lib/dynamic_shaped_ops.h"
#include "xla/hlo/builder/value_inference.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
class SliceOp : public XlaOpKernel {
public:
explicit SliceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {}
void Compile(XlaOpKernelContext* ctx) override {
const TensorShape input_shape = ctx->InputShape(0);
const TensorShape begin_tensor_shape = ctx->InputShape(1);
const TensorShape size_tensor_shape = ctx->InputShape(2);
const int input_dims = input_shape.dims();
OP_REQUIRES(
ctx,
TensorShapeUtils::IsVector(begin_tensor_shape) &&
TensorShapeUtils::IsVector(size_tensor_shape) &&
begin_tensor_shape.num_elements() == input_dims &&
size_tensor_shape.num_elements() == input_dims,
errors::InvalidArgument(
"Expected begin and size arguments to be 1-D tensors of size ",
input_dims, ", but got shapes ", begin_tensor_shape.DebugString(),
" and ", size_tensor_shape.DebugString(), " instead."));
std::vector<int64_t> begin;
std::vector<int64_t> size;
const bool all_begins_are_constant =
ctx->ConstantInputAsIntVector(1, &begin).ok();
const bool all_sizes_are_constant =
ctx->ConstantInputAsIntVector(2, &size).ok();
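    // Added note (not in the original source): when both "begin" and "size"
    // are compile-time constants, a static xla::Slice is emitted.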
if (all_begins_are_constant && all_sizes_are_constant) {
std::vector<int64_t> wrapped_size(size.size());
for (int i = 0; i < input_dims; ++i) {
if (size[i] == -1) {
wrapped_size[i] = input_shape.dim_size(i) - begin[i];
} else {
wrapped_size[i] = size[i];
}
}
for (int i = 0; i < input_dims; ++i) {
int64_t b = begin[i];
int64_t s = wrapped_size[i];
if (input_shape.dim_size(i) == 0) {
OP_REQUIRES(ctx, b == 0 && s == 0,
errors::InvalidArgument(
"Expected begin[", i, "] == 0 (got ", b,
") and size[", i, "] == 0 ", "(got ", s, ") when ",
"input_shape.dim_size(", i, ") == 0"));
} else {
OP_REQUIRES(ctx, 0 <= b && b <= input_shape.dim_size(i),
errors::InvalidArgument("Expected begin[", i, "] in [0, ",
input_shape.dim_size(i),
"], but got ", b));
OP_REQUIRES(ctx, 0 <= s && b + s <= input_shape.dim_size(i),
errors::InvalidArgument("Expected size[", i, "] in [0, ",
input_shape.dim_size(i) - b,
"], but ", "got ", s));
}
}
std::vector<int64_t> limits;
limits.reserve(begin.size());
for (int i = 0; i < begin.size(); ++i) {
limits.push_back(begin[i] + wrapped_size[i]);
}
std::vector<int64_t> strides(begin.size(), 1);
auto slice = xla::Slice(ctx->Input(0), begin, limits, strides);
std::vector<bool> size_is_dynamic;
OP_REQUIRES_OK(
ctx, ctx->ResolveInputDynamismIntoPredVector(2, &size_is_dynamic));
for (int64_t i = 0; i < size.size(); ++i) {
if (size_is_dynamic[i]) {
if (size[i] != -1) {
auto dynamic_size =
xla::Reshape(xla::Slice(ctx->Input(2), {i}, {i + 1}, {1}), {});
slice = xla::SetDimensionSize(slice, dynamic_size, i);
}
}
}
ctx->SetOutput(0, slice);
} else {
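      // Added note (not in the original source): "begin" (and possibly
      // "size") is only known at run time, so the slice is taken with
      // xla::DynamicSlice.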
bool constant_size_is_minus_one = false;
if (all_sizes_are_constant) {
for (int i = 0; i < input_dims; ++i) {
if (size[i] < 0) {
OP_REQUIRES(ctx, size[i] == -1,
errors::InvalidArgument(
"Negative size of slice operator can only be -1"));
constant_size_is_minus_one = true;
}
OP_REQUIRES(ctx, size[i] <= input_shape.dim_size(i),
errors::InvalidArgument("Expected size[", i, "] in [0, ",
input_shape.dim_size(i),
"], but ", "got ", size[i]));
}
}
absl::InlinedVector<xla::XlaOp, 4> begin_indices;
begin_indices.reserve(input_dims);
xla::XlaOp begin = ctx->Input("begin");
for (int i = 0; i < input_dims; i++) {
begin_indices.push_back(
xla::Reshape(xla::Slice(begin, {i}, {i + 1}, {1}), {}));
}
if (all_sizes_are_constant && !constant_size_is_minus_one) {
xla::XlaOp input = ctx->Input(0);
ctx->SetOutput(0, xla::DynamicSlice(input, begin_indices, size));
} else {
xla::PaddingConfig padding_config;
xla::XlaOp input = ctx->Input(0);
for (int64_t i = 0; i < input_dims; ++i) {
auto* dims = padding_config.add_dimensions();
dims->set_edge_padding_low(0);
dims->set_edge_padding_high(input_shape.dim_size(i));
dims->set_interior_padding(0);
input = xla::RemoveDynamicDimension(input, i);
}
auto padded_input =
xla::Pad(input, xla::Zero(ctx->builder(), ctx->input_xla_type(0)),
padding_config);
auto sliced = xla::DynamicSlice(padded_input, begin_indices,
input_shape.dim_sizes());
for (int i = 0; i < input_dims; i++) {
xla::XlaOp dynamic_size =
xla::Reshape(xla::Slice(ctx->Input(2), {i}, {i + 1}, {1}), {});
if (constant_size_is_minus_one && size[i] == -1) {
dynamic_size = xla::ConstantR0<int32>(ctx->builder(),
input_shape.dim_size(i)) -
begin_indices[i];
}
auto constant_size = ctx->value_inference().AnalyzeConstant(
dynamic_size, xla::ValueInferenceMode::kValue);
OP_REQUIRES_OK(ctx, constant_size.status());
if (constant_size->AllValid()) {
sliced = xla::SliceInDim(
sliced, 0, constant_size->Get<int32>({}).value(), 1, i);
} else {
auto status = xla::SetDimensionSizeWithRebound(
&ctx->value_inference(), sliced, dynamic_size, i);
OP_REQUIRES_OK(ctx, status.status());
sliced = status.value();
}
}
ctx->SetOutput(0, sliced);
}
}
}
};
REGISTER_XLA_OP(Name("Slice")
.CompileTimeConstantInput("begin")
.CompileTimeConstantInput("size"),
SliceOp);
}
} | #include <functional>
#include <memory>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
template <typename T>
static void SliceHelper(::testing::benchmark::State& state) {
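  // Added note (not in the original source): benchmarks slicing a
  // [kDim, size] block out of a [2 * kDim, kMaxSize] random tensor.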
const int size = state.range(0);
Graph* g = new Graph(OpRegistry::Global());
DataType dt = DataTypeToEnum<T>::v();
int kDim = 100;
int kMaxSize = 15000;
CHECK_LT(size, kMaxSize);
Tensor begin(DT_INT32, TensorShape({2}));
begin.flat<int32>()(0) = 10;
begin.flat<int32>()(1) = 10;
Tensor sizes(DT_INT32, TensorShape({2}));
sizes.flat<int32>()(0) = kDim;
sizes.flat<int32>()(1) = size;
Tensor input(dt, TensorShape({2 * kDim, kMaxSize}));
input.flat<T>().setRandom();
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Slice")
.Input(test::graph::Constant(g, input))
.Input(test::graph::Constant(g, begin))
.Input(test::graph::Constant(g, sizes))
.Attr("T", dt)
.Finalize(g, &node));
FixupSourceAndSinkEdges(g);
test::Benchmark("cpu", g, nullptr, nullptr, nullptr,
"SINGLE_THREADED_EXECUTOR", false)
.Run(state);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * kDim *
size * sizeof(T));
}
void BM_SliceFloat(::testing::benchmark::State& state) {
SliceHelper<float>(state);
}
BENCHMARK(BM_SliceFloat)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000);
void BM_SliceBFloat16(::testing::benchmark::State& state) {
SliceHelper<bfloat16>(state);
}
BENCHMARK(BM_SliceBFloat16)->UseRealTime()->Arg(100)->Arg(1000)->Arg(10000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/slice_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/slice_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3d1a82c8-f54e-44c5-b05f-f68fb87ecf6e | cpp | tensorflow/tensorflow | meta_optimizer | tensorflow/core/grappler/optimizers/data/meta_optimizer.cc | tensorflow/core/grappler/optimizers/meta_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/data/meta_optimizer.h"
#include <array>
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
using ConfigMap =
std::map<string, tensorflow::RewriterConfig_CustomGraphOptimizer>;
constexpr std::array<const char*, 22> kTFDataOptimizations = {
"noop_elimination",
"disable_intra_op_parallelism",
"use_private_thread_pool",
"shuffle_and_repeat_fusion",
"map_parallelization",
"map_fusion",
"filter_fusion",
"map_and_filter_fusion",
"map_and_batch_fusion",
"batch_parallelization",
"filter_parallelization",
"make_sloppy",
"parallel_batch",
"slack",
"autotune_buffer_sizes",
"seq_interleave_prefetch",
"inject_prefetch",
"inject_io_prefetch_eligible",
"inject_io_prefetch",
"disable_prefetch_legacy_autotune",
"enable_gradient_descent",
"make_deterministic"};
Status ToConfigMap(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config,
ConfigMap* result) {
auto found = gtl::FindOrNull(config->parameter_map(), "optimizer_configs");
if (!found) return absl::OkStatus();
auto& options = found->list().s();
for (const auto& option_string : options) {
std::vector<string> split = absl::StrSplit(option_string, ':');
if (split.size() != 3) {
return errors::Internal(
"Wrong format for optimizer options. Expect <optimizer name>:<config "
"key>:<config value>, received: ",
option_string);
}
const string& optimizer_name = split[0];
const string& config_key = split[1];
const string& config_value = split[2];
auto optimizer_config = gtl::FindOrNull(*result, optimizer_name);
if (!optimizer_config) {
(*result)[optimizer_name] =
tensorflow::RewriterConfig_CustomGraphOptimizer();
optimizer_config = gtl::FindOrNull(*result, optimizer_name);
}
(*optimizer_config->mutable_parameter_map())[config_key].set_s(
config_value);
}
return absl::OkStatus();
}
}
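// Added note (not in the original source): applies each rewrite listed in
// kTFDataOptimizations to the graph, then recursively optimizes reachable
// tf.data functions in the library.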
Status TFDataMetaOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* output) {
GrapplerItem optimized_item = item;
for (const auto& optimization : kTFDataOptimizations) {
tensorflow::metrics::ScopedCounter<2> timings(
tensorflow::metrics::GetGraphOptimizationCounter(),
{"TFData", optimization});
Status status = ApplyOptimization(optimization, cluster, &optimized_item);
timings.ReportAndStop();
if (!status.ok()) return status;
}
output->Swap(&optimized_item.graph);
FunctionLibraryDefinition flib =
FunctionLibraryDefinition(OpRegistry::Global(), output->library())
.ReachableDefinitions(*output);
const auto producer = output->versions().producer();
bool optimized_functions = false;
for (const auto& name : flib.ListFunctionNames()) {
auto* func = flib.Find(name);
if (!data::IsTFDataFunction(*func)) continue;
VLOG(3) << "Optimize function: function=" << func->signature().name();
optimized_functions = true;
GrapplerFunctionItem func_item;
TF_RETURN_IF_ERROR(
MakeGrapplerFunctionItem(*func, flib, producer, &func_item));
GraphDef optimized_func_graph;
TF_RETURN_IF_ERROR(Optimize(cluster, func_item, &optimized_func_graph));
for (const FunctionDef& func_def :
optimized_func_graph.library().function()) {
if (flib.Find(func_def.signature().name()) == nullptr) {
TF_RETURN_IF_ERROR(flib.AddFunctionDef(func_def));
}
}
FunctionDef optimized_func;
func_item.SwapFunctionBody(std::move(optimized_func_graph));
TF_RETURN_IF_ERROR(MakeFunctionDef(func_item, flib, &optimized_func));
TF_RETURN_IF_ERROR(
flib.ReplaceFunction(func->signature().name(), optimized_func));
}
if (optimized_functions) {
*output->mutable_library() = flib.ToProto();
}
return absl::OkStatus();
}
Status TFDataMetaOptimizer::ApplyOptimization(const string& name,
Cluster* cluster,
GrapplerItem* item) const {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
const auto* optimizer = gtl::FindOrNull(enabled_optimizers_, name);
if (!optimizer) {
return absl::OkStatus();
}
GraphDef result;
(*optimizer)->set_deadline_usec(this->deadline_usec());
Status status = (*optimizer)->Optimize(cluster, *item, &result);
if (status.ok()) {
item->graph.Swap(&result);
} else if (absl::IsAborted(status)) {
status = absl::OkStatus();
}
return status;
}
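// Added note (not in the original source): instantiates every optimizer named
// in the "optimizers" parameter and initializes each with its own config.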
Status TFDataMetaOptimizer::Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (!config) return absl::OkStatus();
auto& optimizers = config->parameter_map().at("optimizers").list().s();
ConfigMap optimizer_configs;
TF_RETURN_IF_ERROR(ToConfigMap(config, &optimizer_configs));
for (const auto& optimizer_name : optimizers) {
auto optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull(optimizer_name);
if (optimizer) {
TF_RETURN_IF_ERROR(
optimizer->Init(gtl::FindOrNull(optimizer_configs, optimizer_name)));
enabled_optimizers_[optimizer_name] = std::move(optimizer);
} else {
return errors::Internal(
"Tried to register a dataset optimizer that doesn't exist: ",
optimizer_name);
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(TFDataMetaOptimizer, "tf_data_meta_optimizer");
}
} | #include "tensorflow/core/grappler/optimizers/meta_optimizer.h"
#include <atomic>
#include "absl/strings/match.h"
#include "absl/strings/substitute.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/device:CPU:0";
class TestOptimizer : public CustomGraphOptimizer {
public:
static void SetOptimized(const bool flag_value) { optimized_ = flag_value; }
static bool IsOptimized() { return optimized_; }
TestOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(const tensorflow::RewriterConfig_CustomGraphOptimizer* config =
nullptr) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
optimized_ = true;
*optimized_graph = item.graph;
return absl::OkStatus();
}
private:
static bool optimized_;
};
bool TestOptimizer::optimized_;
REGISTER_GRAPH_OPTIMIZER(TestOptimizer);
class TestGraphOptimizer : public TestOptimizer {
public:
string name() const override { return "test_graph_optimizer"; }
};
REGISTER_GRAPH_OPTIMIZER(TestGraphOptimizer);
class TestOptimizerWithParams : public TestOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
CHECK(config != nullptr);
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(TestOptimizerWithParams);
class GrapplerItemPropertiesAccumulator : public CustomGraphOptimizer {
public:
static void SetOptimizationOptions(
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options) {
optimization_options_ = optimization_options;
}
static void ResetOptimizationOptions() { optimization_options_ = nullptr; }
GrapplerItemPropertiesAccumulator() {}
string name() const override {
return "grappler_item_properties_accumulator";
}
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
if (optimization_options_) {
optimization_options_->insert({item.id, item.optimization_options()});
}
return absl::OkStatus();
}
private:
static gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
optimization_options_;
};
gtl::FlatMap<string, GrapplerItem::OptimizationOptions>*
GrapplerItemPropertiesAccumulator::optimization_options_;
REGISTER_GRAPH_OPTIMIZER(GrapplerItemPropertiesAccumulator);
class MetaOptimizerTest : public GrapplerTest {};
TEST_F(MetaOptimizerTest, RunsCustomOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerWithParams) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizerWithParams");
auto* custom_config = rewriter_config.add_custom_optimizers();
custom_config->set_name("TestOptimizerWithParams");
(*custom_config->mutable_parameter_map())["foo"] = AttrValue();
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsCustomOptimizerAndCustomGraphOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
TestGraphOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("TestOptimizer");
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunsPluginOptimizer) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"/device:GPU:0"});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
TestOptimizer::SetOptimized(false);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_min_graph_nodes(-1);
const auto creator = []() { return new TestOptimizer; };
ConfigList config_list;
config_list.disable_model_pruning = true;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, RunOptimizersTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunToggleOptimizersAndCustomGraphOptimizerTwice) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
auto customGraphOptimizer = rewriter_config.add_custom_optimizers();
customGraphOptimizer->set_name("TestGraphOptimizer");
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_TRUE(TestGraphOptimizer::IsOptimized());
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibrary) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"my_mul"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "my_mul:z:0"}});
(*square_func.mutable_attr())["_noinline"].set_b(true);
FunctionDef quadratic_func = FunctionDefHelper::Create(
"MyQuadratic", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"square"}, "MySquare", {"x"}, {{"T", "$T"}}},
{{"quadratic"}, "MySquare", {"square:z"}, {{"T", "$T"}}}},
{{"z", "quadratic:z:0"}});
(*quadratic_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("quadratic", "MyQuadratic", {"b"}, {{"T", DT_INT32}}, kDevice),
NDef("out_s", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_q", "Identity", {"quadratic:0"}, {{"T", DT_INT32}}, kDevice)},
{mul_func, square_func, quadratic_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(3, optimized_flib.num_functions());
const auto specialized_name = [](const string& fn, const string& node,
const string& id) {
return absl::Substitute("$0_specialized_for_$1_at_$2", fn, node, id);
};
const string optimized_0 =
specialized_name("MyQuadratic", "quadratic", "tf_graph");
const string optimized_1 = specialized_name("MySquare", "square", "tf_graph");
const string optimized_2 =
specialized_name("MySquare", "square", optimized_0);
const FunctionDef* optimized_func_0 = optimized_flib.Find(optimized_0);
const FunctionDef* optimized_func_1 = optimized_flib.Find(optimized_1);
const FunctionDef* optimized_func_2 = optimized_flib.Find(optimized_2);
ASSERT_NE(optimized_func_0, nullptr);
ASSERT_NE(optimized_func_1, nullptr);
ASSERT_NE(optimized_func_2, nullptr);
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_1, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_0, node.op());
}
}
EXPECT_EQ(2, count);
count = 0;
for (const NodeDef& node : optimized_func_0->node_def()) {
if (node.name() == "square" && ++count) {
EXPECT_EQ(optimized_2, node.op());
} else if (node.name() == "quadratic" && ++count) {
EXPECT_EQ(optimized_2, node.op());
}
}
EXPECT_EQ(2, count);
const std::vector<const FunctionDef*> optimized_funcs = {optimized_func_1,
optimized_func_2};
for (const FunctionDef* optimized_func : optimized_funcs) {
count = 0;
for (const NodeDef& node : optimized_func->node_def()) {
if (node.name() == "Func/my_mul/input/_0" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "Func/my_mul/input/_1" && ++count) {
EXPECT_EQ("Identity", node.op());
EXPECT_EQ(1, node.input_size());
EXPECT_EQ("x", node.input(0));
} else if (node.name() == "my_mul/mul" && ++count) {
EXPECT_EQ("Mul", node.op());
EXPECT_EQ(2, node.input_size());
EXPECT_EQ("Func/my_mul/input/_0:output:0", node.input(0));
EXPECT_EQ("Func/my_mul/input/_1:output:0", node.input(1));
}
EXPECT_TRUE(node.device().empty());
}
EXPECT_EQ(3, count);
ASSERT_EQ(1, optimized_func->ret().size());
EXPECT_EQ("Func/my_mul/output/_2:output:0", optimized_func->ret().at("z"));
}
item.fetch = {"out_s", "out_q"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<int>(4));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<int>(tensors_expected[1], tensors[1]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneUnusedOutputs) {
using test::function::NDef;
ConfigProto config_proto;
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_mul = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output0"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output0:z:0"}, {"z1", "output1:z:0"}, {"z2", "output2:z:0"}});
FunctionDef my_fwd = FunctionDefHelper::Create(
"Fwd", {"x:T", "y:T"}, {"z0:T", "z1:T", "z2:T"}, {"T: {float, int32}"},
{{{"output"}, "MyMul", {"x", "y"}, {{"T", "$T"}}}},
{{"z0", "output:z0:0"}, {"z1", "output:z1:0"}, {"z2", "output:z2:0"}});
(*my_mul.mutable_attr())["_noinline"].set_b(true);
(*my_fwd.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_mul, my_fwd};
GrapplerItem item;
item.id = "tf_graph";
item.fetch = {"ret"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fwd", "Fwd", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("ret", "Identity", {"fwd:2"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string specialized_my_fwd = "Fwd_specialized_for_fwd_at_tf_graph";
const string specialized_my_mul =
absl::StrCat("MyMul_specialized_for_output_at_", specialized_my_fwd);
FunctionDef expected_my_mul = FunctionDefHelper::Create(
specialized_my_mul, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output2"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output2:z:0"}});
FunctionDef expected_my_fwd = FunctionDefHelper::Create(
specialized_my_fwd, {"x:float", "y:float"}, {"z2:float"}, {},
{{{"output"}, specialized_my_mul, {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z2", "output:z2:0"}});
const FunctionDef* my_mul_spec = optimized_flib.Find(specialized_my_mul);
const FunctionDef* my_fwd_spec = optimized_flib.Find(specialized_my_fwd);
ASSERT_NE(my_mul_spec, nullptr);
ASSERT_NE(my_fwd_spec, nullptr);
CompareFunctions(expected_my_mul, *my_mul_spec);
CompareFunctions(expected_my_fwd, *my_fwd_spec);
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(4.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryPruneFunctionBody) {
using test::function::NDef;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.set_function_optimization(RewriterConfig::ON);
rewriter_config.add_optimizers("function");
rewriter_config.add_optimizers("pruning");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T"}, {"T: {float, double}"},
{{{"mul1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"mul2"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "mul1:z:0"}, {"z2", "mul2:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "MyFunc", {"a", "b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn1", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_fn2", "Identity", {"fn2:1"}, {{"T", DT_FLOAT}}, kDevice)},
{my_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
FunctionLibraryDefinition optimized_flib(OpRegistry::Global(),
output.library());
EXPECT_EQ(2, optimized_flib.num_functions());
const string optimized_fn1 = "MyFunc_specialized_for_fn1_at_tf_graph";
const string optimized_fn2 = "MyFunc_specialized_for_fn2_at_tf_graph";
const FunctionDef* optimized_func_fn1 = optimized_flib.Find(optimized_fn1);
const FunctionDef* optimized_func_fn2 = optimized_flib.Find(optimized_fn2);
ASSERT_NE(optimized_func_fn1, nullptr);
ASSERT_NE(optimized_func_fn2, nullptr);
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++count) {
EXPECT_EQ(optimized_fn1, node.op());
} else if (node.name() == "fn2" && ++count) {
EXPECT_EQ(optimized_fn2, node.op());
}
}
EXPECT_EQ(2, count);
ASSERT_EQ(1, optimized_func_fn1->node_def_size());
EXPECT_EQ(1, optimized_func_fn1->signature().output_arg_size());
EXPECT_EQ("z1", optimized_func_fn1->signature().output_arg(0).name());
EXPECT_EQ("mul1", optimized_func_fn1->node_def(0).name());
ASSERT_EQ(1, optimized_func_fn2->node_def_size());
EXPECT_EQ(1, optimized_func_fn2->signature().output_arg_size());
EXPECT_EQ("z2", optimized_func_fn2->signature().output_arg(0).name());
EXPECT_EQ("mul2", optimized_func_fn2->node_def(0).name());
item.fetch = {"out_fn1", "out_fn2"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
item.feed.emplace_back("b", test::AsScalar<float>(3.123f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
TEST_F(MetaOptimizerTest, OptimizeFunctionLibraryWithRestrictions) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
FunctionDef mul_func_1 = FunctionDefHelper::Create(
"MyMul1", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 = FunctionDefHelper::Create(
"MyMul2", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(optimization_options.size(), 3);
auto optimization_options_main =
gtl::FindOrNull(optimization_options, "main");
ASSERT_NE(optimization_options_main, nullptr);
EXPECT_TRUE(optimization_options_main->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_1 =
gtl::FindOrNull(optimization_options, "MyMul1");
ASSERT_NE(optimization_options_my_mul_1, nullptr);
EXPECT_TRUE(optimization_options_my_mul_1->allow_non_differentiable_rewrites);
auto optimization_options_my_mul_2 =
gtl::FindOrNull(optimization_options, "MyMul2");
ASSERT_NE(optimization_options_my_mul_2, nullptr);
EXPECT_FALSE(
optimization_options_my_mul_2->allow_non_differentiable_rewrites);
}
class SleepingOptimizer : public CustomGraphOptimizer {
public:
SleepingOptimizer() {}
string name() const override { return "test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
*optimized_graph = item.graph;
Env::Default()->SleepForMicroseconds(1000000);
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
optimized_graph->add_node();
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER(SleepingOptimizer);
TEST_F(MetaOptimizerTest, OptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
GraphDef output;
GraphDef original = item.graph;
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
CompareGraphs(original, output);
}
TEST_F(MetaOptimizerTest, MetaOptimizerTimesOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(1500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
EXPECT_EQ(status.message(), "meta_optimizer exceeded deadline.");
EXPECT_EQ(original_node_size + 1, output.node_size());
}
TEST_F(MetaOptimizerTest, OptimizerDoesNotTimeOut) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config;
RewriterConfig& rewriter_config =
*config.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.add_optimizers("SleepingOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_timeout_ms(2500);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
GraphDef output;
const int original_node_size = item.graph.node_size();
const Status status =
RunMetaOptimizer(std::move(item), config, nullptr, nullptr, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(original_node_size + 2, output.node_size());
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& post_optimization_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_post_optimization_verifier_config();
post_optimization_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunInterOptimizerVerifiersOnValidGraph) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {kDevice});
GrapplerItem item;
ASSERT_TRUE(fake_input.NextItem(&item));
ConfigProto config_proto;
auto& inter_optimizer_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_inter_optimizer_verifier_config();
inter_optimizer_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
}
TEST_F(MetaOptimizerTest, RunPostOptimizationVerifiersOnInvalidGraph) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef mul_func_1 =
FunctionDefHelper::Create("MyMul1", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 =
FunctionDefHelper::Create("MyMul2", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
auto& post_optimization_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_post_optimization_verifier_config();
post_optimization_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer_with_post_verifiers(nullptr, config_proto);
Status status =
optimizer_with_post_verifiers.Optimize(nullptr, item, &output);
EXPECT_TRUE(errors::IsInvalidArgument(status));
EXPECT_TRUE(absl::StrContains(
status.message(),
"NodeDef expected inputs 'float' do not match 3 inputs specified"));
}
TEST_F(MetaOptimizerTest, RunInterOptimizerVerifiersOnInvalidGraph) {
using test::function::NDef;
using FDH = FunctionDefHelper;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef mul_func_1 =
FunctionDefHelper::Create("MyMul1", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
FunctionDef mul_func_2 =
FunctionDefHelper::Create("MyMul2", {"x:float", "y:float"}, {"z:float"},
{}, {{{"mul"}, "Mul", {"x", "y"}, {}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("dy", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("mul_1", "MyMul1", {"x0", "x1"}, {}, kDevice),
NDef("mul_2", "MyMul2", {"x0", "x1"}, {}, kDevice),
NDef("dx", "SymbolicGradient", {"x0", "x1", "dy"},
{{"f", FDH::FunctionRef("MyMul2", {})},
{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT}}},
kDevice)},
{mul_func_1, mul_func_2});
item.fetch = {"mul_1", "mul_2", "dx"};
GraphDef output;
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::TWO);
rewriter_config.add_optimizers("GrapplerItemPropertiesAccumulator");
rewriter_config.set_min_graph_nodes(-1);
auto& inter_optimizer_verifier_config =
*config_proto.mutable_graph_options()
->mutable_rewrite_options()
->mutable_inter_optimizer_verifier_config();
inter_optimizer_verifier_config.set_structure_verifier(VerifierConfig::ON);
MetaOptimizer optimizer_with_inter_verifiers(nullptr, config_proto);
Status status =
optimizer_with_inter_verifiers.Optimize(nullptr, item, &output);
EXPECT_EQ(status.code(), absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(absl::StrContains(
status.message(),
"NodeDef expected inputs 'float' do not match 3 inputs specified"));
}
TEST_F(MetaOptimizerTest, CompressConstants) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Tensor zeros_t(DT_FLOAT, TensorShape({64}));
Tensor ones_t(DT_FLOAT, TensorShape({64}));
for (int i = 0; i < 64; ++i) {
zeros_t.flat<float>()(i) = 0.0f;
ones_t.flat<float>()(i) = 1.0f;
}
Output zeros = ops::Const(scope.WithOpName("zeros"), zeros_t);
Output host_ones = ops::Const(scope.WithOpName("host_ones"), ones_t);
GrapplerItem item;
TF_CHECK_OK(scope.ToGraphDef(&item.graph));
ASSERT_EQ(item.graph.node(1).name(), "host_ones");
item.graph.mutable_node(1)->set_op("HostConst");
item.fetch = {"zeros", "host_ones"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, {});
ConfigProto config_proto;
auto& rewriter_config =
*config_proto.mutable_graph_options()->mutable_rewrite_options();
rewriter_config.set_min_graph_nodes(-1);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
bool found_zeros = false;
bool found_host_ones = false;
ASSERT_EQ(output.node_size(), 2);
for (const auto& node : output.node()) {
if (node.name() == "zeros") {
found_zeros = true;
EXPECT_EQ(node.op(), "Const");
const TensorProto& zeroes_t = node.attr().at("value").tensor();
EXPECT_EQ(zeroes_t.float_val_size(), 0);
} else if (node.name() == "host_ones") {
found_host_ones = true;
EXPECT_EQ(node.op(), "HostConst");
const TensorProto& ones_t = node.attr().at("value").tensor();
EXPECT_EQ(ones_t.float_val_size(), 1);
EXPECT_EQ(ones_t.float_val(0), 1.0f);
}
}
EXPECT_TRUE(found_zeros);
EXPECT_TRUE(found_host_ones);
auto tensors = EvaluateNodes(output, item.fetch, {});
ASSERT_EQ(tensors.size(), 2);
ASSERT_EQ(tensors_expected.size(), 2);
for (int i = 0; i < 2; ++i) {
test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
}
}
TEST_F(MetaOptimizerTest, TestTFGRemoveDeadArguments) {
using test::function::NDef;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef case_func = FunctionDefHelper::Create(
"branch_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "x"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.id = "main";
AttrValue branches;
branches.mutable_list()->add_func()->set_name("branch_func");
AttrValue output_shapes;
output_shapes.mutable_list()->add_shape();
item.graph = test::function::GDef(
{NDef("idx", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("case", "Case", {"idx", "x", "y"},
{{"branches", std::move(branches)},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"output_shapes", std::move(output_shapes)}},
kDevice)},
{case_func});
item.fetch = {"case"};
GraphDef output;
ConfigProto config_proto;
config_proto.mutable_graph_options()
->mutable_rewrite_options()
->set_experimental_conditional_code_motion(RewriterConfig::OFF);
MetaOptimizer optimizer(nullptr, config_proto);
Status status = optimizer.Optimize(nullptr, item, &output);
EXPECT_TRUE(status.ok());
EXPECT_EQ(output.library().function_size(), 1);
auto& func = output.library().function(0);
EXPECT_EQ(func.signature().input_arg_size(), 1);
EXPECT_EQ(func.signature().input_arg(0).name(), "x_tfg_result_0");
}
TEST_F(MetaOptimizerTest, TestTFGControlFlowSink) {
using test::function::NDef;
gtl::FlatMap<string, GrapplerItem::OptimizationOptions> optimization_options;
GrapplerItemPropertiesAccumulator::SetOptimizationOptions(
&optimization_options);
FunctionDef case_func = FunctionDefHelper::Create(
"branch_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
AttrValue branches;
branches.mutable_list()->add_func()->set_name("branch_func");
AttrValue output_shapes;
output_shapes.mutable_list()->add_shape();
FunctionDef foo_func = FunctionDefHelper::Create(
"Foo", {"idx:int32", "a:float", "b:float"}, {"c:float"}, {},
{{{"add"}, "Add", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"mul"}, "Mul", {"a", "b"}, {{"T", DT_FLOAT}}},
{{"case"},
"Case",
{"idx", "add:z:0", "mul:z:0"},
{{"branches", std::move(branches)},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"output_shapes", std::move(output_shapes)}}}},
{{"c", "case:output:0"}});
(*foo_func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "main";
item.graph = test::function::GDef(
{NDef("idx", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("foo", "Foo", {"idx", "a", "b"}, {}, kDevice)},
{case_func, foo_func});
item.fetch = {"foo"};
GraphDef output;
ConfigProto config_proto;
MetaOptimizer optimizer(nullptr, config_proto);
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
EXPECT_EQ(output.library().function_size(), 2);
const FunctionDef* optimized_foo_func = nullptr;
const FunctionDef* specialized_branch_func = nullptr;
for (const FunctionDef& func : output.library().function()) {
if (func.signature().name() == "Foo")
optimized_foo_func = &func;
else if (absl::StartsWith(func.signature().name(), "branch_func"))
specialized_branch_func = &func;
}
ASSERT_TRUE(optimized_foo_func);
EXPECT_EQ(optimized_foo_func->node_def_size(), 1);
ASSERT_TRUE(specialized_branch_func);
EXPECT_EQ(specialized_branch_func->node_def_size(), 3);
}
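// Custom graph optimizer that simply counts how many times Optimize() is
// called; the tests below use the count to check which functions the meta
// optimizer visits when tf.data functions are involved.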
class TfDataTestOptimizer : public CustomGraphOptimizer {
public:
static void InitCount() { count_ = 0; }
static int GetCount() { return count_; }
TfDataTestOptimizer() = default;
~TfDataTestOptimizer() override = default;
TfDataTestOptimizer(const TfDataTestOptimizer&) = delete;
TfDataTestOptimizer& operator=(const TfDataTestOptimizer& other) = delete;
std::string name() const override { return "tf_data_test_optimizer"; }
bool UsesFunctionLibrary() const override { return false; }
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
++count_;
*optimized_graph = item.graph;
return absl::OkStatus();
}
private:
static std::atomic<int> count_;
};
std::atomic<int> TfDataTestOptimizer::count_;
REGISTER_GRAPH_OPTIMIZER(TfDataTestOptimizer);
enum class FuncNestingType {
CallFromNode = 0,
CallFromAttr = 1,
CallFromList = 2
};
class TfDataTestFixture
: public ::testing::TestWithParam<std::tuple<bool, bool, FuncNestingType>> {
protected:
void SetUp() override {
is_inner_func_tf_data_ = std::get<0>(GetParam());
is_outer_func_tf_data_ = std::get<1>(GetParam());
func_nesting_type_ = std::get<2>(GetParam());
}
bool is_inner_func_tf_data_ = false;
bool is_outer_func_tf_data_ = false;
FuncNestingType func_nesting_type_ = FuncNestingType::CallFromNode;
};
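// The three SetUpCallFrom* helpers build an outer function that references
// inner_func in three different ways: directly as a node op, through a
// function-valued attr, and inside a list-valued attr.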
void SetUpCallFromNode(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"inner_func"}, "inner_func", {"x", "x"}, {{"T", DT_FLOAT}}}},
{{"z", "inner_func:z:0"}});
}
void SetUpCallFromAttr(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"identity"},
"Identity",
{"x"},
{{"T", DT_FLOAT},
{"f", FunctionDefHelper::FunctionRef("inner_func", {})}}}},
{{"z", "x"}});
}
void SetUpCallFromList(FunctionDef& outer_func) {
outer_func = FunctionDefHelper::Create(
"outer_func", {"x:float"}, {"z:float"}, {},
{{{"identity"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}},
{{"z", "x"}});
AttrValue_ListValue* list_value =
(*outer_func.mutable_node_def(0)->mutable_attr())["list"].mutable_list();
NameAttrList* entry = list_value->add_func();
entry->set_name("inner_func");
}
TEST_P(TfDataTestFixture, TfDataTests) {
using test::function::NDef;
FunctionDef inner_func = FunctionDefHelper::Create(
"inner_func", {"x:float", "y:float"}, {"z:float"}, {},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", DT_FLOAT}}}},
{{"z", "mul:z:0"}});
(*inner_func.mutable_attr())[data::kTFDataFunction].set_b(
is_inner_func_tf_data_);
FunctionDef outer_func;
switch (func_nesting_type_) {
case FuncNestingType::CallFromNode:
SetUpCallFromNode(outer_func);
break;
case FuncNestingType::CallFromAttr:
SetUpCallFromAttr(outer_func);
break;
case FuncNestingType::CallFromList:
SetUpCallFromList(outer_func);
break;
default:
break;
}
(*outer_func.mutable_attr())[data::kTFDataFunction].set_b(
is_outer_func_tf_data_);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("outer_func_node", "outer_func", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_s", "Identity", {"outer_func_node:0"}, {{"T", DT_FLOAT}},
kDevice)},
{inner_func, outer_func});
TfDataTestOptimizer::InitCount();
ConfigProto config_proto;
auto& rewriter_config =
*(config_proto.mutable_graph_options()->mutable_rewrite_options());
rewriter_config.add_optimizers("TfDataTestOptimizer");
rewriter_config.set_min_graph_nodes(-1);
rewriter_config.set_meta_optimizer_iterations(RewriterConfig::ONE);
MetaOptimizer optimizer(nullptr, config_proto);
GraphDef output;
const Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
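  // The test optimizer runs once for the main graph and once for each
  // function that is not (directly or transitively) marked as a tf.data
  // function.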
int expected_count = 3;
if (is_outer_func_tf_data_)
expected_count = 1;
else if (is_inner_func_tf_data_)
expected_count = 2;
EXPECT_EQ(TfDataTestOptimizer::GetCount(), expected_count);
FunctionLibraryDefinition flib(OpRegistry::Global(), output.library());
const FunctionDef* outer_func_after_opt = flib.Find("outer_func");
const FunctionDef* inner_func_after_opt = flib.Find("inner_func");
EXPECT_EQ(data::IsTFDataFunction(*outer_func_after_opt),
is_outer_func_tf_data_);
if (is_outer_func_tf_data_ || is_inner_func_tf_data_) {
EXPECT_EQ(data::IsTFDataFunction(*inner_func_after_opt), true);
} else {
EXPECT_EQ(data::IsTFDataFunction(*inner_func_after_opt), false);
}
}
INSTANTIATE_TEST_SUITE_P(
MetaOptimizerTest, TfDataTestFixture,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(FuncNestingType::CallFromNode,
FuncNestingType::CallFromAttr,
FuncNestingType::CallFromList)),
[](const ::testing::TestParamInfo<TfDataTestFixture::ParamType>& info) {
bool is_inner_func_tf_data = std::get<0>(info.param);
bool is_outer_func_tf_data = std::get<1>(info.param);
FuncNestingType func_nesting_type = std::get<2>(info.param);
std::string test_name;
if (is_inner_func_tf_data && is_outer_func_tf_data)
test_name = "both_funcs_tf_data";
else if (is_inner_func_tf_data)
test_name = "inner_func_tf_data";
else if (is_outer_func_tf_data)
test_name = "outer_func_tf_data";
else
test_name = "no_func_tf_data";
switch (func_nesting_type) {
case FuncNestingType::CallFromNode:
test_name += "_call_from_node";
break;
case FuncNestingType::CallFromAttr:
test_name += "_call_from_attribute";
break;
case FuncNestingType::CallFromList:
test_name += "_call_from_list";
break;
default:
break;
}
return test_name;
});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/meta_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/meta_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7965bdd1-2403-4bc1-9e96-9ed3b45991d5 | cpp | tensorflow/tensorflow | blocking_validator_runner | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <unistd.h>
#include <algorithm>
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
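// Returns a pseudo-random 10-character alphanumeric suffix, used to give
// each validation run a unique storage file name.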
std::string GenerateRandomString() {
static const char charset[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
const int size = 10;
std::string result;
result.resize(size);
for (int i = 0; i < size; ++i) {
result[i] = charset[rand() % (sizeof(charset) - 1)];
}
return result;
}
}
BlockingValidatorRunner::BlockingValidatorRunner(
const ValidatorRunnerOptions& options)
: per_test_timeout_ms_(options.per_test_timeout_ms),
storage_path_base_(options.storage_path) {
validator_runner_impl_ = std::make_unique<ValidatorRunnerImpl>(
CreateModelLoaderPath(options), options.storage_path,
options.data_directory_path, options.per_test_timeout_ms,
options.custom_input_data.empty()
? nullptr
: std::make_unique<CustomValidationEmbedder>(
options.custom_input_batch_size, options.custom_input_data,
options.error_reporter),
options.error_reporter, options.nnapi_sl, options.gpu_plugin_handle,
options.validation_entrypoint_name, options.benchmark_result_evaluator);
}
MinibenchmarkStatus BlockingValidatorRunner::Init() {
return validator_runner_impl_->Init();
}
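// Runs validation synchronously for each of the given TFLiteSettings: starts
// the asynchronous runner, polls until every result has arrived or the
// overall deadline expires, and returns one serialized BenchmarkEvent per
// requested setting.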
std::vector<FlatBufferBuilder> BlockingValidatorRunner::TriggerValidation(
const std::vector<const TFLiteSettings*>& for_settings) {
if (for_settings.empty()) {
return {};
}
std::string storage_path =
absl::StrCat(storage_path_base_, ".", GenerateRandomString());
TFLITE_LOG_PROD(TFLITE_LOG_INFO, "Validation storage path: %s",
storage_path.c_str());
std::vector<flatbuffers::FlatBufferBuilder> to_be_run;
std::vector<TFLiteSettingsT> for_settings_obj;
for_settings_obj.reserve(for_settings.size());
for (auto settings : for_settings) {
TFLiteSettingsT tflite_settings;
settings->UnPackTo(&tflite_settings);
flatbuffers::FlatBufferBuilder copy;
copy.Finish(CreateTFLiteSettings(copy, &tflite_settings));
to_be_run.emplace_back(std::move(copy));
for_settings_obj.emplace_back(tflite_settings);
}
validator_runner_impl_->TriggerValidationAsync(std::move(to_be_run),
storage_path);
int64_t total_timeout_ms = per_test_timeout_ms_ * (1 + for_settings.size());
int64_t deadline_us = Validator::BootTimeMicros() + total_timeout_ms * 1000;
bool within_timeout = true;
while ((validator_runner_impl_->GetNumCompletedResults()) <
for_settings.size() &&
(within_timeout = Validator::BootTimeMicros() < deadline_us)) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results =
validator_runner_impl_->GetCompletedResults();
if (!within_timeout) {
TFLITE_LOG_PROD(
TFLITE_LOG_WARNING,
"Validation timed out after %ld ms. Return before all tests finished.",
total_timeout_ms);
} else if (for_settings.size() != results.size()) {
    TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
                    "Validation completed. Started benchmarking for %zu "
                    "TFLiteSettings, received %zu results.",
                    for_settings.size(), results.size());
}
std::vector<TFLiteSettingsT> result_settings;
result_settings.reserve(results.size());
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
TFLiteSettingsT event_settings;
event->tflite_settings()->UnPackTo(&event_settings);
result_settings.emplace_back(std::move(event_settings));
}
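  // Any setting that never produced a completion event gets a synthetic
  // ERROR event with kMinibenchmarkCompletionEventMissing, so the caller
  // always receives one result per requested TFLiteSettings.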
for (auto& settings_obj : for_settings_obj) {
auto result_it =
std::find(result_settings.begin(), result_settings.end(), settings_obj);
if (result_it == result_settings.end()) {
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN,
0, 0,
0,
kMinibenchmarkCompletionEventMissing),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
results.emplace_back(std::move(fbb));
}
}
(void)unlink(storage_path.c_str());
return results;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <iostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
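// Result evaluator that unconditionally treats the accuracy check as passed,
// letting the tests exercise the runner without golden outputs.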
class CustomResultEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class BlockingValidatorRunnerTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
options_.data_directory_path = ::testing::TempDir();
options_.storage_path =
absl::StrCat(::testing::TempDir(), "storage_path.fb.1");
options_.per_test_timeout_ms = 5000;
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
}
std::string plain_model_path_;
ValidatorRunnerOptions options_;
bool should_perform_test_ = true;
};
TEST_F(BlockingValidatorRunnerTest, SucceedWithEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdCloexecEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_fd = open(options_.model_path.c_str(), O_RDONLY | O_CLOEXEC);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_GPU));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithBufferModel) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer =
g_tflite_acceleration_embedded_mobilenet_validation_model;
options_.model_size =
g_tflite_acceleration_embedded_mobilenet_validation_model_len;
options_.model_path.clear();
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWithFdModelCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_path.clear();
options_.model_fd = open(plain_model_path_.c_str(), O_RDONLY);
ASSERT_GE(options_.model_fd, 0);
struct stat stat_buf = {0};
ASSERT_EQ(fstat(options_.model_fd, &stat_buf), 0);
options_.model_size = stat_buf.st_size;
options_.model_offset = 0;
options_.custom_input_batch_size = 3;
options_.custom_input_data = {std::vector<uint8_t>(3 * 224 * 224 * 3, 1)};
CustomResultEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
#ifdef __ANDROID__
fbb.Finish(CreateTFLiteSettings(fbb, Delegate_XNNPACK));
#else
fbb.Finish(CreateTFLiteSettings(fbb));
#endif
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
}
}
TEST_F(BlockingValidatorRunnerTest, SucceedWhenRunningMultipleTimes) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
int num_runs = 3;
for (int i = 0; i < num_runs; i++) {
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer()),
flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_END);
EXPECT_TRUE(event->result()->ok());
}
}
}
TEST_F(BlockingValidatorRunnerTest, ReturnErrorWhenTimedOut) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.per_test_timeout_ms = 50;
BlockingValidatorRunner runner(options_);
ASSERT_EQ(runner.Init(), kMinibenchmarkSuccess);
FlatBufferBuilder fbb;
fbb.Finish(CreateTFLiteSettings(fbb));
std::vector<FlatBufferBuilder> results = runner.TriggerValidation(
{flatbuffers::GetRoot<TFLiteSettings>(fbb.GetBufferPointer())});
EXPECT_THAT(results, testing::SizeIs(1));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
EXPECT_EQ(event->event_type(), BenchmarkEventType_ERROR);
ASSERT_NE(nullptr, event->error());
EXPECT_THAT(event->error()->mini_benchmark_error_code(),
testing::AnyOf(kMinibenchmarkCommandTimedOut,
kMinibenchmarkCompletionEventMissing));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/blocking_validator_runner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3b477b1b-3671-4769-aaca-1b58b7b1bac7 | cpp | tensorflow/tensorflow | prefetch_dataset_op | tensorflow/core/kernels/data/prefetch_dataset_op.cc | tensorflow/core/kernels/data/prefetch_dataset_op_test.cc | #include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include <algorithm>
#include <deque>
#include <limits>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/prefetch_autotuner.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/profiler/lib/traceme_encode.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tsl/platform/mutex.h"
namespace tensorflow {
namespace data {
constexpr const char* const PrefetchDatasetOp::kDatasetType;
constexpr const char* const PrefetchDatasetOp::kInputDataset;
constexpr const char* const PrefetchDatasetOp::kBufferSize;
constexpr const char* const PrefetchDatasetOp::kOutputTypes;
constexpr const char* const PrefetchDatasetOp::kOutputShapes;
constexpr const char* const PrefetchDatasetOp::kSlackPeriod;
constexpr const char* const PrefetchDatasetOp::kLegacyAutotune;
constexpr const char* const PrefetchDatasetOp::kBufferSizeMin;
namespace {
constexpr double kSleepFactor = 0.2;
constexpr char kBuffer[] = "buffer";
constexpr char kStatus[] = "status";
constexpr char kSizeSuffix[] = ".size";
constexpr char kCodeSuffix[] = ".code";
constexpr char kErrorMessageSuffix[] = ".error_message";
}
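// A dataset that wraps its input and decouples production from consumption
// with a bounded (optionally autotuned) prefetch buffer filled by a
// background thread.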
class PrefetchDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, int64_t buffer_size,
int64_t slack_period, bool legacy_autotune, int64_t buffer_size_min)
: DatasetBase(DatasetContext(ctx)),
input_(input),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_->Ref();
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* buffer_size = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(buffer_size_, &buffer_size));
AttrValue slack_period_attr;
b->BuildAttrValue(slack_period_, &slack_period_attr);
AttrValue legacy_autotune_attr;
b->BuildAttrValue(legacy_autotune_, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
b->BuildAttrValue(buffer_size_min_, &buffer_size_min_attr);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph_node, buffer_size},
{std::make_pair(kSlackPeriod, slack_period_attr),
std::make_pair(kLegacyAutotune, legacy_autotune_attr),
std::make_pair(kBufferSizeMin, buffer_size_min_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
mu_(std::make_shared<mutex>()),
cond_var_(std::make_shared<condition_variable>()),
buffer_size_min_(params.dataset->buffer_size_min_),
legacy_autotune_(params.dataset->legacy_autotune_),
buffer_size_(std::make_shared<model::SharedState>(
legacy_autotune_ ? 0 : params.dataset->buffer_size_, mu_,
cond_var_)) {
slack_us_ = 0;
}
~Iterator() override {
CancelThreads();
if (deregister_fn_) deregister_fn_();
}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(*mu_);
auto_tuner_ = std::make_unique<PrefetchAutotuner>(
dataset()->buffer_size_, dataset()->buffer_size_min_,
ctx->ram_budget_manager());
interleave_depth_ = ctx->interleave_depth();
if (buffer_size_->value == model::kAutotune) {
buffer_size_->value = buffer_size_min_;
}
cancellation_manager_ = std::make_unique<CancellationManager>();
TF_RETURN_IF_ERROR(RegisterCancellationCallback(
ctx->cancellation_manager(), [this]() { CancelThreads(); },
&deregister_fn_));
IteratorContext::Params params(ctx);
params.cancellation_manager = cancellation_manager_.get();
IteratorContext iter_ctx(params);
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&iter_ctx, this, prefix(), &input_impl_));
if (ctx->warm_start() && !ctx->is_restoring()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
ctx->MergeCheckpoint(iter_ctx.checkpoint());
return absl::OkStatus();
}
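    // Consumer side: waits until the prefetch thread has buffered an element
    // (or has finished, or the buffer limit is zero) and then hands it out.
    // With a buffer limit of zero this falls through to a synchronous
    // GetNext on the input iterator.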
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
const auto& stats_aggregator = ctx->stats_aggregator();
{
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
while (buffer_.empty() && !prefetch_thread_finished_ &&
buffer_limit() != 0) {
if (legacy_autotune_) {
auto_tuner_->RecordEmpty();
buffer_size_->value = auto_tuner_->buffer_limit();
}
RecordStop(ctx);
cond_var_->wait(l);
RecordStart(ctx);
}
if (!buffer_.empty()) {
return Consume(ctx, out_tensors, end_of_sequence);
}
if (prefetch_thread_finished_) {
*end_of_sequence = true;
return absl::OkStatus();
}
DCHECK_EQ(buffer_limit(), 0);
}
mutex_lock input_l(input_mu_);
{
mutex_lock l(*mu_);
if (stats_aggregator) {
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit()), num_elements());
}
}
return input_impl_->GetNext(ctx, out_tensors, end_of_sequence);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
double buffer_size_min = buffer_size_min_;
double buffer_size_max = std::numeric_limits<int64_t>::max();
if (buffer_size_->value != model::kAutotune && buffer_size_->value != 0) {
buffer_size_min = buffer_size_->value;
buffer_size_max = buffer_size_->value;
}
return model::MakeAsyncKnownRatioNode(
std::move(args),
1,
{model::MakeParameter(kBufferSize, buffer_size_, buffer_size_min,
buffer_size_max)},
legacy_autotune_);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
if (ctx->symbolic_checkpoint()) {
return absl::OkStatus();
}
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kBufferSize, buffer_.size()));
for (size_t i = 0; i < buffer_.size(); i++) {
auto& buffer_element = buffer_[i];
TF_RETURN_IF_ERROR(WriteStatus(writer, i, buffer_element.status));
if (buffer_element.status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), buffer_element.value.size()));
for (size_t j = 0; j < buffer_element.value.size(); j++) {
TF_RETURN_IF_ERROR(writer->WriteTensor(
absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"), buffer_element.value[j]));
}
}
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock input_l(input_mu_);
mutex_lock l(*mu_);
DCHECK(!prefetch_thread_);
DCHECK(buffer_.empty());
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
if (!ctx->symbolic_checkpoint()) {
TF_RETURN_IF_ERROR(RestoreBuffer(ctx, reader));
}
if (ctx->warm_start()) {
TF_RETURN_IF_ERROR(EnsureThreadsStarted(ctx));
}
cond_var_->notify_all();
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
int64_t limit = -1, size = -1;
data::TraceMeMetadata result;
if (mu_->try_lock()) {
limit = buffer_limit();
size = buffer_.size();
if (!buffer_.empty()) {
        std::vector<std::string> shapes;
        shapes.reserve(buffer_.front().value.size());
for (const auto& component : buffer_.front().value) {
shapes.push_back(component.shape().DebugString());
}
result.push_back(std::make_pair("next_element_shapes",
absl::StrJoin(shapes, ",")));
}
mu_->unlock();
}
result.push_back(std::make_pair(
"buffer_limit",
limit == -1
? kTraceInfoUnavailable
: strings::Printf("%lld", static_cast<long long>(limit))));
result.push_back(std::make_pair(
"autotune",
dataset()->buffer_size_ == model::kAutotune ? "true" : "false"));
result.push_back(std::make_pair(
"autotune_mode", legacy_autotune_ ? "legacy" : "performance"));
if (dataset()->slack_period_ > 0) {
result.push_back(std::make_pair(
"slack",
strings::Printf("%lld", static_cast<long long>(slack_us_.load()))));
}
result.push_back(std::make_pair(
"interleave_depth",
strings::Printf("%lld", static_cast<long long>(interleave_depth_))));
return result;
}
private:
struct BufferElement {
explicit BufferElement(IteratorContext* ctx)
: uid(tensorflow::EnvTime::NowNanos()),
checkpoint(MemoryCheckpoint{ctx->id_registry()}) {}
Status status;
std::vector<Tensor> value;
int64_t created_us;
const uint64 uid;
MemoryCheckpoint checkpoint;
};
Status RestoreBuffer(IteratorContext* const ctx,
IteratorStateReader* const reader)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
size_t buffer_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kBufferSize, &temp));
buffer_size = static_cast<size_t>(temp);
}
for (size_t i = 0; i < buffer_size; i++) {
buffer_.emplace_back(ctx);
auto& buffer_element = buffer_.back();
TF_RETURN_IF_ERROR(ReadStatus(reader, i, &buffer_element.status));
if (buffer_element.status.ok()) {
size_t value_size;
{
int64_t temp;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, kSizeSuffix), &temp));
value_size = static_cast<size_t>(temp);
}
buffer_element.value.reserve(value_size);
for (size_t j = 0; j < value_size; j++) {
buffer_element.value.emplace_back();
TF_RETURN_IF_ERROR(
reader->ReadTensor(ctx->flr(), absl::StrCat(prefix(), "::", i),
absl::StrCat(kBuffer, "[", j, "]"),
&buffer_element.value.back()));
}
}
RecordBufferEnqueue(ctx, buffer_element.value);
}
return absl::OkStatus();
}
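    // Current buffer capacity: driven by the legacy autotuner when enabled,
    // otherwise by the buffer_size_ model parameter.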
int64_t buffer_limit() const TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (legacy_autotune_) {
return auto_tuner_->buffer_limit();
}
return buffer_size_->value;
}
void CancelThreads() TF_LOCKS_EXCLUDED(mu_) {
cancellation_manager_->StartCancel();
mutex_lock l(*mu_);
cancelled_ = true;
cond_var_->notify_all();
}
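    // Pops the front element of the buffer, merging its checkpoint into the
    // caller's context and updating the autotuner and slack estimate.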
Status Consume(IteratorContext* ctx, std::vector<Tensor>* out_tensors,
bool* end_of_sequence) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const auto& stats_aggregator = ctx->stats_aggregator();
if (stats_aggregator) {
double buffer_limit_ = buffer_limit();
stats_aggregator->AddToHistogram(
stats_utils::BufferUtilizationHistogramName(dataset()->node_name()),
{static_cast<float>(buffer_.size()) /
static_cast<float>(buffer_limit_)},
num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferSizeScalarName(dataset()->node_name()),
static_cast<float>(buffer_.size()), num_elements());
stats_aggregator->AddScalar(
stats_utils::BufferCapacityScalarName(dataset()->node_name()),
static_cast<float>(buffer_limit_), num_elements());
}
Status s = buffer_.front().status;
if (s.ok()) {
int64_t buffer_element_id = buffer_.front().uid;
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchConsume", {{"element_id", buffer_element_id}});
},
profiler::kInfo);
if (dataset()->slack_period_ > 0 &&
(num_elements() + 1) % dataset()->slack_period_ == 0) {
int64_t slack_us = EnvTime::NowMicros() - buffer_.front().created_us;
slack_us_ = kSleepFactor * slack_us_ + slack_us;
VLOG(2) << "Setting slack_us_: " << slack_us_;
}
*out_tensors = std::move(buffer_.front().value);
ctx->MergeCheckpoint(&buffer_.front().checkpoint);
RecordBufferDequeue(ctx, *out_tensors);
if (legacy_autotune_ && !auto_tuner_->HasElementSize()) {
auto_tuner_->SetElementSize(GetAllocatedBytes(*out_tensors));
}
} else {
RecordBufferDequeue(ctx, buffer_.front().value);
}
if (legacy_autotune_) {
auto_tuner_->RecordConsumption(buffer_.size());
buffer_size_->value = auto_tuner_->buffer_limit();
}
buffer_.pop_front();
*end_of_sequence = false;
cond_var_->notify_all();
return s;
}
Status EnsureThreadsStarted(IteratorContext* ctx)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
if (!prefetch_thread_) {
std::shared_ptr<IteratorContext> new_ctx =
std::make_shared<IteratorContext>(*ctx);
prefetch_thread_ = ctx->StartThread(
"tf_data_prefetch", [this, new_ctx]() { PrefetchThread(new_ctx); });
}
return absl::OkStatus();
}
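    // Producer side: runs on the background prefetch thread, pulling
    // elements from the input iterator into the buffer, blocking while the
    // buffer is at its limit, until cancellation or end of input.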
void PrefetchThread(const std::shared_ptr<IteratorContext>& ctx) {
RecordStart(ctx.get());
auto cleanup = gtl::MakeCleanup([this, ctx] { RecordStop(ctx.get()); });
int num_produced = 0;
while (true) {
{
mutex_lock l(*mu_);
while (!cancelled_ && buffer_.size() >= buffer_limit()) {
RecordStop(ctx.get());
cond_var_->wait(l);
RecordStart(ctx.get());
}
if (cancelled_) {
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
}
if (dataset()->slack_period_ > 0 &&
num_produced % dataset()->slack_period_ == 0) {
VLOG(2) << "Sleeping for: " << slack_us_ * kSleepFactor;
ctx->env()->SleepForMicroseconds(slack_us_ * kSleepFactor);
}
mutex_lock input_l(input_mu_);
bool end_of_sequence = false;
BufferElement buffer_element(ctx.get());
{
tsl::profiler::TraceMe traceme(
[&] {
return tsl::profiler::TraceMeEncode(
"PrefetchProduce", {{"element_id", buffer_element.uid}});
},
profiler::kInfo);
buffer_element.status = input_impl_->GetNext(
ctx.get(), &buffer_element.value, &end_of_sequence);
buffer_element.checkpoint.Merge(ctx->checkpoint());
}
if (buffer_element.status.ok() && end_of_sequence) {
mutex_lock l(*mu_);
prefetch_thread_finished_ = true;
cond_var_->notify_all();
return;
}
{
mutex_lock l(*mu_);
RecordBufferEnqueue(ctx.get(), buffer_element.value);
buffer_element.created_us = EnvTime::NowMicros();
buffer_.push_back(std::move(buffer_element));
cond_var_->notify_all();
}
++num_produced;
}
}
Status WriteStatus(IteratorStateWriter* writer, size_t index,
const Status& status) TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
TF_RETURN_IF_ERROR(
writer->WriteScalar(absl::StrCat(prefix(), "::", index), CodeKey(),
static_cast<int64_t>(status.code())));
if (!status.ok()) {
TF_RETURN_IF_ERROR(writer->WriteScalar(
absl::StrCat(prefix(), "::", index), ErrorMessageKey(),
std::string(status.message())));
}
return absl::OkStatus();
}
Status ReadStatus(IteratorStateReader* reader, size_t index, Status* status)
TF_EXCLUSIVE_LOCKS_REQUIRED(*mu_) {
int64_t code_int;
TF_RETURN_IF_ERROR(reader->ReadScalar(absl::StrCat(prefix(), "::", index),
CodeKey(), &code_int));
absl::StatusCode code = static_cast<absl::StatusCode>(code_int);
if (code != absl::StatusCode::kOk) {
tstring error_message;
TF_RETURN_IF_ERROR(
reader->ReadScalar(absl::StrCat(prefix(), "::", index),
ErrorMessageKey(), &error_message));
*status = Status(code, error_message);
} else {
*status = absl::OkStatus();
}
return absl::OkStatus();
}
string CodeKey() { return absl::StrCat(kStatus, kCodeSuffix); }
string ErrorMessageKey() {
return absl::StrCat(kStatus, kErrorMessageSuffix);
}
const std::shared_ptr<mutex> mu_;
mutex input_mu_ TF_ACQUIRED_BEFORE(*mu_);
std::unique_ptr<CancellationManager> cancellation_manager_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(input_mu_);
const std::shared_ptr<condition_variable> cond_var_;
const int64_t buffer_size_min_;
std::unique_ptr<PrefetchAutotuner> auto_tuner_ TF_GUARDED_BY(*mu_);
std::deque<BufferElement> buffer_ TF_GUARDED_BY(*mu_);
bool cancelled_ TF_GUARDED_BY(*mu_) = false;
bool prefetch_thread_finished_ TF_GUARDED_BY(*mu_) = false;
const bool legacy_autotune_;
std::atomic<int64_t> slack_us_;
const std::shared_ptr<model::SharedState> buffer_size_;
std::function<void()> deregister_fn_;
int64 interleave_depth_ = -1;
std::unique_ptr<Thread> prefetch_thread_ TF_GUARDED_BY(*mu_);
};
const DatasetBase* const input_;
const int64_t buffer_size_;
const int64_t slack_period_;
const bool legacy_autotune_ = true;
const int64_t buffer_size_min_ = 0;
absl::Status random_indexing_compatible_;
TraceMeMetadata traceme_metadata_;
};
PrefetchDatasetOp::PrefetchDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
if (ctx->HasAttr(kSlackPeriod)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSlackPeriod, &slack_period_));
}
if (ctx->HasAttr(kLegacyAutotune)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kLegacyAutotune, &legacy_autotune_));
}
if (ctx->HasAttr(kBufferSizeMin)) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kBufferSizeMin, &buffer_size_min_));
}
if (GetExperiments().contains("autotune_buffer_optimization")) {
legacy_autotune_ = false;
buffer_size_min_ = std::max(static_cast<int64_t>(1), buffer_size_min_);
}
}
void PrefetchDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t buffer_size = 0;
OP_REQUIRES_OK(ctx,
ParseScalarArgument<int64_t>(ctx, kBufferSize, &buffer_size));
OP_REQUIRES(ctx, buffer_size >= 0 || buffer_size == model::kAutotune,
errors::InvalidArgument("buffer_size must be >= 0 or set "
"buffer_size to be ",
model::kAutotune, " for auto-tuning"));
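  // A buffer_size of model::kAutotune (-1) requests dynamic tuning of the
  // buffer size.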
if (buffer_size == model::kAutotune) {
metrics::RecordTFDataAutotune(kDatasetType);
}
*output = new Dataset(ctx, input, buffer_size, slack_period_,
legacy_autotune_, buffer_size_min_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset").Device(DEVICE_CPU).Priority(2),
PrefetchDatasetOp);
REGISTER_KERNEL_BUILDER(Name("PrefetchDataset")
.Device(DEVICE_GPU)
.HostMemory("buffer_size")
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
PrefetchDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/prefetch_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "prefetch_dataset";
class PrefetchDatasetOpTest : public DatasetOpsTestBase {};
class PrefetchDatasetParams : public DatasetParams {
public:
template <typename T>
PrefetchDatasetParams(T input_dataset_params, int64_t buffer_size,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int64_t slack_period, bool legacy_autotune,
int64_t buffer_size_min, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
buffer_size_(buffer_size),
slack_period_(slack_period),
legacy_autotune_(legacy_autotune),
buffer_size_min_(buffer_size_min) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {buffer_size_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(PrefetchDatasetOp::kInputDataset);
input_names->emplace_back(PrefetchDatasetOp::kBufferSize);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("slack_period", slack_period_);
attr_vector->emplace_back("legacy_autotune", legacy_autotune_);
attr_vector->emplace_back("buffer_size_min", buffer_size_min_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override {
return PrefetchDatasetOp::kDatasetType;
}
private:
int64_t buffer_size_;
int64_t slack_period_;
bool legacy_autotune_;
int64_t buffer_size_min_;
};
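// The parameter sets below all prefetch a 10-element tensor-slice dataset,
// varying the buffer size (fixed, zero, autotuned), slack period, autotune
// mode, and minimum buffer size.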
PrefetchDatasetParams PrefetchDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
5,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
0,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams4() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
true,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams5() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
5,
false,
0,
kNodeName);
}
PrefetchDatasetParams PrefetchDatasetParams6() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-1,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
3,
kNodeName);
}
PrefetchDatasetParams InvalidBufferSizePrefetchDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{10, 1},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})},
"tensor_slice");
return PrefetchDatasetParams(
tensor_slice_dataset_params,
-2,
{DT_INT64},
{PartialTensorShape({1})},
0,
true,
0,
kNodeName);
}
std::vector<GetNextTestCase<PrefetchDatasetParams>> GetNextTestCases() {
return {
{PrefetchDatasetParams1(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams3(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams4(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams5(),
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams6(),
CreateTensors<int64_t>(
TensorShape{1},
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_GET_NEXT_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
GetNextTestCases())
TEST_F(PrefetchDatasetOpTest, DatasetNodeName) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(PrefetchDatasetOpTest, DatasetTypeString) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(PrefetchDatasetOp::kDatasetType)));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes(dataset_params.output_shapes()));
}
std::vector<CardinalityTestCase<PrefetchDatasetParams>> CardinalityTestCases() {
return {{PrefetchDatasetParams1(),
10},
{PrefetchDatasetParams2(),
10},
{PrefetchDatasetParams3(),
10},
{PrefetchDatasetParams4(),
10},
{PrefetchDatasetParams5(),
10}};
}
DATASET_CARDINALITY_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
CardinalityTestCases())
TEST_F(PrefetchDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes(dataset_params.output_shapes()));
}
TEST_F(PrefetchDatasetOpTest, IteratorOutputPrefix) {
auto dataset_params = PrefetchDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
PrefetchDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<PrefetchDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{PrefetchDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{PrefetchDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1}, {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})},
{
PrefetchDatasetParams5(),
{0, 4, 11},
CreateTensors<int64_t>(
TensorShape{1},
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(PrefetchDatasetOpTest, PrefetchDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(PrefetchDatasetOpTest, InvalidBufferSize) {
auto dataset_params = InvalidBufferSizePrefetchDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(), error::INVALID_ARGUMENT);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/prefetch_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d7045fb-b6b0-4986-a243-6e549d212000 | cpp | tensorflow/tensorflow | lower_if_op | tensorflow/core/common_runtime/lower_if_op.cc | tensorflow/core/common_runtime/lower_if_op_test.cc | #include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
namespace tensorflow {
namespace {
using NodeOut = NodeBuilder::NodeOut;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsConstants::kLowerAsMultiDeviceFunctionAttr;
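// Helper that lowers a single If node into Switch/Merge dataflow: the
// predicate feeds a Switch for every data input, the then/else branch
// functions are invoked as function call nodes anchored on pivot Identity
// nodes, and the branch outputs are joined with Merge nodes.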
class CondBuilder {
public:
enum Branch { kElseBranch = 0, kThenBranch = 1 };
CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph);
Status CreatePivotNodes();
Status AddInputs();
Status AddOutputs();
Status BuildLoweredIfOutput();
private:
string NewName(const string& infix);
Status AddInput(Node* src, int src_output);
Status SetColocationAndFinalize(NodeBuilder node_builder, Graph* graph,
Node** created_node);
std::vector<NodeOut> outputs_;
Node* control_predecessor_;
Node* if_op_;
const AttrValue* coloc_attr_;
Node* lowered_if_output_;
OutputTensor pred_;
Node* pivot_f_;
Node* pivot_t_;
Node* then_call_node_;
Node* else_call_node_;
Node* branch_executed_node_;
Graph* graph_;
string name_;
bool keep_node_fetchable_;
NodeDebugInfo debug_info_;
NodeBuilder then_call_builder_;
NodeBuilder else_call_builder_;
};
CondBuilder::CondBuilder(Node* if_op, const NameAttrList& then_fn,
const NameAttrList& else_fn, bool keep_node_fetchable,
Graph* graph)
: if_op_(if_op),
coloc_attr_(if_op_->attrs().Find(kColocationAttrName)),
graph_(graph),
name_(if_op->name()),
keep_node_fetchable_(keep_node_fetchable),
debug_info_(*if_op_),
then_call_builder_(NewName("then"), then_fn.name(), graph->op_registry(),
&debug_info_),
else_call_builder_(NewName("else"), else_fn.name(), graph->op_registry(),
&debug_info_) {
TF_CHECK_OK(if_op_->input_tensor(0, &pred_));
then_call_builder_.Device(if_op_->requested_device());
then_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : then_fn.attr()) {
then_call_builder_.Attr(i.first, i.second);
}
else_call_builder_.Device(if_op_->requested_device());
else_call_builder_.Attr(kLowerAsMultiDeviceFunctionAttr, true);
for (const auto& i : else_fn.attr()) {
else_call_builder_.Attr(i.first, i.second);
}
}
Status CondBuilder::SetColocationAndFinalize(NodeBuilder node_builder,
Graph* graph,
Node** created_node) {
if (coloc_attr_ != nullptr) {
node_builder = node_builder.Attr(kColocationAttrName, *coloc_attr_);
}
return node_builder.Finalize(graph, created_node);
}
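// Creates the Switch node on the predicate and the two pivot Identity nodes
// that anchor the else branch (output 0) and the then branch (output 1).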
Status CondBuilder::CreatePivotNodes() {
Node* switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("switch_pred"), "Switch",
graph_->op_registry(), &debug_info_)
.Input(NodeOut(pred_))
.Input(NodeOut(pred_))
.Device(if_op_->requested_device()),
graph_, &switch_pred));
control_predecessor_ = switch_pred;
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_f"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kElseBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_f_));
TF_RETURN_IF_ERROR(
SetColocationAndFinalize(NodeBuilder(NewName("pivot_t"), "Identity",
graph_->op_registry(), &debug_info_)
.Input(switch_pred, kThenBranch)
.Device(if_op_->requested_device()),
graph_, &pivot_t_));
return absl::OkStatus();
}
string CondBuilder::NewName(const string& infix) {
return graph_->NewName(strings::StrCat(name_, "/", infix));
}
Status CondBuilder::AddInput(Node* src, int src_output) {
Node* input;
NodeDebugInfo debug_info(*src);
TF_RETURN_IF_ERROR(
NodeBuilder(NewName(src->name()), "Switch", graph_->op_registry(),
&debug_info)
.Input(src, src_output)
.Input(pred_)
.Device(src->requested_device())
.Attr(kColocationAttrName,
{absl::StrCat(kColocationGroupPrefix, src->name())})
.Finalize(graph_, &input));
then_call_builder_.Input(input, kThenBranch);
else_call_builder_.Input(input, kElseBranch);
return absl::OkStatus();
}
Status CondBuilder::AddInputs() {
std::vector<const Edge*> edges;
TF_RETURN_IF_ERROR(if_op_->input_edges(&edges));
for (int i = 1; i < edges.size(); ++i) {
const Edge* e = edges[i];
TF_RETURN_IF_ERROR(AddInput(e->src(), e->src_output()));
}
for (const Edge* e : if_op_->in_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(e->src(), control_predecessor_);
}
}
return absl::OkStatus();
}
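// Finalizes both branch call nodes, merges their outputs pairwise, and adds
// a control-only "branch_executed" Merge so that control dependencies on the
// original If node still fire after lowering.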
Status CondBuilder::AddOutputs() {
TF_RETURN_IF_ERROR(then_call_builder_.Finalize(graph_, &then_call_node_));
graph_->AddControlEdge(pivot_t_, then_call_node_);
TF_RETURN_IF_ERROR(else_call_builder_.Finalize(graph_, &else_call_node_));
graph_->AddControlEdge(pivot_f_, else_call_node_);
std::vector<Node*> merges(then_call_node_->num_outputs());
outputs_.resize(merges.size());
for (int i = 0; i < then_call_node_->num_outputs(); ++i) {
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("output"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({NodeOut(then_call_node_, i), NodeOut(else_call_node_, i)})
.Device(if_op_->requested_device()),
graph_, &merges[i]));
outputs_[i] = NodeOut(merges[i], 0);
}
TF_RETURN_IF_ERROR(SetColocationAndFinalize(
NodeBuilder(NewName("branch_executed"), "Merge", graph_->op_registry(),
&debug_info_)
.Input({pivot_t_, pivot_f_})
.ControlInputs({then_call_node_, else_call_node_})
.Device(if_op_->requested_device()),
graph_, &branch_executed_node_));
TF_RETURN_IF_ERROR(BuildLoweredIfOutput());
for (const Edge* e : if_op_->out_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(branch_executed_node_, e->dst());
} else {
graph_->AddEdge(merges[e->src_output()], 0, e->dst(), e->dst_input());
}
}
return absl::OkStatus();
}
Status CondBuilder::BuildLoweredIfOutput() {
NodeBuilder builder = keep_node_fetchable_ && !outputs_.empty()
? NodeBuilder(name_, "IdentityN").Input(outputs_)
: NodeBuilder(name_, "NoOp");
return builder.Device(if_op_->requested_device())
.ControlInput(branch_executed_node_)
.Finalize(graph_, &lowered_if_output_);
}
}
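// Replaces the If node `n` in graph `g` with the lowered Switch/Merge form
// built by CondBuilder, then removes the original node.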
Status RewriteIfNode(Node* n, Graph* g, bool keep_node_fetchable) {
VLOG(2) << "Lower If node (keep_node_fetchable=" << keep_node_fetchable
<< "): " << SummarizeNode(*n);
const AttrValue* then_attr = n->attrs().Find("then_branch");
if (then_attr == nullptr) {
return errors::InvalidArgument("Then branch function missing");
}
const AttrValue* else_attr = n->attrs().Find("else_branch");
if (else_attr == nullptr) {
return errors::InvalidArgument("Else branch function missing");
}
CondBuilder cb(n, then_attr->func(), else_attr->func(), keep_node_fetchable,
g);
TF_RETURN_IF_ERROR(cb.CreatePivotNodes());
TF_RETURN_IF_ERROR(cb.AddInputs());
TF_RETURN_IF_ERROR(cb.AddOutputs());
g->RemoveNode(n);
return absl::OkStatus();
}
} | #include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
AttrValue FuncAttr(const string& name) {
AttrValue attr;
attr.mutable_func()->set_name(name);
return attr;
}
SessionOptions SessionOptionsWithInlining() {
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_do_function_inlining(true);
return session_options;
}
Status Rewrite(std::unique_ptr<Graph>* graph) {
FunctionLibraryDefinition flib_def((*graph)->flib_def());
GraphOptimizationPassOptions opt_options;
SessionOptions session_options = SessionOptionsWithInlining();
opt_options.session_options = &session_options;
opt_options.graph = graph;
opt_options.flib_def = &flib_def;
LowerFunctionalOpsPass pass;
return pass.Run(opt_options);
}
TEST(LowerIfOpTest, Simple) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = test::function::XTimesTwo();
*(f_lib_proto.add_function()) = test::function::XTimesFour();
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", FuncAttr("XTimesTwo"))
.Attr("else_branch", FuncAttr("XTimesFour"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
ASSERT_FALSE(op->IsSwitch());
ASSERT_FALSE(op->IsMerge());
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(node_called_if_count, 1);
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) {
++switch_count;
}
if (op->IsMerge()) {
++merge_count;
}
ASSERT_NE(op->type_string(), "If");
if (op->name() == "if") {
++node_called_if_count;
}
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 2);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
TEST(LowerIfOpTest, BranchFunctionsWithoutOutputs) {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
using FDH = ::tensorflow::FunctionDefHelper;
const auto assign_add = [](const string& fn_name, int v) {
const Tensor tensor = test::AsScalar<int32>(v);
return FDH::Create(
fn_name, {"v: resource"}, {}, {},
{
{{"c"}, "Const", {}, {{"value", tensor}, {"dtype", DT_INT32}}},
{{"upd"},
"AssignAddVariableOp",
{"v", "c:output"},
{{"dtype", DT_INT32}}},
},
{},
{{"side_effects", "upd"}});
};
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = assign_add("AddOne", 1);
*(f_lib_proto.add_function()) = assign_add("AddTwo", 2);
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
auto initial_val = ops::Placeholder(root.WithOpName("initial_val"), DT_INT32);
auto var = ops::VarHandleOp(root.WithOpName("var"), DT_INT32, {});
auto init = ops::AssignVariableOp(root.WithOpName("init"), var, initial_val);
Node* if_node;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(var.node())});
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.ControlInput(init.operation.node())
.Attr("then_branch", FuncAttr("AddOne"))
.Attr("else_branch", FuncAttr("AddTwo"))
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", DataTypeSlice{})
.Finalize(root.graph(), &if_node));
auto read = ops::ReadVariableOp(
root.WithOpName("read").WithControlDependencies(Output(if_node)), var,
DT_INT32);
TF_ASSERT_OK(root.DoShapeInference(if_node));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int switch_count = 0;
int merge_count = 0;
int node_called_if_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->IsSwitch()) ++switch_count;
if (op->IsMerge()) ++merge_count;
if (op->name() == "if") ++node_called_if_count;
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(switch_count, 2);
ASSERT_EQ(merge_count, 1);
ASSERT_EQ(node_called_if_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 11);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(initial_val.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(read)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 12);
}
}
TEST(LowerIfOpTest, DoNotInlineLoweredFunction) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
FunctionDef x_times_two = test::function::XTimesTwo();
FunctionDef x_times_four = test::function::XTimesFour();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
(*x_times_four.mutable_attr())["_noinline"].set_b(true);
FunctionDefLibrary f_lib_proto;
*(f_lib_proto.add_function()) = x_times_two;
*(f_lib_proto.add_function()) = x_times_four;
Scope root = Scope::NewRootScope().ExitOnError();
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(f_lib_proto));
auto a = ops::Placeholder(root.WithOpName("A"), DT_INT32);
auto pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Node* written_if;
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
AttrValue tb;
tb.mutable_func()->set_name("XTimesTwo");
AttrValue eb;
eb.mutable_func()->set_name("XTimesFour");
TF_ASSERT_OK(
NodeBuilder("if", "If", &root.graph()->flib_def())
.Input(pred.node())
.Input(inputs)
.Attr("then_branch", tb)
.Attr("else_branch", eb)
.Attr(LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr, true)
.Attr("Tout", {DT_INT32})
.Finalize(root.graph(), &written_if));
TF_ASSERT_OK(root.DoShapeInference(written_if));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(Rewrite(&graph));
int x_times_two_count = 0;
int x_times_four_count = 0;
for (const auto* op : graph->op_nodes()) {
if (op->type_string() == x_times_two.signature().name()) {
x_times_two_count++;
}
if (op->type_string() == x_times_four.signature().name()) {
x_times_four_count++;
}
ASSERT_NE(op->type_string(), "If");
}
ASSERT_EQ(x_times_two_count, 1);
ASSERT_EQ(x_times_four_count, 1);
ClientSession session(root, SessionOptionsWithInlining());
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(false));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 40);
}
{
ClientSession::FeedType feeds;
feeds.emplace(Output(pred.node()), Input::Initializer(true));
feeds.emplace(Output(a.node()), Input::Initializer(10));
std::vector<Tensor> out_tensors;
TF_ASSERT_OK(session.Run(feeds, {Output(written_if)}, &out_tensors));
EXPECT_EQ(out_tensors.size(), 1);
EXPECT_EQ(out_tensors[0].scalar<int>()(), 20);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_if_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/lower_if_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6cec5175-feab-4687-a71f-6b5f3cebf0c2 | cpp | tensorflow/tensorflow | tfprof_timeline | tensorflow/core/profiler/internal/tfprof_timeline.cc | tensorflow/core/profiler/internal/tfprof_timeline_test.cc | #include "tensorflow/core/profiler/internal/tfprof_timeline.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
namespace tensorflow {
namespace tfprof {
namespace {
int kMaxDisplayedMemNode = 10;
std::string GetTimeDevName(const std::string& dev) {
if (dev.find("stream") != dev.npos) {
return absl::StrCat("Op execution threads: ", dev);
} else {
return absl::StrCat("Op scheduling threads: ", dev);
}
}
std::string GetMemoryLaneName(const std::string& dev) {
return absl::StrCat("mem usage on:", dev);
}
}
Json::Value ChromeTraceFormatter::CreateEvent(const string& ph,
const string& category,
const string& name, int64_t pid,
int64_t tid, int64_t ts) {
Json::Value event(Json::objectValue);
event["ph"] = Json::Value(ph);
event["cat"] = Json::Value(category);
event["name"] = Json::Value(name);
event["pid"] = Json::Int64(pid);
event["tid"] = Json::Int64(tid);
event["ts"] = Json::Int64(ts);
return event;
}
void ChromeTraceFormatter::EmitPID(const string& name, int64_t pid) {
Json::Value event(Json::objectValue);
event["name"] = Json::Value("process_name");
event["ph"] = Json::Value("M");
event["pid"] = Json::Int64(pid);
Json::Value args(Json::objectValue);
args["name"] = Json::Value(name);
event["args"] = args;
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitRegion(int64_t ts, int64_t duration, int64_t pid,
int64_t tid, const string& category,
const string& name, Json::Value args) {
Json::Value event = CreateEvent("X", category, name, pid, tid, ts);
event["dur"] = Json::Int64(duration);
event["args"] = std::move(args);
metadata_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowStart(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("s", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
void ChromeTraceFormatter::EmitFlowEnd(const string& name, int64_t ts,
int64_t pid, int64_t tid,
int64_t flow_id) {
Json::Value event = CreateEvent("t", "DataFlow", name, pid, tid, ts);
event["id"] = Json::Int64(flow_id);
events_.push_back(event);
}
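// Emits two counter series at timestamp `ts`: the allocator's total bytes in
// use, and a "Top Allocations" breakdown listing the largest per-tensor
// allocations (at most kMaxDisplayedMemNode entries), with the remainder
// summarized as "Not Displayed".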
void ChromeTraceFormatter::EmitCounter(
const string& category, const string& name, int64_t pid, int64_t ts,
const string& device, int64_t bytes,
const std::map<int64_t, std::vector<string>>& tensor_mem) {
Json::Value event = CreateEvent("C", category, "Allocated Bytes", pid, 0, ts);
Json::Value args(Json::objectValue);
args["Allocator Bytes in Use"] = Json::Int64(bytes);
event["args"] = args;
events_.push_back(event);
Json::Value event2 =
CreateEvent("C", category, "Top Allocations", pid + 1, 0, ts);
Json::Value args2(Json::objectValue);
for (int i = 1; i < kMaxDisplayedMemNode; ++i) {
args2[absl::StrFormat("Top Allocation %02d", i)] = Json::Value("N/A");
}
int count = 0;
for (auto it = tensor_mem.rbegin(); it != tensor_mem.rend(); ++it) {
for (const string& t : it->second) {
if (bytes < it->first || count >= kMaxDisplayedMemNode) {
break;
}
args2[absl::StrFormat("Top Allocation %02d", count)] =
Json::Value(absl::StrCat(it->first / 1000000.0, " MB from ", t));
++count;
bytes -= it->first;
}
}
args2[std::string("Not Displayed")] =
Json::Value(absl::StrFormat("%.2f MB", bytes / 1000000.0));
event2["args"] = args2;
events_.push_back(event2);
}
string ChromeTraceFormatter::Format() {
Json::Value trace;
trace["traceEvents"] = Json::Value(Json::arrayValue);
for (const Json::Value& v : metadata_) {
trace["traceEvents"].append(v);
}
for (const Json::Value& v : events_) {
trace["traceEvents"].append(v);
}
Json::FastWriter writer;
string trace_str = writer.write(trace);
if (trace_str.length() > 200 * 1024 * 1024) {
absl::FPrintF(stderr,
"Trace file is over 200MB. Chrome might not be able to "
"display it. Consider to use filters (e.g. -min_micros "
"> 1000 or -op_type .*gpu:0.* to reduce the size.\n");
}
return trace_str;
}
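// Folds one node's allocation records for `step` into its device's memory
// state: per-timestamp allocation deltas, a cumulative allocation curve for
// the tensor, and the allocator's bytes-in-use samples.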
void MemoryTracker::TrackNode(int64_t step, const GraphNode* node) {
if (!node->Trackable(step)) {
return;
}
Device& dev = devices_[node->node->canonical_device()];
std::map<int64_t, int64_t> allocs;
for (const auto& alloc : node->node->allocations(step)) {
allocs[alloc.alloc_micros()] += alloc.alloc_bytes();
dev.tracked_allocations[alloc.alloc_micros()] += alloc.alloc_bytes();
}
dev.tracked_allocations[0] += node->node->accelerator_persistent_bytes();
allocs[0] += node->node->accelerator_persistent_bytes();
int64_t last = 0;
std::map<int64_t, int64_t>& aggregate_allocs =
dev.tensor_allocs[node->name()];
for (auto it = allocs.begin(); it != allocs.end(); ++it) {
last += it->second;
aggregate_allocs[it->first] = last;
}
for (const auto& bytes_in_use : node->node->allocator_bytes_in_use(step)) {
if (bytes_in_use.first <= 0) continue;
dev.allocations[bytes_in_use.first] = bytes_in_use.second;
}
}
void Timeline::AllocateTimeNodes(GraphNode* gnode) {
if (gnode->Trackable(step_)) {
TrackNode(gnode);
const TFGraphNode* node = gnode->node;
for (const auto& kernel_execs : node->op_execs(step_)) {
const string& device = kernel_execs.first;
if (process_.find(device) == process_.end()) {
int64_t pid = AllocatePID();
process_[device] = std::make_unique<Process>(device, pid);
chrome_formatter_.EmitPID(GetTimeDevName(device), pid);
}
Process* p = process_[device].get();
for (const auto& exec : kernel_execs.second) {
int64_t start_micros = exec.first;
int64_t exec_micros = exec.second;
if (tnodes_[device].find(start_micros) == tnodes_[device].end()) {
tnodes_[device][start_micros] =
std::make_unique<TimeNode>(p, gnode, start_micros, exec_micros);
}
}
}
}
for (GraphNode* n : gnode->show_children) {
AllocateTimeNodes(n);
}
}
void Timeline::GenerateGraphTimeline(const std::vector<GraphNode*>& gnodes) {
for (GraphNode* gnode : gnodes) {
AllocateTimeNodes(gnode);
}
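  // Link ops across devices: when an op's input was produced on a different
  // device, the producer's time node records this op in next_tnodes so the
  // dependency can later be drawn as a flow arrow in the trace.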
for (auto& process : tnodes_) {
if (!IsCanonicalDevice(process.first)) continue;
for (auto& tn : process.second) {
TimeNode* tnode = tn.second.get();
for (GraphNode* inp : tnode->node->children) {
if (!inp->account || !inp->Trackable(step_)) {
continue;
}
for (const auto& execs : inp->node->cpu_execs(step_)) {
if (!IsCanonicalDevice(execs.first)) continue;
if (process.first == execs.first) {
continue;
}
for (const auto& exec : execs.second) {
int64_t start_micros = exec.first;
auto cprocess = tnodes_.find(execs.first);
if (cprocess == tnodes_.end()) continue;
auto ctn = cprocess->second.find(start_micros);
if (ctn == cprocess->second.end()) continue;
ctn->second->next_tnodes.push_back(tnode);
}
}
}
}
}
AllocateLanes();
absl::FPrintF(stdout, "generating trace file.\n");
int64_t flow_id = 1;
for (const auto& process : alloc_nodes_) {
for (const auto& lane : process.second) {
for (const auto& node : lane.second) {
TimeNode* tnode = node.second;
Json::Value args(Json::objectValue);
args["name"] = Json::Value(tnode->name());
chrome_formatter_.EmitRegion(node.first, tnode->exec_micros,
process.first, lane.first, "Op",
tnode->name(), args);
for (TimeNode* next_tnode : node.second->next_tnodes) {
chrome_formatter_.EmitFlowStart(
tnode->name() + "_flow", tnode->start_micros + tnode->exec_micros,
process.first, lane.first, flow_id);
chrome_formatter_.EmitFlowEnd(
tnode->name() + "_flow", next_tnode->start_micros,
next_tnode->process->pid, next_tnode->tid, flow_id);
flow_id += 1;
}
}
}
}
for (const auto& dev : mem_tracker_.devices()) {
if (IsPlacedOnCPU(dev.first)) {
continue;
}
int64_t pid = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first), pid);
int64_t pid2 = AllocatePID();
chrome_formatter_.EmitPID(GetMemoryLaneName(dev.first) + " allocations",
pid2);
const MemoryTracker::Device& device = dev.second;
int64_t max_bytes_in_use = 0;
int64_t cur_bytes_in_use = 0;
int64_t last_point = 0;
for (const auto& alloc : device.allocations) {
cur_bytes_in_use = alloc.second;
max_bytes_in_use = std::max(max_bytes_in_use, cur_bytes_in_use);
int64_t ts = alloc.first;
if (ts - last_point < 100) continue;
last_point = ts;
std::map<int64_t, std::vector<string>> tensor_mem;
for (const auto& tensor_alloc_it : dev.second.tensor_allocs) {
const auto& tensor_alloc = tensor_alloc_it.second;
auto it = tensor_alloc.lower_bound(ts);
if (it != tensor_alloc.begin()) {
--it;
}
if (it->second > 0) {
tensor_mem[it->second].push_back(tensor_alloc_it.first);
}
}
chrome_formatter_.EmitCounter("Memory", "Memory Series", pid, ts,
dev.first, cur_bytes_in_use, tensor_mem);
}
if (IsPlacedOnAccelerator(dev.first)) {
absl::FPrintF(stdout, "%s peak memory: %.2f MB\n", dev.first,
max_bytes_in_use / 1000000.0);
}
}
OutputTimeline();
}
void Timeline::GenerateScopeTimeline(const ScopeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::GenerateCodeTimeline(const CodeNode* node) {
std::set<int64_t> visited_depth;
EmitTreeNode(node, 0, node->proto().total_exec_micros(), 0, &visited_depth);
OutputTimeline();
}
void Timeline::OutputTimeline() {
std::string outfile = absl::StrFormat("%s_%d", outfile_, step());
Status s =
WriteStringToFile(Env::Default(), outfile, chrome_formatter_.Format());
if (!s.ok()) {
absl::FPrintF(stderr, "Failed to write timeline file: %s\nError: %s\n",
outfile, s.ToString());
return;
}
absl::FPrintF(stdout,
"\n******************************************************\n");
absl::FPrintF(stdout,
"Timeline file is written to %s.\n"
"Open a Chrome browser, enter URL chrome:
"load the timeline file.",
outfile);
absl::FPrintF(stdout,
"\n******************************************************\n");
fflush(stdout);
}
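// Packs each process's time nodes into horizontal lanes: a node goes into the
// first lane whose existing intervals do not conflict with its
// [start, start + exec) range, and a new lane is created when none fits.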
void Timeline::AllocateLanes() {
for (auto& process : tnodes_) {
Process* p = process_[process.first].get();
for (auto& tnode : process.second) {
int64_t start_time = tnode.second->start_micros;
int64_t end_time = tnode.second->start_micros + tnode.second->exec_micros;
int64_t l = -1;
for (int64_t i = 0, end = p->lanes.size(); i < end; ++i) {
const auto& lane = p->lanes[i];
l = i;
for (auto cur_it = lane.rbegin(); cur_it != lane.rend(); ++cur_it) {
if (cur_it->second > start_time) {
l = -1;
break;
}
if (start_time > cur_it->second) {
break;
}
}
if (l >= 0) {
break;
}
}
if (l < 0) {
l = p->lanes.size();
std::map<int64_t, int64_t> nlane;
nlane[start_time] = end_time;
p->lanes.push_back(nlane);
} else {
p->lanes[l][start_time] = end_time;
}
tnode.second->tid = l;
alloc_nodes_[p->pid][l][start_time] = tnode.second.get();
}
}
}
int64_t Timeline::AllocatePID() {
int64_t cur_pid = next_pid_;
next_pid_ += 1;
return cur_pid;
}
}
} | #include <memory>
#include <utility>
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/internal/tfprof_constants.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/internal/tfprof_utils.h"
#include "tensorflow/core/profiler/tfprof_log.pb.h"
#include "tensorflow/core/profiler/tfprof_options.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfTimelineTest : public ::testing::Test {
protected:
TFProfTimelineTest() {
string graph_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/graph.pbtxt");
std::unique_ptr<tensorflow::GraphDef> graph_pb(new tensorflow::GraphDef());
TF_CHECK_OK(
ReadProtoFile(Env::Default(), graph_path, graph_pb.get(), false));
std::unique_ptr<tensorflow::RunMetadata> run_meta_pb(
new tensorflow::RunMetadata());
string run_meta_path =
io::JoinPath(testing::TensorFlowSrcRoot(),
"core/profiler/internal/testdata/run_meta");
TF_CHECK_OK(
ReadProtoFile(Env::Default(), run_meta_path, run_meta_pb.get(), true));
tf_stats_ = std::make_unique<TFStats>(
std::move(graph_pb), std::move(run_meta_pb), nullptr, nullptr);
tf_stats_->BuildAllViews();
}
std::unique_ptr<TFStats> tf_stats_;
};
TEST_F(TFProfTimelineTest, GraphView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(10000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("graph", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(16556121177519539380ull, Hash64(dump_str));
}
TEST_F(TFProfTimelineTest, ScopeView) {
string dump_file = io::JoinPath(testing::TmpDir(), "dump");
Options opts(5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, "name",
{".*"},
{".*"}, {""}, {".*"}, {""}, false,
{"params", "bytes", "micros", "float_ops"}, "timeline",
{{"outfile", dump_file}});
tf_stats_->ShowGraphNode("scope", opts);
string dump_str;
TF_CHECK_OK(ReadFileToString(Env::Default(), dump_file + "_0", &dump_str));
EXPECT_EQ(17545174915963890413ull, Hash64(dump_str));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_timeline.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/tfprof_timeline_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7c56682b-83cb-4b2e-b235-8488d234e334 | cpp | google/tensorstore | transpose_op | tensorstore/index_space/internal/transpose_op.cc | tensorstore/index_space/transpose_op_test.cc | #include "tensorstore/index_space/internal/transpose_op.h"
#include <cassert>
#include <numeric>
#include "absl/status/status.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transpose.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
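// Builds an input-dimension permutation that moves the dimensions listed in
// `dimensions` to consecutive positions starting at `target` (which may be
// negative), keeping all remaining dimensions in their original relative
// order. `dimensions` is updated in place to the new positions.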
absl::Status MakePermutationFromMoveDimsTarget(
DimensionIndexBuffer* dimensions, DimensionIndex target,
span<DimensionIndex> permutation) {
if (dimensions->empty()) {
std::iota(permutation.begin(), permutation.end(),
static_cast<DimensionIndex>(0));
return absl::OkStatus();
}
const DimensionIndex input_rank = permutation.size();
const DimensionIndex num_dims = dimensions->size();
TENSORSTORE_ASSIGN_OR_RETURN(
target, NormalizeDimensionIndex(target, input_rank - num_dims + 1));
std::fill(permutation.begin(), permutation.end(),
static_cast<DimensionIndex>(-1));
DimensionSet moved_dims = false;
for (DimensionIndex i = 0; i < num_dims; ++i) {
DimensionIndex& input_dim = (*dimensions)[i];
moved_dims[input_dim] = true;
permutation[target + i] = input_dim;
input_dim = target + i;
}
for (DimensionIndex i = 0, orig_input_dim = 0; i < input_rank; ++i) {
if (permutation[i] != -1) continue;
while (moved_dims[orig_input_dim]) ++orig_input_dim;
permutation[i] = orig_input_dim++;
}
return absl::OkStatus();
}
}
Result<IndexTransform<>> ApplyMoveDimsTo(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
DimensionIndex target,
bool domain_only) {
const DimensionIndex input_rank = transform.input_rank();
DimensionIndex permutation[kMaxRank];
TENSORSTORE_RETURN_IF_ERROR(MakePermutationFromMoveDimsTarget(
dimensions, target, span<DimensionIndex>(&permutation[0], input_rank)));
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
Result<IndexTransform<>> ApplyTranspose(IndexTransform<> transform,
DimensionIndexBuffer* dimensions,
bool domain_only) {
if (static_cast<DimensionIndex>(dimensions->size()) !=
transform.input_rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of dimensions (", dimensions->size(),
") must equal input_rank (", transform.input_rank(), ")."));
}
TransformRep::Ptr<> rep = TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)), *dimensions,
domain_only);
std::iota(dimensions->begin(), dimensions->end(),
static_cast<DimensionIndex>(0));
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
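// Moves each selected dimension to the corresponding (possibly negative)
// target position; unselected dimensions keep their relative order and fill
// the remaining positions. `dimensions` is updated to the new positions of
// the selected dimensions.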
Result<IndexTransform<>> ApplyTransposeTo(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DimensionIndex> target_dimensions, bool domain_only) {
const DimensionIndex input_rank = transform.input_rank();
if (static_cast<DimensionIndex>(dimensions->size()) !=
target_dimensions.size()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Number of selected dimensions (", dimensions->size(),
") must equal number of target dimensions (", target_dimensions.size(),
")"));
}
DimensionSet seen_existing_dim = false;
DimensionIndex permutation[kMaxRank];
std::fill_n(permutation, input_rank, -1);
for (DimensionIndex i = 0; i < target_dimensions.size(); ++i) {
DimensionIndex& orig_dim = (*dimensions)[i];
TENSORSTORE_ASSIGN_OR_RETURN(
const DimensionIndex target_dim,
NormalizeDimensionIndex(target_dimensions[i], input_rank));
if (permutation[target_dim] != -1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Target dimension ", target_dim, " occurs more than once"));
}
seen_existing_dim[orig_dim] = true;
permutation[target_dim] = orig_dim;
orig_dim = target_dim;
}
for (DimensionIndex orig_dim = 0, target_dim = 0; orig_dim < input_rank;
++orig_dim) {
if (seen_existing_dim[orig_dim]) continue;
while (permutation[target_dim] != -1) ++target_dim;
permutation[target_dim] = orig_dim;
}
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
span<const DimensionIndex>(&permutation[0], input_rank), domain_only));
}
Result<IndexTransform<>> ApplyTransposeToDynamic(
IndexTransform<> transform, DimensionIndexBuffer* dimensions,
span<const DynamicDimSpec> target_dim_specs, bool domain_only) {
if (target_dim_specs.size() == 1) {
if (auto* target = std::get_if<DimensionIndex>(&target_dim_specs.front())) {
return ApplyMoveDimsTo(std::move(transform), dimensions, *target,
domain_only);
}
}
DimensionIndexBuffer target_dimensions;
const DimensionIndex input_rank = transform.input_rank();
for (const auto& s : target_dim_specs) {
if (auto* index = std::get_if<DimensionIndex>(&s)) {
target_dimensions.push_back(*index);
} else if (auto* r = std::get_if<DimRangeSpec>(&s)) {
TENSORSTORE_RETURN_IF_ERROR(
NormalizeDimRangeSpec(*r, input_rank, &target_dimensions));
} else {
return absl::InvalidArgumentError(
"Target dimensions cannot be specified by label");
}
}
return ApplyTransposeTo(std::move(transform), dimensions, target_dimensions,
domain_only);
}
Result<IndexTransform<>> ApplyTranspose(
IndexTransform<> transform, span<const DynamicDimSpec> source_dim_specs,
bool domain_only) {
DimensionIndexBuffer source_dimensions;
source_dimensions.reserve(transform.input_rank());
TENSORSTORE_RETURN_IF_ERROR(NormalizeDynamicDimSpecs(
source_dim_specs, transform.input_labels(), &source_dimensions));
if (!IsValidPermutation(source_dimensions)) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Source dimension list ", span(source_dimensions),
" is not a valid dimension permutation for rank ",
transform.input_rank()));
}
return TransformAccess::Make<IndexTransform<>>(TransposeInputDimensions(
TransformAccess::rep_ptr<container>(std::move(transform)),
source_dimensions, domain_only));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/dim_expression_testutil.h"
#include "tensorstore/util/status.h"
namespace {
using ::tensorstore::Dims;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_index_space::EquivalentIndices;
using ::tensorstore::internal_index_space::TestDimExpression;
using ::tensorstore::internal_index_space::TestDimExpressionError;
TEST(TransposeTest, Example) {
const auto original_transform = IndexTransformBuilder<3, 3>()
.input_origin({1, 2, 3})
.input_shape({3, 4, 2})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_identity_transform()
.Finalize()
.value();
const auto expected_new_transform = IndexTransformBuilder<3, 3>()
.input_origin({3, 1, 2})
.input_shape({2, 3, 4})
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.input_labels({"z", "x", "y"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.Finalize()
.value();
const EquivalentIndices equivalent_indices = {{{2, 3, 4}, {4, 2, 3}}};
TestDimExpression(original_transform,
Dims(2, 0, 1).Transpose(),
{0, 1, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
TestDimExpression(original_transform,
Dims("z", "x", "y").Transpose(),
{0, 1, 2},
expected_new_transform,
expected_new_transform,
equivalent_indices);
}
TEST(TransposeTest, Simple) {
TestDimExpression(
IndexTransformBuilder<4, 2>()
.input_origin({1, 2, 3, 4})
.input_shape({5, 6, 4, 8})
.output_single_input_dimension(0, 1, 2, 1)
.output_index_array(
1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
Dims(2, 0, 1, 3).Transpose(),
{0, 1, 2, 3},
IndexTransformBuilder<4, 4>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.output_single_input_dimension(3, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.output_single_input_dimension(0, 1, 2, 2)
.output_index_array(
1, 2, 3,
MakeArray<Index>(
{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
{{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
TEST(TransposeTest, Constant) {
TestDimExpression(IndexTransformBuilder<2, 2>()
.input_origin({1, 2})
.input_shape({5, 6})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
Dims(1, 0).Transpose(),
{0, 1},
IndexTransformBuilder<2, 2>()
.input_origin({2, 1})
.input_shape({6, 5})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 0)
.Finalize()
.value(),
IndexTransformBuilder<2, 2>()
.input_origin({2, 1})
.input_shape({6, 5})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
{});
}
TEST(TransposeTest, ErrorHandling) {
TestDimExpressionError(
IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({5, 6})
.output_constant(0, 1)
.output_constant(1, 2)
.Finalize()
.value(),
Dims(1).Transpose(), absl::StatusCode::kInvalidArgument,
"Number of dimensions \\(1\\) must equal input_rank \\(2\\)\\.");
}
TEST(TransposeTest, Labeled) {
TestDimExpression(
IndexTransformBuilder<4, 2>()
.input_origin({1, 2, 3, 4})
.input_shape({5, 6, 4, 8})
.input_labels({"a", "b", "c", "d"})
.output_single_input_dimension(0, 1, 2, 1)
.output_index_array(
1, 2, 3, MakeArray<Index>({{{{1}, {2}, {3}, {4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
Dims(2, 0, 1, 3).Transpose(),
{0, 1, 2, 3},
IndexTransformBuilder<4, 4>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.input_labels({"c", "a", "b", "d"})
.output_single_input_dimension(0, 1)
.output_single_input_dimension(1, 2)
.output_single_input_dimension(2, 0)
.output_single_input_dimension(3, 3)
.Finalize()
.value(),
IndexTransformBuilder<4, 2>()
.input_origin({3, 1, 2, 4})
.input_shape({4, 5, 6, 8})
.input_labels({"c", "a", "b", "d"})
.output_single_input_dimension(0, 1, 2, 2)
.output_index_array(
1, 2, 3,
MakeArray<Index>(
{{{{1}}}, {{{2}}}, {{{3}}}, {{{4}}}}),
IndexInterval::Closed(-3, 10))
.Finalize()
.value(),
{{{2, 4, 3, 5}, {3, 2, 4, 5}}});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transpose_op.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transpose_op_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
210f416b-112b-4ae1-8653-76f0724b1b91 | cpp | tensorflow/tensorflow | hlo_execution_profile | third_party/xla/xla/service/hlo_execution_profile.cc | third_party/xla/xla/service/hlo_execution_profile_test.cc | #include "xla/service/hlo_execution_profile.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_execution_profile_data.pb.h"
#include "xla/service/human_readable_profile_builder.h"
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
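// Assigns a dense, contiguous profile-counter index to every computation and
// instruction in the module (each computation is immediately followed by its
// instructions, visiting computations in post order), then one index per
// extra metric.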
HloProfileIndexMap::HloProfileIndexMap(
const HloModule& module, absl::Span<const std::string> extra_metrics) {
size_t current_profile_index = 0;
for (xla::HloComputation* computation : module.MakeComputationPostOrder()) {
InsertOrDie(&computation_to_profile_idx_, computation,
current_profile_index++);
for (const HloInstruction* instruction : computation->instructions()) {
InsertOrDie(&instruction_to_profile_idx_, instruction,
current_profile_index++);
}
}
for (const std::string& key : extra_metrics) {
InsertOrDie(&extra_metric_to_profile_idx_, key, current_profile_index++);
}
}
std::unique_ptr<HloProfilePrinterData> CreateHloProfilePrinterData(
const HloProfileIndexMap& hlo_profile_index_map,
const HloCostAnalysis& cost_analysis,
absl::string_view entry_computation_name) {
using HloComputationInfo = HloProfilePrinterData::HloComputationInfo;
using HloInstructionInfo = HloProfilePrinterData::HloInstructionInfo;
size_t profile_counters_size = hlo_profile_index_map.total_count();
std::unique_ptr<HloProfilePrinterData> profile_printer_data =
std::make_unique<HloProfilePrinterData>();
profile_printer_data->set_profile_counters_size(profile_counters_size);
profile_printer_data->mutable_computation_infos()->Reserve(
hlo_profile_index_map.computation_count());
const auto& computation_to_profile_idx_map =
hlo_profile_index_map.computation_to_profile_idx();
std::vector<std::pair<const HloComputation*, int64_t>>
computation_and_profile_idx_list(computation_to_profile_idx_map.begin(),
computation_to_profile_idx_map.end());
absl::c_sort(computation_and_profile_idx_list,
[](const std::pair<const HloComputation*, int64_t>& left,
const std::pair<const HloComputation*, int64_t>& right) {
return left.second < right.second;
});
for (const auto& pair : computation_and_profile_idx_list) {
CHECK_LT(pair.second, profile_counters_size);
const HloComputation* computation = pair.first;
HloComputationInfo* computation_info =
profile_printer_data->add_computation_infos();
*computation_info->mutable_name() = std::string(computation->name());
computation_info->set_profile_index(pair.second);
computation_info->mutable_instruction_infos()->Reserve(
computation->instruction_count());
for (const HloInstruction* hlo : computation->instructions()) {
HloInstructionInfo* instruction_info =
computation_info->add_instruction_infos();
instruction_info->set_long_name(hlo->ToString());
instruction_info->set_short_name(hlo->ToString(
HloPrintOptions().set_compact_operands(true).set_print_operand_names(
false)));
instruction_info->set_category(hlo->ToCategory());
instruction_info->set_flop_count(cost_analysis.flop_count(*hlo));
instruction_info->set_transcendental_count(
cost_analysis.transcendental_count(*hlo));
instruction_info->set_bytes_accessed(cost_analysis.bytes_accessed(*hlo));
instruction_info->set_optimal_seconds(
cost_analysis.optimal_seconds(*hlo));
instruction_info->set_profile_index(
hlo_profile_index_map.GetProfileIndexFor(*hlo));
}
}
for (const auto& pair : hlo_profile_index_map.extra_metric_to_profile_idx()) {
profile_printer_data->mutable_extra_metrics()->insert(
{pair.first, pair.second});
}
*profile_printer_data->mutable_entry_computation() =
std::string(entry_computation_name);
return profile_printer_data;
}
HloExecutionProfile::HloExecutionProfile(
const HloProfilePrinterData* hlo_profile_printer_data,
const HloProfileIndexMap* hlo_profile_index_map)
: hlo_profile_printer_data_(*hlo_profile_printer_data),
hlo_profile_index_map_(*hlo_profile_index_map),
profile_counters_(
hlo_profile_index_map_.total_count(),
0) {}
void HloExecutionProfile::SetCyclesTakenBy(const HloInstruction* hlo,
uint64_t cycles_taken) {
SetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(*hlo),
cycles_taken);
}
void HloExecutionProfile::SetCyclesTakenBy(size_t index,
uint64_t cycles_taken) {
profile_counters_[index] = cycles_taken;
}
uint64_t HloExecutionProfile::GetCyclesTakenBy(
const HloInstruction& hlo) const {
return GetCyclesTakenBy(hlo_profile_index_map_.GetProfileIndexFor(hlo));
}
uint64_t HloExecutionProfile::GetCyclesTakenBy(size_t index) const {
return profile_counters_[index];
}
} | #include "xla/service/hlo_execution_profile.h"
#include "absl/strings/str_cat.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
using absl::StrCat;
using ::testing::AllOf;
using ::testing::ContainsRegex;
class HloExecutionProfileTest : public HloTestBase {};
TEST_F(HloExecutionProfileTest, Basic) {
auto hlo_module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
lhs = f32[30,30]{1,0} parameter(0)
rhs = f32[30,30]{1,0} parameter(1)
add = f32[30,30]{1,0} add(lhs, rhs)
ROOT dot = f32[30,30]{1,0} dot(lhs, add), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const HloInstruction* dot_instruction =
hlo_module->entry_computation()->root_instruction();
const HloInstruction* add_instruction = dot_instruction->operand(1);
Shape shape = ShapeUtil::MakeShape(F32, {30, 30});
auto shape_size_function = [&](const Shape& shape) {
const int64_t pointer_size = 8;
if (shape.IsOpaque()) {
return pointer_size;
}
return ShapeUtil::ByteSizeOf(shape, pointer_size);
};
HloCostAnalysis cost_analysis(shape_size_function);
HloProfileIndexMap profile_index_map(*hlo_module);
std::unique_ptr<HloProfilePrinterData> profile_printer =
CreateHloProfilePrinterData(profile_index_map, cost_analysis,
hlo_module->entry_computation()->name());
HloExecutionProfile execution_profile(profile_printer.get(),
&profile_index_map);
const int64_t add_cycles = 1000;
const int64_t dot_cycles = 4000;
execution_profile.SetCyclesTakenBy(add_instruction, add_cycles);
execution_profile.SetCyclesTakenBy(dot_instruction, dot_cycles);
float clock_rate_ghz = backend()
.default_stream_executor()
->GetDeviceDescription()
.clock_rate_ghz();
EXPECT_THAT(execution_profile.ToString(clock_rate_ghz),
AllOf(ContainsRegex(StrCat(dot_cycles, " cycles.*%",
dot_instruction->name())),
ContainsRegex(StrCat(add_cycles, " cycles.*%",
add_instruction->name()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_execution_profile.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_execution_profile_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dffbdf66-83d7-4f5c-ae19-9de59b3de137 | cpp | google/arolla | tuple | arolla/expr/optimization/peephole_optimizations/tuple.cc | arolla/expr/optimization/peephole_optimizations/tuple_test.cc | #include "arolla/expr/optimization/peephole_optimizations/tuple.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
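// Rewrites GetNth(i) applied to core.make_tuple(a0, ..., aN) into the
// corresponding tuple element when the index is in range, bypassing the
// intermediate tuple construction.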
absl::StatusOr<ExprNodePtr> OptimizeTupleGet(ExprNodePtr expr) {
static Fingerprint make_tuple_fingerprint = MakeTupleOperator().fingerprint();
if (!expr->is_op()) {
return expr;
}
auto get_nth_operator =
fast_dynamic_downcast_final<const GetNthOperator*>(expr->op().get());
if (get_nth_operator == nullptr) {
return expr;
}
if (expr->node_deps().size() != 1) {
return expr;
}
auto tuple_expr = expr->node_deps()[0];
if (!tuple_expr->is_op()) {
return expr;
}
ASSIGN_OR_RETURN(auto tuple_op, DecayRegisteredOperator(tuple_expr->op()));
if (tuple_op->fingerprint() != make_tuple_fingerprint ||
tuple_expr->node_deps().size() <= get_nth_operator->index()) {
return expr;
}
return tuple_expr->node_deps()[get_nth_operator->index()];
}
absl::Status AppendGetNOptimizations(PeepholeOptimizationPack& optimizations) {
ASSIGN_OR_RETURN(
optimizations.emplace_back(),
PeepholeOptimization::CreateTransformOptimization(OptimizeTupleGet));
return absl::OkStatus();
}
}
absl::StatusOr<PeepholeOptimizationPack> TupleOptimizations() {
PeepholeOptimizationPack optimizations;
RETURN_IF_ERROR(AppendGetNOptimizations(optimizations));
return optimizations;
}
} | #include "arolla/expr/optimization/peephole_optimizations/tuple.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/optimization/peephole_optimizer.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::WithQTypeAnnotation;
class TupleOptimizationsTest : public ::testing::Test {
protected:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(optimizer_,
CreatePeepholeOptimizer({TupleOptimizations}));
}
std::unique_ptr<PeepholeOptimizer> optimizer_;
};
TEST_F(TupleOptimizationsTest, SingleSubstitution) {
auto a = Leaf("l1");
auto b = Leaf("l2");
auto c = Leaf("l3");
auto d = Leaf("l4");
ASSERT_OK_AND_ASSIGN(auto tuple, CallOp("core.make_tuple", {a, b, c, d}));
{
ASSERT_OK_AND_ASSIGN(auto get0, CallOp(GetNthOperator::Make(0), {tuple}));
EXPECT_THAT(optimizer_->Apply(get0), IsOkAndHolds(EqualsExpr(a)));
}
{
ASSERT_OK_AND_ASSIGN(auto get1, CallOp(GetNthOperator::Make(1), {tuple}));
EXPECT_THAT(optimizer_->Apply(get1), IsOkAndHolds(EqualsExpr(b)));
}
{
ASSERT_OK_AND_ASSIGN(auto get2, CallOp(GetNthOperator::Make(2), {tuple}));
EXPECT_THAT(optimizer_->Apply(get2), IsOkAndHolds(EqualsExpr(c)));
}
{
ASSERT_OK_AND_ASSIGN(auto get3, CallOp(GetNthOperator::Make(3), {tuple}));
EXPECT_THAT(optimizer_->Apply(get3), IsOkAndHolds(EqualsExpr(d)));
}
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(GetNthOperator::Make(0), {a}));
EXPECT_THAT(optimizer_->Apply(expr), IsOkAndHolds(EqualsExpr(expr)));
}
}
TEST_F(TupleOptimizationsTest, WorksWithConcatTuples) {
ASSERT_OK_AND_ASSIGN(auto a,
WithQTypeAnnotation(Leaf("a"), GetQType<int32_t>()));
ASSERT_OK_AND_ASSIGN(auto b,
WithQTypeAnnotation(Leaf("b"), GetQType<int64_t>()));
ASSERT_OK_AND_ASSIGN(
auto concat_tuples,
CallOp("core.concat_tuples",
{CallOp("core.make_tuple", {a, b}), CallOp("core.make_tuple", {b}),
CallOp("core.make_tuple", {a})}));
ASSERT_OK_AND_ASSIGN(auto lowest_concat_tuples, ToLowest(concat_tuples));
EXPECT_THAT(
optimizer_->Apply(lowest_concat_tuples),
IsOkAndHolds(EqualsExpr(CallOp("core.make_tuple", {a, b, b, a}))));
ASSERT_OK_AND_ASSIGN(auto get_2,
CallOp(GetNthOperator::Make(2), {concat_tuples}));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/tuple.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizations/tuple_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
b1600d3c-a287-4136-9fcf-cd1dff99ff27 | cpp | tensorflow/tensorflow | reduction_ops | tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | tensorflow/core/kernels/reduction_ops_test.cc | #include "tensorflow/compiler/tf2xla/kernels/reduction_ops.h"
#include <cstdint>
#include <limits>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace {
class SumOp : public XlaReductionOp {
public:
explicit SumOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Sum").CompileTimeConstantInput("reduction_indices"),
SumOp);
class ProdOp : public XlaReductionOp {
public:
explicit ProdOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::One(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Mul(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Prod").CompileTimeConstantInput("reduction_indices"),
ProdOp);
class MinOp : public XlaReductionOp {
public:
explicit MinOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MaxValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Min(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Min").CompileTimeConstantInput("reduction_indices"),
MinOp);
class MaxOp : public XlaReductionOp {
public:
explicit MaxOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {
OP_REQUIRES_OK(ctx, PrimitiveTypeCheck(xla_reduction_type_));
}
static Status PrimitiveTypeCheck(xla::PrimitiveType xla_reduction_type) {
if (xla_reduction_type == xla::C64 || xla_reduction_type == xla::C128 ||
xla_reduction_type == xla::TUPLE ||
xla_reduction_type == xla::OPAQUE_TYPE) {
return errors::InvalidArgument(
"Unsupported PrimitiveType in MaxOp: '",
xla::PrimitiveType_Name(xla_reduction_type), "'");
} else {
return absl::OkStatus();
}
}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::MinValue(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Max(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Max").CompileTimeConstantInput("reduction_indices"),
MaxOp);
class MeanOp : public XlaReductionOp {
public:
explicit MeanOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx,
XlaHelpers::SumAccumulationType(ctx->input_type(0))) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::Zero(builder, xla_reduction_type_);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Add(scalar_lhs, scalar_rhs);
}
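  // Divides the summed reduction output by the product of the reduced
  // dimension sizes. The running product of static sizes is tracked only to
  // decide when to divide early, so the dynamic S32 divisor never has to
  // represent a product exceeding the int32 range.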
xla::XlaOp BuildFinalizer(
xla::XlaBuilder* builder, const xla::XlaOp& input,
const xla::XlaOp& reduce_output,
const std::vector<int64_t>& dimensions_to_reduce) override {
if (dimensions_to_reduce.empty()) {
return reduce_output;
}
xla::XlaOp result = reduce_output;
xla::Shape bounded_shape = builder->GetShape(input).value();
int64_t divisor_value = bounded_shape.dimensions(dimensions_to_reduce[0]);
auto divisor = xla::GetDimensionSize(input, dimensions_to_reduce[0]);
for (int i = 1; i < dimensions_to_reduce.size(); i++) {
int64_t size_value = bounded_shape.dimensions(dimensions_to_reduce[i]);
auto size = xla::GetDimensionSize(input, dimensions_to_reduce[i]);
if (size_value * divisor_value > std::numeric_limits<int32_t>::max()) {
result = result / xla::ConvertElementType(divisor, xla_reduction_type_);
divisor_value = size_value;
divisor = size;
} else {
divisor = xla::Mul(divisor, size);
divisor_value = size_value * divisor_value;
}
}
divisor = xla::ConvertElementType(divisor, xla_reduction_type_);
return XlaHelpers::ConvertElementType(result / divisor, input_type(0));
}
};
REGISTER_XLA_OP(Name("Mean").CompileTimeConstantInput("reduction_indices"),
MeanOp);
class AllOp : public XlaReductionOp {
public:
explicit AllOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, true);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::And(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("All").CompileTimeConstantInput("reduction_indices"),
AllOp);
class AnyOp : public XlaReductionOp {
public:
explicit AnyOp(OpKernelConstruction* ctx)
: XlaReductionOp(ctx, ctx->input_type(0)) {}
xla::XlaOp InitialValue(xla::XlaBuilder* builder) override {
return xla::ConstantR0<bool>(builder, false);
}
void BuildReducer(xla::XlaBuilder* builder, const xla::XlaOp& scalar_lhs,
const xla::XlaOp& scalar_rhs) override {
xla::Or(scalar_lhs, scalar_rhs);
}
};
REGISTER_XLA_OP(Name("Any").CompileTimeConstantInput("reduction_indices"),
AnyOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
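// Helpers that build small benchmark graphs: each one reduces a randomly
// initialized tensor along the axes given by a constant `axes` input.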
template <typename T>
static Graph* ToScalar(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DataTypeToEnum<T>::value, TensorShape({num_x, num_y}));
data.flat<T>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ColReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 0;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* RowReduce(const string& reduce, int num_x, int num_y) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({num_x, num_y}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDYReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({1}));
axes.flat<int32>()(0) = 1;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
static Graph* ThreeDXZReduce(const string& reduce, int num_y, int num_z) {
auto* g = new Graph(OpRegistry::Global());
Tensor data(DT_FLOAT, TensorShape({4, num_y, num_z}));
data.flat<float>().setRandom();
Tensor axes(DT_INT32, TensorShape({2}));
axes.flat<int32>()(0) = 0;
axes.flat<int32>()(1) = 2;
test::graph::Reduce(g, reduce, test::graph::Constant(g, data),
test::graph::Constant(g, axes));
return g;
}
template <typename T>
static void ReduceToScalar(::testing::benchmark::State& state,
const string& device, const string& reduce,
int num_x, int num_y) {
test::Benchmark(device, ToScalar<T>(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(T));
}
static void DoRowReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, RowReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void DoColReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ColReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DYReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ThreeDYReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void Do3DXZReduce(::testing::benchmark::State& state,
const string& device, const string& reduce, int num_x,
int num_y) {
test::Benchmark(device, ThreeDXZReduce(reduce, num_x, num_y),
false)
.Run(state);
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * num_x *
num_y * sizeof(float));
}
static void BM_Sum2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUComplex(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<std::complex<float>>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUComplex)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DToScalarGPUHalf)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DRowReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoRowReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DRowReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum2DColumnReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
DoColReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum2DColumnReduceGPU)->RangePair(1, 8192, 1, 8192);
static void BM_Sum3DYReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DYReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DYReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Sum3DXZReduceGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
Do3DXZReduce(state, "gpu", "Sum", num_x, num_y);
}
BENCHMARK(BM_Sum3DXZReduceGPU)->RangePair(64, 4096, 64, 4096);
static void BM_Mean2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_EuclideanNorm2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "EuclideanNorm", num_x, num_y);
}
BENCHMARK(BM_EuclideanNorm2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Max2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Max", num_x, num_y);
}
BENCHMARK(BM_Max2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<float>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Min2DToScalarGPUHalf(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<Eigen::half>(state, "gpu", "Min", num_x, num_y);
}
BENCHMARK(BM_Min2DToScalarGPUHalf)->RangePair(2048, 8192, 2048, 8192);
static void BM_Bool2DToScalarGPU(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bool>(state, "gpu", "All", num_x, num_y);
}
BENCHMARK(BM_Bool2DToScalarGPU)->RangePair(2048, 8192, 2048, 8192);
static void BM_Mean2DToScalarCPUBF16(::testing::benchmark::State& state) {
const int num_x = state.range(0);
const int num_y = state.range(1);
ReduceToScalar<bfloat16>(state, "cpu", "Mean", num_x, num_y);
}
BENCHMARK(BM_Mean2DToScalarCPUBF16)->RangePair(2048, 8192, 2048, 8192);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/reduction_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/reduction_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7cbce453-224d-4a29-a1d3-b3103ddb6369 | cpp | google/cel-cpp | container_backed_map_impl | eval/public/containers/container_backed_map_impl.cc | eval/public/containers/container_backed_map_impl_test.cc | #include "eval/public/containers/container_backed_map_impl.h"
#include <memory>
#include <utility>
#include "absl/container/node_hash_map.h"
#include "absl/hash/hash.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "eval/public/cel_value.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
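// Visitors used by CelMapBuilder to hash CelValue map keys and to compare them
// for equality, dispatching on the alternative each value currently holds.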
class HasherOp {
public:
template <class T>
size_t operator()(const T& arg) {
return std::hash<T>()(arg);
}
size_t operator()(const absl::Time arg) {
return absl::Hash<absl::Time>()(arg);
}
size_t operator()(const absl::Duration arg) {
return absl::Hash<absl::Duration>()(arg);
}
size_t operator()(const CelValue::StringHolder& arg) {
return absl::Hash<absl::string_view>()(arg.value());
}
size_t operator()(const CelValue::BytesHolder& arg) {
return absl::Hash<absl::string_view>()(arg.value());
}
size_t operator()(const CelValue::CelTypeHolder& arg) {
return absl::Hash<absl::string_view>()(arg.value());
}
size_t operator()(const std::nullptr_t&) { return 0; }
};
template <class T>
class EqualOp {
public:
explicit EqualOp(const T& arg) : arg_(arg) {}
template <class U>
bool operator()(const U&) const {
return false;
}
bool operator()(const T& other) const { return other == arg_; }
private:
const T& arg_;
};
class CelValueEq {
public:
explicit CelValueEq(const CelValue& other) : other_(other) {}
template <class Type>
bool operator()(const Type& arg) {
return other_.template Visit<bool>(EqualOp<Type>(arg));
}
private:
const CelValue& other_;
};
}
absl::optional<CelValue> CelMapBuilder::operator[](CelValue cel_key) const {
auto item = values_map_.find(cel_key);
if (item == values_map_.end()) {
return absl::nullopt;
}
return item->second;
}
absl::Status CelMapBuilder::Add(CelValue key, CelValue value) {
auto [unused, inserted] = values_map_.emplace(key, value);
if (!inserted) {
return absl::InvalidArgumentError("duplicate map keys");
}
key_list_.Add(key);
return absl::OkStatus();
}
size_t CelMapBuilder::Hasher::operator()(const CelValue& key) const {
return key.template Visit<size_t>(HasherOp());
}
bool CelMapBuilder::Equal::operator()(const CelValue& key1,
const CelValue& key2) const {
if (key1.type() != key2.type()) {
return false;
}
return key1.template Visit<bool>(CelValueEq(key2));
}
absl::StatusOr<std::unique_ptr<CelMap>> CreateContainerBackedMap(
absl::Span<const std::pair<CelValue, CelValue>> key_values) {
auto map = std::make_unique<CelMapBuilder>();
for (const auto& key_value : key_values) {
CEL_RETURN_IF_ERROR(map->Add(key_value.first, key_value.second));
}
return map;
}
}
}
}
} | #include "eval/public/containers/container_backed_map_impl.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "eval/public/cel_value.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::IsNull;
using ::testing::Not;
TEST(ContainerBackedMapImplTest, TestMapInt64) {
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateInt64(1), CelValue::CreateInt64(2)},
{CelValue::CreateInt64(2), CelValue::CreateInt64(3)}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
ASSERT_THAT(cel_map, Not(IsNull()));
EXPECT_THAT(cel_map->size(), Eq(2));
auto lookup1 = (*cel_map)[CelValue::CreateInt64(1)];
ASSERT_TRUE(lookup1);
CelValue cel_value = lookup1.value();
ASSERT_TRUE(cel_value.IsInt64());
EXPECT_THAT(cel_value.Int64OrDie(), 2);
auto lookup2 = (*cel_map)[CelValue::CreateUint64(1)];
ASSERT_FALSE(lookup2);
auto lookup3 = (*cel_map)[CelValue::CreateInt64(3)];
ASSERT_FALSE(lookup3);
}
TEST(ContainerBackedMapImplTest, TestMapUint64) {
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateUint64(1), CelValue::CreateInt64(2)},
{CelValue::CreateUint64(2), CelValue::CreateInt64(3)}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
ASSERT_THAT(cel_map, Not(IsNull()));
EXPECT_THAT(cel_map->size(), Eq(2));
auto lookup1 = (*cel_map)[CelValue::CreateUint64(1)];
ASSERT_TRUE(lookup1);
CelValue cel_value = lookup1.value();
ASSERT_TRUE(cel_value.IsInt64());
EXPECT_THAT(cel_value.Int64OrDie(), 2);
auto lookup2 = (*cel_map)[CelValue::CreateInt64(1)];
ASSERT_FALSE(lookup2);
auto lookup3 = (*cel_map)[CelValue::CreateUint64(3)];
ASSERT_FALSE(lookup3);
}
TEST(ContainerBackedMapImplTest, TestMapString) {
const std::string kKey1 = "1";
const std::string kKey2 = "2";
const std::string kKey3 = "3";
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateString(&kKey1), CelValue::CreateInt64(2)},
{CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)}};
auto cel_map =
CreateContainerBackedMap(
absl::Span<std::pair<CelValue, CelValue>>(args.data(), args.size()))
.value();
ASSERT_THAT(cel_map, Not(IsNull()));
EXPECT_THAT(cel_map->size(), Eq(2));
auto lookup1 = (*cel_map)[CelValue::CreateString(&kKey1)];
ASSERT_TRUE(lookup1);
CelValue cel_value = lookup1.value();
ASSERT_TRUE(cel_value.IsInt64());
EXPECT_THAT(cel_value.Int64OrDie(), 2);
auto lookup2 = (*cel_map)[CelValue::CreateInt64(1)];
ASSERT_FALSE(lookup2);
auto lookup3 = (*cel_map)[CelValue::CreateString(&kKey3)];
ASSERT_FALSE(lookup3);
}
TEST(CelMapBuilder, TestMapString) {
const std::string kKey1 = "1";
const std::string kKey2 = "2";
const std::string kKey3 = "3";
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateString(&kKey1), CelValue::CreateInt64(2)},
{CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)}};
CelMapBuilder builder;
ASSERT_OK(
builder.Add(CelValue::CreateString(&kKey1), CelValue::CreateInt64(2)));
ASSERT_OK(
builder.Add(CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)));
CelMap* cel_map = &builder;
ASSERT_THAT(cel_map, Not(IsNull()));
EXPECT_THAT(cel_map->size(), Eq(2));
auto lookup1 = (*cel_map)[CelValue::CreateString(&kKey1)];
ASSERT_TRUE(lookup1);
CelValue cel_value = lookup1.value();
ASSERT_TRUE(cel_value.IsInt64());
EXPECT_THAT(cel_value.Int64OrDie(), 2);
auto lookup2 = (*cel_map)[CelValue::CreateInt64(1)];
ASSERT_FALSE(lookup2);
auto lookup3 = (*cel_map)[CelValue::CreateString(&kKey3)];
ASSERT_FALSE(lookup3);
}
TEST(CelMapBuilder, RepeatKeysFail) {
const std::string kKey1 = "1";
const std::string kKey2 = "2";
std::vector<std::pair<CelValue, CelValue>> args = {
{CelValue::CreateString(&kKey1), CelValue::CreateInt64(2)},
{CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)}};
CelMapBuilder builder;
ASSERT_OK(
builder.Add(CelValue::CreateString(&kKey1), CelValue::CreateInt64(2)));
ASSERT_OK(
builder.Add(CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)));
EXPECT_THAT(
builder.Add(CelValue::CreateString(&kKey2), CelValue::CreateInt64(3)),
StatusIs(absl::StatusCode::kInvalidArgument, "duplicate map keys"));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/container_backed_map_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/containers/container_backed_map_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
77752fe8-7d7c-4e1f-b5a5-4603ebf65abd | cpp | tensorflow/tensorflow | calibration_parameters | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_CALIBRATION_PARAMETERS_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_CALIBRATION_CALIBRATION_PARAMETERS_H_
#include <algorithm>
#include <cmath>
#include <cstdint>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
namespace stablehlo::quantization {
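// Width of a histogram bin: the raw width (max_value - min_value) / num_bins,
// rounded up to the nearest power of two.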
inline float CalculateBinWidth(const float min_value, const float max_value,
const int32_t num_bins) {
const float raw_bin_width = (max_value - min_value) / num_bins;
return std::pow(2, std::ceil(std::log2(raw_bin_width)));
}
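// Lower bound of the histogram: min_value rounded down to a multiple of
// bin_width.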
inline float CalculateLowerBound(const float min_value, const float bin_width) {
return std::floor(min_value / bin_width) * bin_width;
}
inline int32_t CalculateBinIndex(const float value, const float lower_bound,
const float bin_width) {
return std::floor((value - lower_bound) / bin_width);
}
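// Like CalculateBinIndex, but clamps the result to [0, num_bins - 1].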
inline int32_t CalculateBinIndexSafe(const float value, const float lower_bound,
const float bin_width,
const int32_t num_bins) {
const int32_t bin_index = CalculateBinIndex(value, lower_bound, bin_width);
return std::clamp(bin_index, 0, num_bins - 1);
}
inline bool IsHistogramCalibration(
const CalibrationOptions::CalibrationMethod method) {
return method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE ||
method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE ||
method == CalibrationOptions::
CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY ||
method ==
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC;
}
inline int32_t GetNumBins(const CalibrationOptions& calib_opts) {
return IsHistogramCalibration(calib_opts.calibration_method())
? calib_opts.calibration_parameters().num_bins()
: 0;
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h"
#include <cmath>
#include <cstdint>
#include <gtest/gtest.h>
namespace stablehlo::quantization {
namespace {
inline int32_t CalculateActualNumBins(const float min_value,
const float max_value,
const float bin_width) {
const float lower_bound = CalculateLowerBound(min_value, bin_width);
return std::ceil((max_value - lower_bound) / bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthSmallerThanOne) {
float bin_width = CalculateBinWidth(0.0, 25.0,
256);
EXPECT_FLOAT_EQ(bin_width, 0.125);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 25.0, bin_width);
EXPECT_EQ(actual_num_bins, 200);
float raw_bin_width = 25.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthLargerThanOne) {
float bin_width = CalculateBinWidth(0.0, 360.0,
256);
EXPECT_FLOAT_EQ(bin_width, 2.0);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 360.0, bin_width);
EXPECT_EQ(actual_num_bins, 180);
float raw_bin_width = 360.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateBinWidthDivisible) {
float bin_width = CalculateBinWidth(0.0, 256.0,
256);
EXPECT_FLOAT_EQ(bin_width, 1.0);
int32_t actual_num_bins =
CalculateActualNumBins(0.0, 256.0, bin_width);
EXPECT_EQ(actual_num_bins, 256);
float raw_bin_width = 256.0 / actual_num_bins;
EXPECT_FLOAT_EQ(bin_width, raw_bin_width);
}
TEST(CalibrationParametersTest, CalculateNumBinsDivisible) {
int32_t num_bins = CalculateActualNumBins(
0.0, 4.0, 2.0);
EXPECT_EQ(num_bins, 2);
}
TEST(CalibrationParametersTest, CalculateNumBinsNotDivisible) {
int32_t num_bins = CalculateActualNumBins(
0.0, 5.0, 2.0);
EXPECT_EQ(num_bins, 3);
}
TEST(CalibrationParametersTest, CalculateBinIndex) {
int32_t bin_index = CalculateBinIndexSafe(3.0, 0.0,
2.0, 2);
EXPECT_EQ(bin_index, 1);
}
TEST(CalibrationParametersTest, CalculateBinIndexMaxValue) {
int32_t bin_index = CalculateBinIndexSafe(4.0, 0.0,
2.0, 2);
EXPECT_EQ(bin_index, 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/calibration_parameters_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30e35d91-5cf7-4732-a738-7228eeaa41a6 | cpp | google/quiche | moqt_subscribe_windows | quiche/quic/moqt/moqt_subscribe_windows.cc | quiche/quic/moqt/moqt_subscribe_windows_test.cc | #include "quiche/quic/moqt/moqt_subscribe_windows.h"
#include <optional>
#include <vector>
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/web_transport/web_transport.h"
namespace moqt {
bool SubscribeWindow::InWindow(const FullSequence& seq) const {
if (seq < start_) {
return false;
}
return (!end_.has_value() || seq <= *end_);
}
std::optional<webtransport::StreamId> SendStreamMap::GetStreamForSequence(
FullSequence sequence) const {
ReducedSequenceIndex index(sequence, forwarding_preference_);
auto stream_it = send_streams_.find(index);
if (stream_it == send_streams_.end()) {
return std::nullopt;
}
return stream_it->second;
}
void SendStreamMap::AddStream(FullSequence sequence,
webtransport::StreamId stream_id) {
ReducedSequenceIndex index(sequence, forwarding_preference_);
if (forwarding_preference_ == MoqtForwardingPreference::kDatagram) {
QUIC_BUG(quic_bug_moqt_draft_03_01) << "Adding a stream for datagram";
return;
}
auto [stream_it, success] = send_streams_.emplace(index, stream_id);
QUIC_BUG_IF(quic_bug_moqt_draft_03_02, !success) << "Stream already added";
}
void SendStreamMap::RemoveStream(FullSequence sequence,
webtransport::StreamId stream_id) {
ReducedSequenceIndex index(sequence, forwarding_preference_);
QUICHE_DCHECK(send_streams_.contains(index) &&
send_streams_.find(index)->second == stream_id)
<< "Requested to remove a stream ID that does not match the one in the "
"map";
send_streams_.erase(index);
}
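// Only narrowing is allowed: the new start must lie inside the current window
// and the new end must not extend beyond the current one.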
bool SubscribeWindow::UpdateStartEnd(FullSequence start,
std::optional<FullSequence> end) {
if (!InWindow(start)) {
return false;
}
if (end_.has_value() && (!end.has_value() || *end_ < *end)) {
return false;
}
start_ = start;
end_ = end;
return true;
}
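// Reduces a full sequence to the index identifying its send stream: one stream
// per track, one per group for the subgroup preference, and the unreduced
// sequence for datagrams (which are not mapped to streams).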
ReducedSequenceIndex::ReducedSequenceIndex(
FullSequence sequence, MoqtForwardingPreference preference) {
switch (preference) {
case MoqtForwardingPreference::kTrack:
sequence_ = FullSequence(0, 0);
break;
case MoqtForwardingPreference::kSubgroup:
sequence_ = FullSequence(sequence.group, 0);
break;
case MoqtForwardingPreference::kDatagram:
sequence_ = sequence;
return;
}
}
std::vector<webtransport::StreamId> SendStreamMap::GetAllStreams() const {
std::vector<webtransport::StreamId> ids;
for (const auto& [index, id] : send_streams_) {
ids.push_back(id);
}
return ids;
}
} | #include "quiche/quic/moqt/moqt_subscribe_windows.h"
#include <cstdint>
#include <optional>
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/common/platform/api/quiche_export.h"
namespace moqt {
namespace test {
class QUICHE_EXPORT SubscribeWindowTest : public quic::test::QuicTest {
public:
SubscribeWindowTest() {}
const uint64_t subscribe_id_ = 2;
const FullSequence start_{4, 0};
const FullSequence end_{5, 5};
};
TEST_F(SubscribeWindowTest, Queries) {
SubscribeWindow window(start_, end_);
EXPECT_TRUE(window.InWindow(FullSequence(4, 0)));
EXPECT_TRUE(window.InWindow(FullSequence(5, 5)));
EXPECT_FALSE(window.InWindow(FullSequence(5, 6)));
EXPECT_FALSE(window.InWindow(FullSequence(6, 0)));
EXPECT_FALSE(window.InWindow(FullSequence(3, 12)));
}
TEST_F(SubscribeWindowTest, AddQueryRemoveStreamIdTrack) {
SendStreamMap stream_map(MoqtForwardingPreference::kTrack);
stream_map.AddStream(FullSequence{4, 0}, 2);
EXPECT_QUIC_BUG(stream_map.AddStream(FullSequence{5, 2}, 6),
"Stream already added");
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(5, 2)), 2);
stream_map.RemoveStream(FullSequence{7, 2}, 2);
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(4, 0)), std::nullopt);
}
TEST_F(SubscribeWindowTest, AddQueryRemoveStreamIdSubgroup) {
SendStreamMap stream_map(MoqtForwardingPreference::kSubgroup);
stream_map.AddStream(FullSequence{4, 0}, 2);
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(5, 0)), std::nullopt);
stream_map.AddStream(FullSequence{5, 2}, 6);
EXPECT_QUIC_BUG(stream_map.AddStream(FullSequence{5, 3}, 6),
"Stream already added");
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(4, 1)), 2);
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(5, 0)), 6);
stream_map.RemoveStream(FullSequence{5, 1}, 6);
EXPECT_EQ(stream_map.GetStreamForSequence(FullSequence(5, 2)), std::nullopt);
}
TEST_F(SubscribeWindowTest, AddQueryRemoveStreamIdDatagram) {
SendStreamMap stream_map(MoqtForwardingPreference::kDatagram);
EXPECT_QUIC_BUG(stream_map.AddStream(FullSequence{4, 0}, 2),
"Adding a stream for datagram");
}
TEST_F(SubscribeWindowTest, UpdateStartEnd) {
SubscribeWindow window(start_, end_);
EXPECT_TRUE(window.UpdateStartEnd(start_.next(),
FullSequence(end_.group, end_.object - 1)));
EXPECT_FALSE(window.InWindow(FullSequence(start_.group, start_.object)));
EXPECT_FALSE(window.InWindow(FullSequence(end_.group, end_.object)));
EXPECT_FALSE(
window.UpdateStartEnd(start_, FullSequence(end_.group, end_.object - 1)));
EXPECT_FALSE(window.UpdateStartEnd(start_.next(), end_));
}
TEST_F(SubscribeWindowTest, UpdateStartEndOpenEnded) {
SubscribeWindow window(start_, std::nullopt);
EXPECT_TRUE(window.UpdateStartEnd(start_, end_));
EXPECT_FALSE(window.InWindow(end_.next()));
EXPECT_FALSE(window.UpdateStartEnd(start_, std::nullopt));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_subscribe_windows.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_subscribe_windows_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
243c6b17-25a6-4704-95ee-94c556bbfcf9 | cpp | google/langsvr | result | include/langsvr/result.h | src/result_test.cc | #ifndef LANGSVR_UTILS_RESULT_RESULT_H_
#define LANGSVR_UTILS_RESULT_RESULT_H_
#include <cassert>
#include <ostream>
#include <string>
#include <utility>
#include <variant>
#include "langsvr/traits.h"
namespace langsvr {
struct SuccessType {};
static constexpr const SuccessType Success;
struct Failure {
std::string reason;
};
static inline std::ostream& operator<<(std::ostream& out, const Failure& failure) {
return out << failure.reason;
}
template <typename SUCCESS_TYPE, typename FAILURE_TYPE = Failure>
struct [[nodiscard]] Result {
using ResultSuccess = SUCCESS_TYPE;
using ResultFailure = FAILURE_TYPE;
static_assert(!std::is_same_v<SUCCESS_TYPE, FAILURE_TYPE>,
"Result must not have the same type for SUCCESS_TYPE and FAILURE_TYPE");
Result() : value(std::monostate{}) {}
Result(const SUCCESS_TYPE& success) : value{success} {}
Result(SUCCESS_TYPE&& success) : value(std::move(SUCCESS_TYPE(std::move(success)))) {}
Result(const FAILURE_TYPE& failure) : value{failure} {}
Result(FAILURE_TYPE&& failure) : value{std::move(failure)} {}
template <typename S,
typename F,
typename = std::void_t<decltype(SUCCESS_TYPE{std::declval<S>()}),
decltype(FAILURE_TYPE{std::declval<F>()})>>
Result(const Result<S, F>& other) {
if (other == Success) {
value = SUCCESS_TYPE{other.Get()};
} else {
value = FAILURE_TYPE{other.Failure()};
}
}
const SUCCESS_TYPE* operator->() const {
Validate();
return &(Get());
}
SUCCESS_TYPE* operator->() {
Validate();
return &(Get());
}
const SUCCESS_TYPE& Get() const {
Validate();
return std::get<SUCCESS_TYPE>(value);
}
SUCCESS_TYPE& Get() {
Validate();
return std::get<SUCCESS_TYPE>(value);
}
SUCCESS_TYPE&& Move() {
Validate();
return std::get<SUCCESS_TYPE>(std::move(value));
}
const FAILURE_TYPE& Failure() const {
Validate();
return std::get<FAILURE_TYPE>(value);
}
template <typename T>
bool operator==(const Result& other) const {
return value == other.value;
}
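    // Compares against SuccessType, a success value, or a failure value; the
    // alternative to test is selected at compile time from the argument type.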
template <typename T>
bool operator==(const T& val) const {
Validate();
using D = std::decay_t<T>;
static constexpr bool is_success = std::is_same_v<D, SuccessType>;
static constexpr bool is_success_ty =
std::is_same_v<D, SUCCESS_TYPE> ||
(IsStringLike<SUCCESS_TYPE> && IsStringLike<D>);
static constexpr bool is_failure_ty =
std::is_same_v<D, FAILURE_TYPE> ||
(IsStringLike<FAILURE_TYPE> && IsStringLike<D>);
static_assert(is_success || is_success_ty || is_failure_ty,
"unsupported type for Result equality operator");
static_assert(!(is_success_ty && is_failure_ty),
"ambiguous success / failure type for Result equality operator");
if constexpr (is_success) {
return std::holds_alternative<SUCCESS_TYPE>(value);
} else if constexpr (is_success_ty) {
if (auto* v = std::get_if<SUCCESS_TYPE>(&value)) {
return *v == val;
}
return false;
} else if constexpr (is_failure_ty) {
if (auto* v = std::get_if<FAILURE_TYPE>(&value)) {
return *v == val;
}
return false;
}
}
template <typename T>
bool operator!=(const T& val) const {
return !(*this == val);
}
private:
void Validate() const { assert(!std::holds_alternative<std::monostate>(value)); }
std::variant<std::monostate, SUCCESS_TYPE, FAILURE_TYPE> value;
};
template <typename SUCCESS, typename FAILURE>
static inline std::ostream& operator<<(std::ostream& out, const Result<SUCCESS, FAILURE>& res) {
if (res == Success) {
if constexpr (HasOperatorShiftLeft<std::ostream&, SUCCESS>) {
return out << "success: " << res.Get();
} else {
return out << "success";
}
} else {
if constexpr (HasOperatorShiftLeft<std::ostream&, FAILURE>) {
return out << "failure: " << res.Failure();
} else {
return out << "failure";
}
}
}
}
#endif | #include "langsvr/result.h"
#include <string>
#include "gmock/gmock.h"
namespace langsvr {
namespace {
struct S {
int value;
};
static inline bool operator==(const S& a, const S& b) {
return a.value == b.value;
}
TEST(ResultTest, SuccessInt) {
auto r = Result<int>(123);
ASSERT_EQ(r, Success);
EXPECT_EQ(r.Get(), 123);
}
TEST(ResultTest, SuccessStruct) {
auto r = Result<S>({123});
ASSERT_EQ(r, Success);
EXPECT_EQ(r->value, 123);
EXPECT_EQ(r, S{123});
}
TEST(ResultTest, Failure) {
auto r = Result<int>(Failure{});
EXPECT_NE(r, Success);
}
TEST(ResultTest, CustomFailure) {
auto r = Result<int, std::string>("oh noes!");
EXPECT_NE(r, Success);
EXPECT_EQ(r.Failure(), "oh noes!");
}
TEST(ResultTest, ValueCast) {
struct X {};
struct Y : X {};
Y* y = nullptr;
auto r_y = Result<Y*>{y};
auto r_x = Result<X*>{r_y};
(void)r_x;
(void)r_y;
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/include/langsvr/result.h | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/result_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
6e6030d9-01ce-44e0-b935-a3ee95e13c9b | cpp | google/quiche | priority_update_payload_decoder | quiche/http2/decoder/payload_decoders/priority_update_payload_decoder.cc | quiche/http2/decoder/payload_decoders/priority_update_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/priority_update_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::ostream& operator<<(std::ostream& out,
PriorityUpdatePayloadDecoder::PayloadState v) {
switch (v) {
case PriorityUpdatePayloadDecoder::PayloadState::kStartDecodingFixedFields:
return out << "kStartDecodingFixedFields";
case PriorityUpdatePayloadDecoder::PayloadState::kResumeDecodingFixedFields:
return out << "kResumeDecodingFixedFields";
case PriorityUpdatePayloadDecoder::PayloadState::kHandleFixedFieldsStatus:
return out << "kHandleFixedFieldsStatus";
case PriorityUpdatePayloadDecoder::PayloadState::kReadPriorityFieldValue:
return out << "kReadPriorityFieldValue";
}
int unknown = static_cast<int>(v);
QUICHE_BUG(http2_bug_173_1)
<< "Invalid PriorityUpdatePayloadDecoder::PayloadState: " << unknown;
return out << "PriorityUpdatePayloadDecoder::PayloadState(" << unknown << ")";
}
DecodeStatus PriorityUpdatePayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "PriorityUpdatePayloadDecoder::StartDecodingPayload: "
<< state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::PRIORITY_UPDATE, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
QUICHE_DCHECK_EQ(0, state->frame_header().flags);
state->InitializeRemainders();
payload_state_ = PayloadState::kStartDecodingFixedFields;
return ResumeDecodingPayload(state, db);
}
DecodeStatus PriorityUpdatePayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "PriorityUpdatePayloadDecoder::ResumeDecodingPayload: "
"remaining_payload="
<< state->remaining_payload()
<< ", db->Remaining=" << db->Remaining();
const Http2FrameHeader& frame_header = state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::PRIORITY_UPDATE, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
QUICHE_DCHECK_NE(PayloadState::kHandleFixedFieldsStatus, payload_state_);
DecodeStatus status = DecodeStatus::kDecodeError;
size_t avail;
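  // Decode the fixed Http2PriorityUpdateFields first, then stream the
  // variable-length Priority Field Value to the listener; the states below
  // intentionally fall through.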
while (true) {
QUICHE_DVLOG(2)
<< "PriorityUpdatePayloadDecoder::ResumeDecodingPayload payload_state_="
<< payload_state_;
switch (payload_state_) {
case PayloadState::kStartDecodingFixedFields:
status = state->StartDecodingStructureInPayload(
&priority_update_fields_, db);
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kHandleFixedFieldsStatus:
if (status == DecodeStatus::kDecodeDone) {
state->listener()->OnPriorityUpdateStart(frame_header,
priority_update_fields_);
} else {
QUICHE_DCHECK((status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError &&
state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
payload_state_ = PayloadState::kResumeDecodingFixedFields;
return status;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kReadPriorityFieldValue:
avail = db->Remaining();
if (avail > 0) {
state->listener()->OnPriorityUpdatePayload(db->cursor(), avail);
db->AdvanceCursor(avail);
state->ConsumePayload(avail);
}
if (state->remaining_payload() > 0) {
payload_state_ = PayloadState::kReadPriorityFieldValue;
return DecodeStatus::kDecodeInProgress;
}
state->listener()->OnPriorityUpdateEnd();
return DecodeStatus::kDecodeDone;
case PayloadState::kResumeDecodingFixedFields:
status = state->ResumeDecodingStructureInPayload(
&priority_update_fields_, db);
payload_state_ = PayloadState::kHandleFixedFieldsStatus;
continue;
}
QUICHE_BUG(http2_bug_173_2) << "PayloadState: " << payload_state_;
}
}
} | #include "quiche/http2/decoder/payload_decoders/priority_update_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class PriorityUpdatePayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::PRIORITY_UPDATE;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnPriorityUpdateStart(
const Http2FrameHeader& header,
const Http2PriorityUpdateFields& priority_update) override {
QUICHE_VLOG(1) << "OnPriorityUpdateStart header: " << header
<< "; priority_update: " << priority_update;
StartFrame(header)->OnPriorityUpdateStart(header, priority_update);
}
void OnPriorityUpdatePayload(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnPriorityUpdatePayload: len=" << len;
CurrentFrame()->OnPriorityUpdatePayload(data, len);
}
void OnPriorityUpdateEnd() override {
QUICHE_VLOG(1) << "OnPriorityUpdateEnd";
EndFrame()->OnPriorityUpdateEnd();
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class PriorityUpdatePayloadDecoderTest
: public AbstractPayloadDecoderTest<PriorityUpdatePayloadDecoder,
PriorityUpdatePayloadDecoderPeer,
Listener> {};
TEST_F(PriorityUpdatePayloadDecoderTest, Truncated) {
auto approve_size = [](size_t size) {
return size != Http2PriorityUpdateFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(Http2PriorityUpdateFields(123));
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
class PriorityUpdatePayloadLengthTests
: public AbstractPayloadDecoderTest<PriorityUpdatePayloadDecoder,
PriorityUpdatePayloadDecoderPeer,
Listener>,
public ::testing::WithParamInterface<uint32_t> {
protected:
PriorityUpdatePayloadLengthTests() : length_(GetParam()) {
QUICHE_VLOG(1) << "################ length_=" << length_
<< " ################";
}
const uint32_t length_;
};
INSTANTIATE_TEST_SUITE_P(VariousLengths, PriorityUpdatePayloadLengthTests,
::testing::Values(0, 1, 2, 3, 4, 5, 6));
TEST_P(PriorityUpdatePayloadLengthTests, ValidLength) {
Http2PriorityUpdateFields priority_update;
Randomize(&priority_update, RandomPtr());
std::string priority_field_value = Random().RandString(length_);
Http2FrameBuilder fb;
fb.Append(priority_update);
fb.Append(priority_field_value);
Http2FrameHeader header(fb.size(), Http2FrameType::PRIORITY_UPDATE,
RandFlags(), RandStreamId());
set_frame_header(header);
FrameParts expected(header, priority_field_value);
expected.SetOptPriorityUpdate(Http2PriorityUpdateFields{priority_update});
ASSERT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/priority_update_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/priority_update_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4fb9f5f6-6c15-4a67-965b-bbbacbf591aa | cpp | tensorflow/tensorflow | fusion_merger | third_party/xla/xla/service/gpu/transforms/fusion_merger.cc | third_party/xla/xla/service/gpu/transforms/fusion_merger_test.cc | #include "xla/service/gpu/transforms/fusion_merger.h"
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
class FusionInstructionMerger {
public:
explicit FusionInstructionMerger(
HloComputation* computation, const se::DeviceDescription& gpu_device_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function)
: computation_(computation),
shape_size_function_(shape_size_function),
gpu_device_info_(gpu_device_info),
dump_fusion_visualization_(computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization()) {}
absl::Status Run();
bool changed() const { return changed_; }
private:
FusionDecision ShouldFuse(HloInstruction* producer);
absl::Status FuseIntoAllUsers(HloInstruction* producer);
HloComputation* computation_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
std::optional<GpuHloCostAnalysis> cost_analysis_;
FusionInfoCache fusion_info_cache_;
const se::DeviceDescription& gpu_device_info_;
bool changed_ = false;
bool dump_fusion_visualization_ = false;
int total_visited_ = 0;
int total_merged_ = 0;
int num_fail_no_users_ = 0;
int num_fail_not_loop_fusion_ = 0;
int num_fail_merge_all_users_ = 0;
int num_fail_inefficient_fusion_emitter_ = 0;
int num_fail_fusion_too_large_ = 0;
int num_fail_uncoalesced_read_ = 0;
int num_fail_slower_if_fused_ = 0;
FusionInstructionMerger(const FusionInstructionMerger&) = delete;
FusionInstructionMerger& operator=(const FusionInstructionMerger&) = delete;
};
absl::Status FusionInstructionMerger::FuseIntoAllUsers(
HloInstruction* producer) {
std::vector<HloInstruction*> users = producer->users();
for (HloInstruction* user : users) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*user,
producer);
}
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user));
HloInstruction* consumer = user;
if (consumer->opcode() != HloOpcode::kFusion) {
consumer = computation_->AddInstruction(HloInstruction::CreateFusion(
user->shape(), ChooseFusionKind(*producer, *user), user));
TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer));
}
consumer->MergeFusionInstruction(producer);
TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer));
fusion_info_cache_.Invalidate(consumer);
if (dump_fusion_visualization_) {
RegisterFusionState(*computation_,
absl::StrCat("Fused |", producer->name(), "| into |",
user->name(), "| inside FusionMerger"),
*consumer);
}
changed_ = true;
}
CHECK_EQ(0, producer->user_count()) << producer->ToString();
TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer));
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer));
fusion_info_cache_.Invalidate(producer);
VLOG(2) << "Merged fusion instruction: " << producer->name()
<< " into users { "
<< absl::StrJoin(users, ", ",
[](std::string* out, HloInstruction* user) {
absl::StrAppend(out, user->name());
})
<< " }";
return absl::OkStatus();
}
absl::Status FusionInstructionMerger::Run() {
for (HloInstruction* producer : computation_->MakeInstructionPostOrder()) {
if (producer->opcode() != HloOpcode::kFusion) {
continue;
}
FusionDecision should_fuse = ShouldFuse(producer);
if (should_fuse) {
TF_RETURN_IF_ERROR(FuseIntoAllUsers(producer));
++total_merged_;
} else {
      VLOG(3) << "Not fusing fusion |" << producer->name()
              << "| with all of its users due to: " << should_fuse.Explain();
if (dump_fusion_visualization_ && !producer->users().empty()) {
RegisterFusionState(
*computation_,
absl::StrCat(
"Not fusing fusion |", producer->name(),
"| into all of its users due to: ", should_fuse.Explain()),
*producer->users()[0],
producer);
}
}
}
VLOG(1) << "FusionInstructionMerger EXIT"
<< " computation: " << computation_->name()
<< " total_visited: " << total_visited_
<< " total_merged: " << total_merged_ << " merge failures { "
<< " no_users: " << num_fail_no_users_
<< " not_loop_fusion: " << num_fail_not_loop_fusion_
<< " merge_all_users: " << num_fail_merge_all_users_
<< " uncoalesced_read: " << num_fail_uncoalesced_read_
<< " inefficient_fusion_emitter: "
<< num_fail_inefficient_fusion_emitter_
<< " slower_if_fused: " << num_fail_slower_if_fused_
<< " fusion_too_large: " << num_fail_fusion_too_large_ << " }";
return absl::OkStatus();
}
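// Heuristic: returns true once physically-transposing instructions inside the
// fusion account for at least half of the fusion root's element count.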
bool TransposesMostData(const HloInstruction& fusion) {
float score = 0;
for (const HloInstruction* instr : fusion.fused_instructions()) {
if (IsPhysicallyTransposing(*instr)) {
score += 1.0 * ShapeUtil::ElementsInRecursive(instr->shape()) /
ShapeUtil::ElementsInRecursive(fusion.shape());
if (score >= 0.5) {
VLOG(3) << fusion.ToString() << " transpose ratio exceeds " << score;
return true;
}
}
}
return false;
}
FusionDecision FusionInstructionMerger::ShouldFuse(HloInstruction* producer) {
++total_visited_;
VLOG(4) << "Considering producer " << producer->name();
if (producer->users().empty()) {
++num_fail_no_users_;
return FusionDecision::Forbid("fusion has no users");
}
if (!producer->IsLoopFusion()) {
++num_fail_not_loop_fusion_;
return FusionDecision::Forbid("not a loop fusion");
}
auto producer_hero = GetRealHeroForMultiOutputFusion(*producer);
bool has_reduction_user = false;
for (const HloInstruction* user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
++num_fail_merge_all_users_;
return FusionDecision::Forbid("not fusing bitcast ops");
}
if (user->IsCustomFusion()) {
++num_fail_merge_all_users_;
return FusionDecision::Forbid("not fusing custom fusions");
}
auto consumer_hero = GetRealHeroForMultiOutputFusion(*user);
if (auto compatible =
FusionHeroesAreCompatible(producer_hero, consumer_hero);
!compatible) {
return compatible;
}
FusionDecision fusible = IsProducerConsumerFusible(*producer, *user);
if (!fusible) {
++num_fail_merge_all_users_;
VLOG(9) << user->ToString();
return fusible;
}
if (IsInputFusibleReduction(*user)) {
has_reduction_user = true;
}
}
if (has_reduction_user && TransposesMostData(*producer)) {
++num_fail_uncoalesced_read_;
return FusionDecision::Forbid("would read mostly uncoalesced");
}
for (const HloInstruction* user : producer->users()) {
FusionDecision fits = FusionFitsInBudget(
*user, *producer, gpu_device_info_,
true, &fusion_info_cache_);
if (!fits) {
++num_fail_fusion_too_large_;
return fits;
}
}
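  // The cost analysis is constructed lazily, only after a producer has passed
  // the cheaper structural checks above, and is then reused across calls.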
if (!cost_analysis_) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
cost_analysis_.emplace(
GpuHloCostAnalysis::Options{shape_size_function_,
{},
{},
true},
gpu_device_info_);
TF_CHECK_OK(computation_->Accept(&cost_analysis_.value()));
}
for (const HloInstruction* user : producer->users()) {
if (cost_analysis_->ProducerConsumerMergedTooLarge(*producer, *user)) {
++num_fail_inefficient_fusion_emitter_;
return FusionDecision::Forbid("if merged with ")
<< user->name() << " will generate huge IR";
}
}
GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes(
producer, gpu_device_info_, &*cost_analysis_,
GpuPerformanceModelOptions::Default(), producer->users());
if (t.time_fused > t.time_unfused) {
++num_fail_slower_if_fused_;
return FusionDecision::Forbid("will execute slower if fused");
}
return FusionDecision::Allow();
}
absl::StatusOr<bool> FusionMerger::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
VLOG(1) << "FusionMerger for module: " << module->name();
for (auto* computation :
module->MakeNonfusionComputations(execution_threads)) {
VLOG(9) << "Before running FusionInstructionMerger for computation: "
<< computation->name();
XLA_VLOG_LINES(9, computation->ToString());
FusionInstructionMerger fusion_merger(computation, gpu_device_info_,
shape_size_function_);
TF_RETURN_IF_ERROR(fusion_merger.Run());
changed |= fusion_merger.changed();
VLOG(9) << "After running FusionInstructionMerger for computation: "
<< computation->name() << " changed: " << changed;
XLA_VLOG_LINES(9, computation->ToString());
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/fusion_merger.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class FusionMergerTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
FusionMerger fusion_merger_{TestGpuDeviceInfo::RTXA6000DeviceInfo(),
ShapeSizeBytesFunction()};
FusionMergerTest() : HloTestBase() {}
};
TEST_F(FusionMergerTest, MergeSharedFusionInstruction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
comp.3 {
constant.param_0 = f32[4]{0} parameter(0)
param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1)
get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0
ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6)
}
comp.2 {
param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1
get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2
ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY MergeSharedFusionInstruction.Computation0 {
constant = f32[4]{0} constant({1, 1, 1, 1})
param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3
fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2
fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1
fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
auto* root = module->entry_computation()->root_instruction();
EXPECT_EQ(HloOpcode::kTuple, root->opcode());
auto* operand0 = root->operand(0);
EXPECT_EQ(HloOpcode::kFusion, operand0->opcode());
EXPECT_EQ(4, operand0->fused_instruction_count());
auto* operand1 = root->operand(1);
EXPECT_EQ(HloOpcode::kFusion, operand1->opcode());
EXPECT_EQ(7, operand1->fused_instruction_count());
auto* operand2 = root->operand(2);
EXPECT_EQ(HloOpcode::kFusion, operand2->opcode());
EXPECT_EQ(7, operand2->fused_instruction_count());
}
TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT _ = f32[] add(x, y)
}
comp0 {
p = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(0)
gte0 = f32[2048] get-tuple-element(p), index=0
gte1 = f32[2048] get-tuple-element(p), index=1
add.9 = f32[2048] add(gte0, gte1)
gte2 = f32[2048] get-tuple-element(p), index=2
add.10 = f32[2048] add(add.9, gte2)
gte3 = f32[2048] get-tuple-element(p), index=3
add.11 = f32[2048] add(add.10, gte3)
p1 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(1)
gte4 = f32[2048] get-tuple-element(p1), index=0
gte5 = f32[2048] get-tuple-element(p1), index=1
add.12 = f32[2048] add(gte4, gte5)
gte6 = f32[2048] get-tuple-element(p1), index=2
add.13 = f32[2048] add(add.12, gte6)
gte7 = f32[2048] get-tuple-element(p1), index=3
add.14 = f32[2048] add(add.13, gte7)
ROOT r = f32[2048] add(add.14, add.11)
}
comp1 {
p = f32[2048] parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
}
comp2 {
p = f32[2048] parameter(0)
c0 = f32[] constant(0)
r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add
ROOT n = f32[] negate(r)
}
ENTRY m.Computation2 {
p0 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(0)
p1 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(1)
fusion.0 = f32[2048] fusion(p0, p1), kind=kLoop, calls=comp0
fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1
fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2
ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, LessMemoryAccessIfFused) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
comp.2 {
state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0
get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1
add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6)
get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2
ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7)
}
comp.1 {
add.1.param_1.1 = f32[4]{0} parameter(1)
constant.param_1.3 = f32[4]{0} parameter(0)
add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3)
ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3)
}
comp {
add.1.param_1 = f32[4]{0} parameter(1)
constant.param_1.1 = f32[4]{0} parameter(0)
multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1)
ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1)
}
ENTRY m.Computation2 {
constant = f32[4]{0} constant({1, 1, 1, 1})
state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0)
fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2
fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1
fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp
ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeIntoInputFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[32]{0} parameter(0)
ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[32]{0} parameter(0)
f2_mul = f32[32]{0} multiply(f2_p0, f2_p0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[32]{0} parameter(0)
f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())));
}
TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule jit_matmul.36
max (parameter.13: f32[], parameter.14: f32[]) -> f32[] {
parameter.13 = f32[] parameter(0)
parameter.14 = f32[] parameter(1)
ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14)
}
add (parameter.29: f32[], parameter.30: f32[]) -> f32[] {
parameter.29 = f32[] parameter(0)
parameter.30 = f32[] parameter(1)
ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30)
}
fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] {
param_1.4 = f32[200,200,200]{2,1,0} parameter(0)
param_2.1 = f32[200,200]{1,0} parameter(1)
broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2}
subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3)
exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0)
constant.27 = f32[] constant(0)
ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add
}
fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] {
param_1.9 = f32[200,200]{1,0} parameter(1)
broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1}
param_0.7 = f32[200,200]{1,0} parameter(0)
broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2}
ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8)
}
ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] {
parameter.2 = f32[200,200]{1,0} parameter(1)
parameter.1 = f32[200,200]{1,0} parameter(0)
fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3
constant.11 = f32[] constant(-inf)
reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max
ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter())));
}
TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0)
ROOT f1_root = f32[16,16,256]{2,1,0} copy(add)
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1_computation {
f1_p0 = f32[16,16,256]{0,1,2} parameter(0)
slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]}
f1_copy = f32[5,16,256]{2,1,0} copy(slice1)
slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]}
bitcast = f32[11,16,256]{2,1,0} bitcast(slice2)
ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0}
}
add_computation {
add_lhs = f32[] parameter(0)
add_rhs = f32[] parameter(1)
ROOT add_root = f32[] add(add_lhs, add_rhs)
}
f2_computation {
f2_p0 = f32[16,16,256]{2,1,0} parameter(0)
f2_zero = f32[] constant(0)
ROOT f2_root = f32[16,16] reduce(f2_p0, f2_zero), dimensions={2},
to_apply=add_computation
}
ENTRY entry {
p0 = f32[16,16,256]{0,1,2} parameter(0)
f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation
ROOT f2 = f32[16,16] fusion(f1), kind=kInput, calls=f2_computation
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, AvoidsLargeFusion) {
constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
std::vector<HloInstruction*> entry_params;
for (int64_t i = 0; i < kNumParams; ++i) {
entry_params.push_back(
b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p")));
}
auto make_fusion = [&](absl::Span<HloInstruction* const> params) {
HloComputation::Builder sub_builder("subcomp");
HloInstruction* sum = nullptr;
for (int64_t i = 0; i < params.size(); ++i) {
auto p = sub_builder.AddInstruction(
HloInstruction::CreateParameter(i, shape, "p"));
if (sum == nullptr) {
sum = p;
} else {
sum = sub_builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, p));
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(sub_builder.Build());
return HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, params, subcomp);
};
auto fusion = b.AddInstruction(
make_fusion(absl::MakeSpan(entry_params)
.subspan(0, MaxOperandsAndOutputsPerFusion())));
b.AddInstruction(make_fusion({entry_params.back(), fusion}));
module->AddEntryComputation(b.Build());
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
Arg_0.5 = f32[200000] parameter(0)
slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]}
slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]}
add.9 = f32[100000] add(slice.7, slice.8)
slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]}
slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]}
add.12 = f32[50000] add(slice.10, slice.11)
slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]}
slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]}
add.15 = f32[25000] add(slice.13, slice.14)
slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]}
slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]}
add.18 = f32[12500] add(slice.16, slice.17)
slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]}
slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]}
add.21 = f32[6250] add(slice.19, slice.20)
slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]}
slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]}
ROOT add.24 = f32[3125] add(slice.22, slice.23)
}
f2 {
Arg_0 = f32[3125] parameter(0)
slice.25 = f32[1562] slice(Arg_0), slice={[0:3124:2]}
slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]}
add.27 = f32[1562] add(slice.25, slice.26)
slice.28 = f32[781] slice(add.27), slice={[0:1561:2]}
slice.29 = f32[781] slice(add.27), slice={[1:1562:2]}
add.30 = f32[781] add(slice.28, slice.29)
slice.31 = f32[390] slice(add.30), slice={[0:780:2]}
slice.32 = f32[390] slice(add.30), slice={[1:781:2]}
add.33 = f32[390] add(slice.31, slice.32)
slice.34 = f32[195] slice(add.33), slice={[0:389:2]}
slice.35 = f32[195] slice(add.33), slice={[1:390:2]}
add.36 = f32[195] add(slice.34, slice.35)
slice.37 = f32[97] slice(add.36), slice={[0:194:2]}
slice.38 = f32[97] slice(add.36), slice={[1:195:2]}
add.39 = f32[97] add(slice.37, slice.38)
slice.40 = f32[48] slice(add.39), slice={[0:96:2]}
slice.41 = f32[48] slice(add.39), slice={[1:97:2]}
ROOT add.42 = f32[48] add(slice.40, slice.41)
}
ENTRY e {
p0 = f32[200000] parameter(0)
f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1
ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f1 {
p01 = s8[1000000] parameter(0)
ROOT s0 = s8[10] slice(p01), slice={[0:10]}
}
f2 {
p02 = s8[10] parameter(0)
ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0}
}
ENTRY e {
p0 = s8[1000000] parameter(0)
f1 = s8[10] fusion(p0), kind=kLoop, calls=f1
ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_a (p: f32[]) -> f32[1024,1024,1024] {
%p = f32[] parameter(0)
%b = f32[1024,1024,1024] broadcast(%p), dimensions={}
ROOT %t = f32[1024,1024,1024] tanh(%b)
}
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
ENTRY entry {
p0 = f32[] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a
f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b
f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
ROOT f4 = f32[1024,1024,1024] add(f2, f3)
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] tanh(%p)
}
%f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024] add(%p, %p)
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
%f_b {
%p = f32[1024,1024,1024] parameter(0)
%t1 = f32[1024,1024,1024] tanh(%p)
%t2 = f32[1024,1024,1024] tanh(%t1)
%t3 = f32[1024,1024,1024] tanh(%t2)
%t4 = f32[1024,1024,1024] tanh(%t3)
%t5 = f32[1024,1024,1024] tanh(%t4)
%t6 = f32[1024,1024,1024] tanh(%t5)
%t7 = f32[1024,1024,1024] tanh(%t6)
%t8 = f32[1024,1024,1024] tanh(%t7)
ROOT %t9 = f32[1024,1024,1024] tanh(%t8)
}
%f_c {
%p = f32[1024,1024,1024] parameter(0)
ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2}
}
ENTRY entry {
p0 = f32[1024,1024,1024] parameter(0)
f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b
ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c
})")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, NoMergeWithBitcast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.103 {
param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.420 = f32[8,512]{1,0} parameter(1)
bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420)
convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144)
bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252)
broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2}
divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481)
ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15)
}
fused_computation.105 {
param_1.426 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426)
transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2}
param_0.315 = f16[8,512]{1,0} parameter(0)
broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2}
subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482)
ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22)
}
fused_computation.104 {
param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0)
convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000)
constant_752 = f32[] constant(-0)
ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752),
dimensions={1}, to_apply=f32add
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105
bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105)
fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104
ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.45 {
param_1.194 = f16[8,1536,512]{2,1,0} parameter(1)
bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194)
param_0.135 = f16[8,512]{1,0} parameter(0)
broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2}
subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391)
ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6)
}
f32add {
x.634 = f32[] parameter(0)
y.635 = f32[] parameter(1)
ROOT add.636 = f32[] add(x.634, y.635)
}
fused_computation.44 {
param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869)
transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2}
bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212)
constant_429 = f32[] constant(0)
ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add
}
fused_computation.43 {
param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0)
param_1.188 = f32[8,512]{1,0} parameter(1)
bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188)
convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040)
bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220)
broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2}
divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390)
ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11)
}
ENTRY entry {
p0 = f16[8,1536,512]{2,1,0} parameter(0)
p1 = f16[8,512]{1,0} parameter(1)
fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45
fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44
ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43
}
)")
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CostBasedNoMerge) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
add_float_.56 {
x.57 = f32[] parameter(0)
y.58 = f32[] parameter(1)
ROOT add.59 = f32[] add(x.57, y.58)
}
fused_computation.66 {
constant.635 = f32[] constant(0)
broadcast.257 = f32[459,3]{1,0} broadcast(constant.635), dimensions={}
constant.641 = f32[] constant(1)
broadcast.256 = f32[459,3]{1,0} broadcast(constant.641), dimensions={}
broadcast.255 = f32[459]{0} broadcast(constant.635), dimensions={}
iota.28 = f32[459]{0} iota(), iota_dimension=0
constant.629 = f32[] constant(1.49891067)
broadcast.253 = f32[459]{0} broadcast(constant.629), dimensions={}
multiply.39 = f32[459]{0} multiply(iota.28, broadcast.253)
constant.633 = f32[] constant(-1)
broadcast.252 = f32[459]{0} broadcast(constant.633), dimensions={}
add.31 = f32[459]{0} add(multiply.39, broadcast.252)
ceil.11 = f32[459]{0} ceil(add.31)
constant.630 = f32[] constant(685)
broadcast.251 = f32[459]{0} broadcast(constant.630), dimensions={}
clamp.49 = f32[459]{0} clamp(broadcast.255, ceil.11, broadcast.251)
subtract.11 = f32[459]{0} subtract(clamp.49, multiply.39)
broadcast.249 = f32[459,3]{1,0} broadcast(subtract.11), dimensions={0}
iota.26 = f32[459,3]{1,0} iota(), iota_dimension=1
add.30 = f32[459,3]{1,0} add(broadcast.249, iota.26)
abs.3 = f32[459,3]{1,0} abs(add.30)
subtract.10 = f32[459,3]{1,0} subtract(broadcast.256, abs.3)
maximum.6 = f32[459,3]{1,0} maximum(broadcast.257, subtract.10)
ROOT reduce.3 = f32[459]{0} reduce(maximum.6, constant.635), dimensions={1}, to_apply=add_float_.56
}
fused_computation.67 {
constant.684 = f32[] constant(0)
broadcast.296 = f32[1130,3]{1,0} broadcast(constant.684), dimensions={}
constant.685 = f32[] constant(1)
broadcast.295 = f32[1130,3]{1,0} broadcast(constant.685), dimensions={}
broadcast.294 = f32[1130]{0} broadcast(constant.684), dimensions={}
iota.41 = f32[1130]{0} iota(), iota_dimension=0
constant.675 = f32[] constant(1.34513271)
broadcast.293 = f32[1130]{0} broadcast(constant.675), dimensions={}
multiply.47 = f32[1130]{0} multiply(iota.41, broadcast.293)
constant.677 = f32[] constant(-1)
broadcast.290 = f32[1130]{0} broadcast(constant.677), dimensions={}
add.39 = f32[1130]{0} add(multiply.47, broadcast.290)
ceil.15 = f32[1130]{0} ceil(add.39)
constant.676 = f32[] constant(1517)
broadcast.289 = f32[1130]{0} broadcast(constant.676), dimensions={}
clamp.53 = f32[1130]{0} clamp(broadcast.294, ceil.15, broadcast.289)
subtract.19 = f32[1130]{0} subtract(clamp.53, multiply.47)
broadcast.287 = f32[1130,3]{1,0} broadcast(subtract.19), dimensions={0}
iota.39 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.38 = f32[1130,3]{1,0} add(broadcast.287, iota.39)
abs.7 = f32[1130,3]{1,0} abs(add.38)
subtract.18 = f32[1130,3]{1,0} subtract(broadcast.295, abs.7)
maximum.10 = f32[1130,3]{1,0} maximum(broadcast.296, subtract.18)
ROOT reduce.4 = f32[1130]{0} reduce(maximum.10, constant.684), dimensions={1}, to_apply=add_float_.56
}
fused_computation.59 {
constant.532 = f32[] constant(0)
broadcast.316 = f32[1130,3]{1,0} broadcast(constant.532), dimensions={}
constant.663 = f32[] constant(1)
broadcast.315 = f32[1130,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.314 = f32[1130]{0} broadcast(constant.532), dimensions={}
iota.47 = f32[1130]{0} iota(), iota_dimension=0
constant.579 = f32[] constant(1.34513271)
broadcast.311 = f32[1130]{0} broadcast(constant.579), dimensions={}
multiply.51 = f32[1130]{0} multiply(iota.47, broadcast.311)
constant.578 = f32[] constant(-1)
broadcast.310 = f32[1130]{0} broadcast(constant.578), dimensions={}
add.43 = f32[1130]{0} add(multiply.51, broadcast.310)
ceil.17 = f32[1130]{0} ceil(add.43)
constant.576 = f32[] constant(1517)
broadcast.309 = f32[1130]{0} broadcast(constant.576), dimensions={}
clamp.55 = f32[1130]{0} clamp(broadcast.314, ceil.17, broadcast.309)
subtract.24 = f32[1130]{0} subtract(clamp.55, multiply.51)
broadcast.306 = f32[1130,3]{1,0} broadcast(subtract.24), dimensions={0}
iota.45 = f32[1130,3]{1,0} iota(), iota_dimension=1
add.42 = f32[1130,3]{1,0} add(broadcast.306, iota.45)
abs.9 = f32[1130,3]{1,0} abs(add.42)
subtract.23 = f32[1130,3]{1,0} subtract(broadcast.315, abs.9)
maximum.12 = f32[1130,3]{1,0} maximum(broadcast.316, subtract.23)
param_2.183 = f32[1130]{0} parameter(2)
broadcast.172 = f32[1130,3]{1,0} broadcast(param_2.183), dimensions={0}
divide.3 = f32[1130,3]{1,0} divide(maximum.12, broadcast.172)
bitcast.53 = f32[3390]{0} bitcast(divide.3)
broadcast.171 = f32[3390,1377]{1,0} broadcast(bitcast.53), dimensions={0}
broadcast.276 = f32[459,3]{1,0} broadcast(constant.532), dimensions={}
broadcast.275 = f32[459,3]{1,0} broadcast(constant.663), dimensions={}
broadcast.274 = f32[459]{0} broadcast(constant.532), dimensions={}
iota.35 = f32[459]{0} iota(), iota_dimension=0
constant.614 = f32[] constant(1.49891067)
broadcast.273 = f32[459]{0} broadcast(constant.614), dimensions={}
multiply.43 = f32[459]{0} multiply(iota.35, broadcast.273)
broadcast.272 = f32[459]{0} broadcast(constant.578), dimensions={}
add.35 = f32[459]{0} add(multiply.43, broadcast.272)
ceil.13 = f32[459]{0} ceil(add.35)
constant.611 = f32[] constant(685)
broadcast.269 = f32[459]{0} broadcast(constant.611), dimensions={}
clamp.51 = f32[459]{0} clamp(broadcast.274, ceil.13, broadcast.269)
subtract.15 = f32[459]{0} subtract(clamp.51, multiply.43)
broadcast.267 = f32[459,3]{1,0} broadcast(subtract.15), dimensions={0}
iota.33 = f32[459,3]{1,0} iota(), iota_dimension=1
add.34 = f32[459,3]{1,0} add(broadcast.267, iota.33)
abs.5 = f32[459,3]{1,0} abs(add.34)
subtract.14 = f32[459,3]{1,0} subtract(broadcast.275, abs.5)
maximum.8 = f32[459,3]{1,0} maximum(broadcast.276, subtract.14)
param_1.177 = f32[459]{0} parameter(1)
broadcast.170 = f32[459,3]{1,0} broadcast(param_1.177), dimensions={0}
divide.2 = f32[459,3]{1,0} divide(maximum.8, broadcast.170)
bitcast.52 = f32[1377]{0} bitcast(divide.2)
broadcast.169 = f32[3390,1377]{1,0} broadcast(bitcast.52), dimensions={1}
multiply.15 = f32[3390,1377]{1,0} multiply(broadcast.171, broadcast.169)
bitcast.61 = f32[1130,3,459,3]{3,2,1,0} bitcast(multiply.15)
transpose.68 = f32[459,1130,3,3]{2,0,3,1} transpose(bitcast.61), dimensions={2,0,3,1}
copy.1 = f32[459,1130,3,3]{3,2,1,0} copy(transpose.68)
bitcast.50 = f32[1130,459,9]{2,1,0} bitcast(copy.1)
broadcast.168 = f32[1130,459,6,9]{3,2,1,0} broadcast(bitcast.50), dimensions={0,1,3}
param_0.171 = u8[1,688,1520,6]{3,2,1,0} parameter(0)
bitcast.49 = u8[688,1520,1,6]{3,1,0,2} bitcast(param_0.171)
convert.175 = f32[688,1520,1,6]{3,1,0,2} convert(bitcast.49)
broadcast.167 = f32[459,1130,1]{2,1,0} broadcast(clamp.51), dimensions={0}
broadcast.166 = f32[459,1130,1]{2,1,0} broadcast(clamp.55), dimensions={1}
concatenate.3 = f32[459,1130,2]{2,1,0} concatenate(broadcast.167, broadcast.166), dimensions={2}
convert.174 = s32[459,1130,2]{2,1,0} convert(concatenate.3)
bitcast.48 = s32[518670,2]{1,0} bitcast(convert.174)
gather.1 = f32[518670,3,3,1,6]{2,1,4,0,3} gather(convert.175, bitcast.48), offset_dims={1,2,3,4}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={3,3,1,6}
transpose.69 = f32[1,518670,6,3,3]{4,3,2,1,0} transpose(gather.1), dimensions={3,0,4,1,2}
bitcast.47 = f32[1130,459,6,9]{3,2,1,0} bitcast(transpose.69)
multiply.14 = f32[1130,459,6,9]{3,2,1,0} multiply(broadcast.168, bitcast.47)
reduce.2 = f32[1130,459,6]{2,1,0} reduce(multiply.14, constant.532), dimensions={3}, to_apply=add_float_.56
convert.173 = f16[1130,459,6]{2,1,0} convert(reduce.2)
bitcast.46 = f16[1,459,1130,6]{3,2,1,0} bitcast(convert.173)
constant.533 = f16[] constant(0)
pad.9 = f16[1,480,1130,6]{3,2,1,0} pad(bitcast.46, constant.533), padding=0_0x0_21x0_0x0_0
pad.8 = f16[1,480,1152,6]{3,2,1,0} pad(pad.9, constant.533), padding=0_0x0_0x0_22x0_0
constant.532f16 = f16[] constant(0)
ROOT pad.7 = f16[1,485,1157,6]{3,2,1,0} pad(pad.8, constant.532f16), padding=0_0x2_3x2_3x0_0
}
ENTRY e {
arg0.1 = u8[1,688,1520,6]{3,2,1,0} parameter(0), parameter_replication={false}
fusion.66 = f32[459]{0} fusion(), kind=kLoop, calls=fused_computation.66
fusion.67 = f32[1130]{0} fusion(), kind=kLoop, calls=fused_computation.67
ROOT fusion.59 = f16[1,485,1157,6]{2,1,3,0} fusion(arg0.1, fusion.66, fusion.67), kind=kLoop, calls=fused_computation.59
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, NoMergeBecauseTooManyBasicBlockSplits) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
region_6.97 {
Arg_0.98 = pred[] parameter(0)
Arg_1.99 = pred[] parameter(1)
ROOT or.100 = pred[] or(Arg_0.98, Arg_1.99)
}
region_4.50 {
Arg_0.51 = f64[] parameter(0)
Arg_1.52 = f64[] parameter(1)
ROOT add.53 = f64[] add(Arg_0.51, Arg_1.52)
}
f2 {
param_0 = s64[1]{0} parameter(0)
constant_70 = f64[] constant(0)
convert.41.clone.1 = f64[1]{0} convert(param_0)
ROOT pad.99.clone.1 = f64[3]{0} pad(convert.41.clone.1, constant_70), padding=0_2
}
f1 {
param_0.361 = pred[5]{0} parameter(0)
broadcast.107 = pred[10,5]{1,0} broadcast(param_0.361), dimensions={1}
param_6.244 = pred[5]{0} parameter(6)
broadcast.111.clone.1 = pred[10,5]{1,0} broadcast(param_6.244), dimensions={1}
param_1.450 = f64[10,5]{1,0} parameter(1)
constant_294_clone_1 = f64[] constant(1)
broadcast.153.clone.1 = f64[10,5]{1,0} broadcast(constant_294_clone_1), dimensions={}
compare.22.clone.1 = pred[10,5]{1,0} compare(param_1.450, broadcast.153.clone.1), direction=GE
constant_75_clone_1 = f64[] constant(-1)
broadcast.109.clone.1 = f64[10,5]{1,0} broadcast(constant_75_clone_1), dimensions={}
add.34.clone.1 = f64[10,5]{1,0} add(param_1.450, broadcast.109.clone.1)
param_5.322 = f64[10,5,4]{1,0,2} parameter(5)
slice.45.clone.1 = f64[10,5,1]{1,0,2} slice(param_5.322), slice={[0:10], [0:5], [3:4]}
bitcast.94.clone.1 = f64[10,5]{1,0} bitcast(slice.45.clone.1)
divide.7.clone.1 = f64[10,5]{1,0} divide(add.34.clone.1, bitcast.94.clone.1)
add.33.clone.1 = f64[10,5]{1,0} add(divide.7.clone.1, broadcast.153.clone.1)
constant_70 = f64[] constant(0)
broadcast.157.clone.1 = f64[10,5]{1,0} broadcast(constant_70), dimensions={}
compare.26.clone.1 = pred[10,5]{1,0} compare(param_1.450, broadcast.157.clone.1), direction=LE
slice.46.clone.1 = f64[10,5,1]{1,0,2} slice(param_5.322), slice={[0:10], [0:5], [0:1]}
bitcast.93.clone.1 = f64[10,5]{1,0} bitcast(slice.46.clone.1)
divide.6.clone.1 = f64[10,5]{1,0} divide(param_1.450, bitcast.93.clone.1)
broadcast.295.clone.1 = f64[10,5,3]{1,0,2} broadcast(param_1.450), dimensions={0,1}
param_4.368 = f64[10,5,2]{1,0,2} parameter(4)
pad.103.clone.1 = f64[10,5,3]{1,0,2} pad(param_4.368, constant_70), padding=0_0x0_0x1_0
compare.121.clone.1 = pred[10,5,3]{1,0,2} compare(broadcast.295.clone.1, pad.103.clone.1), direction=GE
pad.102.clone.1 = f64[10,5,3]{1,0,2} pad(param_4.368, constant_294_clone_1), padding=0_0x0_0x0_1
compare.120.clone.1 = pred[10,5,3]{1,0,2} compare(broadcast.295.clone.1, pad.102.clone.1), direction=LT
and.39.clone.1 = pred[10,5,3]{1,0,2} and(compare.121.clone.1, compare.120.clone.1)
transpose.9 = pred[3,10,5]{2,1,0} transpose(and.39.clone.1), dimensions={2,0,1}
constant_296_clone_1 = pred[] constant(false)
reduce.91.clone.1 = pred[10,5]{1,0} reduce(transpose.9, constant_296_clone_1), dimensions={0}, to_apply=region_6.97
broadcast.294.clone.1 = pred[10,5,3]{1,0,2} broadcast(reduce.91.clone.1), dimensions={0,1}
pad.99.clone.1 = f64[3]{0} parameter(3)
broadcast.292.clone.1 = f64[3]{0} broadcast(constant_70), dimensions={}
compare.117.clone.1 = pred[3]{0} compare(pad.99.clone.1, broadcast.292.clone.1), direction=NE
broadcast.290.clone.1 = pred[10,5,3]{1,0,2} broadcast(compare.117.clone.1), dimensions={2}
select.67.clone.1 = pred[10,5,3]{1,0,2} select(broadcast.294.clone.1, and.39.clone.1, broadcast.290.clone.1)
convert.40.clone.1 = f64[10,5,3]{1,0,2} convert(select.67.clone.1)
broadcast.288.clone.1 = f64[10,5,3,3]{1,0,2,3} broadcast(convert.40.clone.1), dimensions={0,1,2}
param_2.361 = f64[10,5,4,3]{1,0,2,3} parameter(2)
slice.114.clone.1 = f64[10,5,3,3]{1,0,2,3} slice(param_2.361), slice={[0:10], [0:5], [1:4], [0:3]}
multiply.53.clone.1 = f64[10,5,3,3]{1,0,2,3} multiply(broadcast.288.clone.1, slice.114.clone.1)
transpose.10 = f64[3,3,10,5]{3,2,1,0} transpose(multiply.53.clone.1), dimensions={3,2,0,1}
reduce.90.clone.1 = f64[3,10,5]{2,1,0} reduce(transpose.10, constant_70), dimensions={1}, to_apply=region_4.50
transpose.11 = f64[10,5,3]{1,0,2} transpose(reduce.90.clone.1), dimensions={1,2,0}
slice.28.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [0:1]}
bitcast.99.clone.1 = f64[10,5]{1,0} bitcast(slice.28.clone.1)
slice.108.clone.1 = f64[10,5,3,3]{1,0,2,3} slice(param_2.361), slice={[0:10], [0:5], [0:3], [0:3]}
multiply.49.clone.1 = f64[10,5,3,3]{1,0,2,3} multiply(broadcast.288.clone.1, slice.108.clone.1)
transpose.12 = f64[3,3,10,5]{3,2,1,0} transpose(multiply.49.clone.1), dimensions={3,2,0,1}
reduce.82.clone.1 = f64[3,10,5]{2,1,0} reduce(transpose.12, constant_70), dimensions={1}, to_apply=region_4.50
transpose.13 = f64[10,5,3]{1,0,2} transpose(reduce.82.clone.1), dimensions={1,2,0}
slice.107.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), slice={[0:10], [0:5], [0:1]}
bitcast.240.clone.1 = f64[10,5]{1,0} bitcast(slice.107.clone.1)
subtract.27.clone.1 = f64[10,5]{1,0} subtract(bitcast.99.clone.1, bitcast.240.clone.1)
slice.27.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), slice={[0:10], [0:5], [2:3]}
bitcast.98.clone.1 = f64[10,5]{1,0} bitcast(slice.27.clone.1)
slice.26.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [2:3]}
bitcast.97.clone.1 = f64[10,5]{1,0} bitcast(slice.26.clone.1)
add.36.clone.1 = f64[10,5]{1,0} add(bitcast.97.clone.1, bitcast.98.clone.1)
slice.24.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [1:2]}
bitcast.95.clone.1 = f64[10,5]{1,0} bitcast(slice.24.clone.1)
slice.121.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), slice={[0:10], [0:5], [1:2]}
bitcast.274.clone.1 = f64[10,5]{1,0} bitcast(slice.121.clone.1)
subtract.26.clone.1 = f64[10,5]{1,0} subtract(bitcast.95.clone.1, bitcast.274.clone.1)
divide.21 = f64[10,5]{1,0} divide(subtract.26.clone.1, subtract.27.clone.1)
constant_77_clone_1 = f64[] constant(2)
broadcast.117.clone.1 = f64[10,5]{1,0} broadcast(constant_77_clone_1), dimensions={}
multiply.37.clone.1 = f64[10,5]{1,0} multiply(divide.21, broadcast.117.clone.1)
subtract.25.clone.1 = f64[10,5]{1,0} subtract(add.36.clone.1, multiply.37.clone.1)
subtract.24.clone.1 = f64[10,5]{1,0} subtract(param_1.450, bitcast.274.clone.1)
divide.9.clone.1 = f64[10,5]{1,0} divide(subtract.24.clone.1, subtract.26.clone.1)
clamp.7.clone.1 = f64[10,5]{1,0} clamp(broadcast.157.clone.1, divide.9.clone.1, broadcast.153.clone.1)
multiply.36.clone.1 = f64[10,5]{1,0} multiply(subtract.25.clone.1, clamp.7.clone.1)
subtract.23.clone.1 = f64[10,5]{1,0} subtract(bitcast.98.clone.1, multiply.36.clone.1)
compare.13.clone.1 = pred[10,5]{1,0} compare(subtract.23.clone.1, broadcast.157.clone.1), direction=GE
negate.19.clone.1 = f64[10,5]{1,0} negate(divide.21)
multiply.35.clone.1 = f64[10,5]{1,0} multiply(negate.19.clone.1, clamp.7.clone.1)
multiply.34.clone.1 = f64[10,5]{1,0} multiply(multiply.35.clone.1, broadcast.117.clone.1)
negate.18.clone.1 = f64[10,5]{1,0} negate(subtract.23.clone.1)
multiply.33.clone.1 = f64[10,5]{1,0} multiply(subtract.23.clone.1, subtract.23.clone.1)
subtract.22.clone.1 = f64[10,5]{1,0} subtract(divide.21, subtract.23.clone.1)
constant_78_clone_1 = f64[] constant(4)
broadcast.113.clone.1 = f64[10,5]{1,0} broadcast(constant_78_clone_1), dimensions={}
multiply.32.clone.1 = f64[10,5]{1,0} multiply(subtract.22.clone.1, broadcast.113.clone.1)
multiply.31.clone.1 = f64[10,5]{1,0} multiply(multiply.32.clone.1, multiply.35.clone.1)
subtract.21.clone.1 = f64[10,5]{1,0} subtract(multiply.33.clone.1, multiply.31.clone.1)
compare.12.clone.1 = pred[10,5]{1,0} compare(subtract.21.clone.1, broadcast.157.clone.1), direction=GT
constant_79_clone_1 = f64[] constant(2.2250738585072014e-308)
broadcast.112.clone.1 = f64[10,5]{1,0} broadcast(constant_79_clone_1), dimensions={}
maximum.18.clone.1 = f64[10,5]{1,0} maximum(broadcast.112.clone.1, subtract.21.clone.1)
sqrt.1.clone.1 = f64[10,5]{1,0} sqrt(maximum.18.clone.1)
select.47.clone.1 = f64[10,5]{1,0} select(compare.12.clone.1, sqrt.1.clone.1, broadcast.157.clone.1)
add.35.clone.1 = f64[10,5]{1,0} add(negate.18.clone.1, select.47.clone.1)
select.46.clone.1 = f64[10,5]{1,0} select(compare.13.clone.1, multiply.34.clone.1, add.35.clone.1)
subtract.20.clone.1 = f64[10,5]{1,0} subtract(negate.18.clone.1, select.47.clone.1)
multiply.30.clone.1 = f64[10,5]{1,0} multiply(subtract.22.clone.1, broadcast.117.clone.1)
select.45.clone.1 = f64[10,5]{1,0} select(compare.13.clone.1, subtract.20.clone.1, multiply.30.clone.1)
divide.8.clone.1 = f64[10,5]{1,0} divide(select.46.clone.1, select.45.clone.1)
clamp.6.clone.1 = f64[10,5]{1,0} clamp(broadcast.157.clone.1, divide.8.clone.1, broadcast.153.clone.1)
multiply.29.clone.1 = f64[10,5]{1,0} multiply(subtract.27.clone.1, clamp.6.clone.1)
add.32.clone.1 = f64[10,5]{1,0} add(multiply.29.clone.1, bitcast.240.clone.1)
select.44.clone.1 = f64[10,5]{1,0} select(compare.26.clone.1, divide.6.clone.1, add.32.clone.1)
select.43.clone.1 = f64[10,5]{1,0} select(compare.22.clone.1, add.33.clone.1, select.44.clone.1)
select.42.clone.1 = f64[10,5]{1,0} select(broadcast.111.clone.1, param_1.450, select.43.clone.1)
select.41 = f64[10,5]{1,0} select(broadcast.107, select.42.clone.1, broadcast.157.clone.1)
ROOT tuple.14 = (f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}) tuple(select.41, select.42.clone.1, clamp.6.clone.1, subtract.25.clone.1, bitcast.97.clone.1, multiply.37.clone.1, bitcast.98.clone.1, divide.21)
}
ENTRY e {
p3 = s64[1]{0} parameter(3)
f2 = f64[3]{0} fusion(p3), kind=kLoop, calls=f2
p0 = pred[5]{0} parameter(0)
p1 = f64[10,5]{1,0} parameter(1)
p2 = f64[10,5,4,3]{1,0,2,3} parameter(2)
p4 = f64[10,5,2]{1,0,2} parameter(4)
p5 = f64[10,5,4]{1,0,2} parameter(5)
p6 = pred[5]{0} parameter(6)
ROOT ret = (f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}) fusion(p0, p1, p2, f2, p4, p5, p6), kind=kLoop, calls=f1
}
)")
.value();
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, CommonElementwiseUsedParameter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
p {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
p2 = f32[10000000] parameter(2)
p3 = f32[10000000] parameter(3)
a0 = f32[10000000] add(p1, p2)
a1 = f32[10000000] add(a0, p3)
ROOT _ = add(p0, a1)
}
c1 {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT _ = add(p0, p1)
}
c2 {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT _ = multiply(p0, p1)
}
ENTRY entry {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
p2 = f32[10000000] parameter(2)
p3 = f32[10000000] parameter(3)
f = f32[10000000] fusion(p0, p1, p2, p3), kind=kLoop, calls=p
f1 = f32[10000000] fusion(p0, f), kind=kLoop, calls=c1
f2 = f32[10000000] fusion(p1, f), kind=kLoop, calls=c2
ROOT _ = (f32[10000000], f32[10000000]) tuple(f1, f2)
}
)")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, IncompatibleNonTrivialHeroes) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
param_0.1 = f32[18,16,32]{2,1,0} parameter(0)
param_1.1 = f32[32,16,18]{2,1,0} parameter(1)
s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1)
t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0}
sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1)
exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1)
ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.1)
}
fused_computation.2 {
param_0.2 = f32[32,16,18]{2,1,0} parameter(0)
s.2 = f32[32,16,18]{2,1,0} sqrt(param_0.2)
ROOT t.2 = f32[32,18,16]{2,1,0} transpose(s.2), dimensions={0,2,1}
}
ENTRY main {
p = f32[18,16,32]{2,1,0} parameter(0)
p2 = f32[32,16,18]{2,1,0} parameter(1)
fusion = f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation
ROOT fusion2 = f32[32,18,16]{2,1,0} fusion(fusion), kind=kInput, calls=fused_computation.2
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, DoNotMergeDUSFusions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
%fused_computation (param_0: f32[8], param_1.2: f32[], param_2.3: f32[8]) -> f32[8] {
%param_0 = f32[8]{0} parameter(0)
%param_2.3 = f32[8]{0} parameter(2)
%slice.2 = f32[5]{0} slice(f32[8]{0} %param_2.3), slice={[0:5]}
%param_1.2 = f32[] parameter(1)
%broadcast.2 = f32[5]{0} broadcast(f32[] %param_1.2), dimensions={}
%add.2 = f32[5]{0} add(f32[5]{0} %slice.2, f32[5]{0} %broadcast.2)
%two.1 = s32[] constant(2)
ROOT %dynamic-update-slice.2 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0, f32[5]{0} %add.2, s32[] %two.1)
}
%fused_computation.1 (param_0.1: f32[8], param_1.4: f32[6], param_2.6: f32[]) -> f32[8] {
%param_0.1 = f32[8]{0} parameter(0)
%param_1.4 = f32[6]{0} parameter(1)
%param_2.6 = f32[] parameter(2)
%broadcast.3 = f32[6]{0} broadcast(f32[] %param_2.6), dimensions={}
%add.3 = f32[6]{0} add(f32[6]{0} %param_1.4, f32[6]{0} %broadcast.3)
%three.1 = s32[] constant(3)
ROOT %dynamic-update-slice.3 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0.1, f32[6]{0} %add.3, s32[] %three.1)
}
ENTRY %Test (parameter: f32[8]) -> f32[8] {
%parameter = f32[8]{0} parameter(0)
%slice.1 = f32[6]{0} slice(f32[8]{0} %parameter), slice={[0:6]}
%one = f32[] constant(1)
%fusion.1 = f32[8]{0} fusion(f32[8]{0} %parameter, f32[6]{0} %slice.1, f32[] %one), kind=kLoop, calls=%fused_computation.1
ROOT %fusion = f32[8]{0} fusion(f32[8]{0} %fusion.1, f32[] %one, f32[8]{0} %parameter), kind=kLoop, calls=%fused_computation
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, MergeDUSFusionWithElementwiseFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
%fused_computation {
%param_0 = f32[1,8]{1,0} parameter(0)
%bitcast = f32[8]{0} bitcast(%param_0)
ROOT %neg = f32[8]{0} negate(%bitcast)
}
%fused_computation.1 {
%param_0.1 = f32[8]{0} parameter(0)
%param_1.4 = f32[5]{0} parameter(1)
%three.1 = s32[] constant(3)
%exp = f32[5]{0} exponential(%param_1.4)
ROOT %dynamic-update-slice.3 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0.1, f32[5]{0} %exp, s32[] %three.1)
}
ENTRY %Test {
%parameter = f32[5]{0} parameter(0)
%parameter.1 = f32[1,8]{1,0} parameter(1)
%fusion = f32[8]{0} fusion(f32[1,8]{1,0} %parameter.1), kind=kLoop, calls=%fused_computation
ROOT %fusion.1 = f32[8]{0} fusion(f32[8]{0} %fusion, f32[5]{0} %parameter), kind=kLoop, calls=%fused_computation.1
}
)")
.value();
EXPECT_TRUE(fusion_merger_.Run(module.get()).value());
}
TEST_F(FusionMergerTest, DoNotMergeTwoReduces) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add
}
)")
.value();
EXPECT_FALSE(fusion_merger_.Run(module.get()).value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_merger.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_merger_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5710bd06-fb26-4a74-960c-a31248cb0fb0 | cpp | tensorflow/tensorflow | squeeze | tensorflow/lite/kernels/squeeze.cc | tensorflow/lite/kernels/squeeze_test.cc | #include <stdint.h>
#include <string.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/portable_tensor.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace squeeze {
struct SqueezeContext {
SqueezeContext(TfLiteContext* context, TfLiteNode* node)
: params(reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data)),
input(GetInput(context, node, 0)),
output(GetOutput(context, node, 0)) {}
TfLiteSqueezeParams* params;
const TfLiteTensor* const input;
TfLiteTensor* output;
};
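// Resizes the output tensor by dropping size-1 dimensions: every size-1
// dimension when no squeeze_dims are given, otherwise exactly the listed axes
// (negative values count from the back), each of which must have size 1.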
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
SqueezeContext op_context(context, node);
int input_num_dims = NumDimensions(op_context.input);
int num_squeeze_dims = op_context.params->num_squeeze_dims;
const TfLiteIntArray* input_dims = op_context.input->dims;
const int32_t* squeeze_dims = op_context.params->squeeze_dims;
TF_LITE_ENSURE(context, input_num_dims <= 8);
bool should_squeeze[8] = {false};
int num_squeezed_dims = 0;
if (num_squeeze_dims == 0) {
for (int idx = 0; idx < input_num_dims; ++idx) {
if (input_dims->data[idx] == 1) {
should_squeeze[idx] = true;
++num_squeezed_dims;
}
}
} else {
for (int idx = 0; idx < num_squeeze_dims; ++idx) {
int32_t current = squeeze_dims[idx] < 0
? squeeze_dims[idx] + input_num_dims
: squeeze_dims[idx];
TF_LITE_ENSURE(context, current >= 0 && current < input_num_dims &&
input_dims->data[current] == 1);
if (!should_squeeze[current]) ++num_squeezed_dims;
should_squeeze[current] = true;
}
}
TfLiteIntArray* output_dims =
TfLiteIntArrayCreate(input_num_dims - num_squeezed_dims);
for (int in_idx = 0, out_idx = 0; in_idx < input_num_dims; ++in_idx) {
if (!should_squeeze[in_idx]) {
output_dims->data[out_idx++] = input_dims->data[in_idx];
}
}
return context->ResizeTensor(context, op_context.output, output_dims);
}
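// Squeeze only changes shape metadata, so evaluation is a flat copy: strings
// are rewritten element by element, every other type is copied byte-for-byte,
// and the copy is skipped when the output already shares the input buffer.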
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
SqueezeContext op_context(context, node);
if (op_context.input->type == kTfLiteString) {
const int input_flat_size = GetTensorShape(op_context.input).FlatSize();
const int output_flat_size = GetTensorShape(op_context.output).FlatSize();
TF_LITE_ENSURE_EQ(context, input_flat_size, output_flat_size);
SequentialTensorWriter<string> writer(op_context.input, op_context.output);
for (int i = 0; i < input_flat_size; i++) {
writer.Write(i);
}
return kTfLiteOk;
}
TF_LITE_ENSURE_EQ(context, op_context.input->bytes, op_context.output->bytes);
if (op_context.output->data.data != op_context.input->data.data) {
memcpy(op_context.output->data.data, op_context.input->data.data,
op_context.input->bytes);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_SQUEEZE() {
static TfLiteRegistration r = {
nullptr,
nullptr,
squeeze::Prepare,
squeeze::Eval,
nullptr,
0,
nullptr,
0,
nullptr,
nullptr,
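// Squeeze never modifies the underlying data, so the runtime is allowed to
// share input 0's buffer with the output and run the op in place.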
kTfLiteInplaceOpInput0Shared | kTfLiteInplaceOpDataUnmodified};
return &r;
}
}
}
} | #include <stdint.h>
#include <initializer_list>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
class BaseSqueezeOpModel : public SingleOpModel {
public:
BaseSqueezeOpModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_SQUEEZE, BuiltinOptions_SqueezeOptions,
CreateSqueezeOptions(builder_, builder_.CreateVector<int>(axis))
.Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
protected:
int input_;
int output_;
};
template <typename T>
class SqueezeOpModel : public BaseSqueezeOpModel {
public:
using BaseSqueezeOpModel::BaseSqueezeOpModel;
void SetInput(std::initializer_list<T> data) { PopulateTensor(input_, data); }
void SetStringInput(std::initializer_list<string> data) {
PopulateStringTensor(input_, data);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<string> GetStringOutput() {
return ExtractVector<string>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
};
template <typename T>
class SqueezeOpTest : public ::testing::Test {};
using DataTypes = ::testing::Types<float, int8_t, int16_t, int32_t>;
TYPED_TEST_SUITE(SqueezeOpTest, DataTypes);
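// Points the output tensor at the input buffer before Invoke to exercise the
// in-place path, then checks the squeezed shape, the values, and that the
// buffer is still shared afterwards.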
TYPED_TEST(SqueezeOpTest, SqueezeAllInplace) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {});
m.SetInput(data);
const int kInplaceInputTensorIdx = 0;
const int kInplaceOutputTensorIdx = 0;
const TfLiteTensor* input_tensor = m.GetInputTensor(kInplaceInputTensorIdx);
TfLiteTensor* output_tensor = m.GetOutputTensor(kInplaceOutputTensorIdx);
output_tensor->data.data = input_tensor->data.data;
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
EXPECT_EQ(output_tensor->data.data, input_tensor->data.data);
}
TYPED_TEST(SqueezeOpTest, SqueezeAll) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeSelectedAxis) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {2});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeNegativeAxis) {
std::initializer_list<TypeParam> data = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24};
SqueezeOpModel<TypeParam> m({GetTensorType<TypeParam>(), {1, 24, 1}},
{GetTensorType<TypeParam>(), {24}}, {-1, 0});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}));
}
TYPED_TEST(SqueezeOpTest, SqueezeAllDims) {
std::initializer_list<TypeParam> data = {3};
SqueezeOpModel<TypeParam> m(
{GetTensorType<TypeParam>(), {1, 1, 1, 1, 1, 1, 1}},
{GetTensorType<TypeParam>(), {1}}, {});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3}));
}
TEST(SqueezeOpTest, SqueezeAllString) {
std::initializer_list<std::string> data = {"a", "b"};
SqueezeOpModel<std::string> m({GetTensorType<std::string>(), {1, 2, 1}},
{GetTensorType<std::string>(), {2}}, {});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a", "b"}));
}
TEST(SqueezeOpTest, SqueezeNegativeAxisString) {
std::initializer_list<std::string> data = {"a", "b"};
SqueezeOpModel<std::string> m({GetTensorType<std::string>(), {1, 2, 1}},
{GetTensorType<std::string>(), {24}}, {-1});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a", "b"}));
}
TEST(SqueezeOpTest, SqueezeAllDimsString) {
std::initializer_list<std::string> data = {"a"};
SqueezeOpModel<std::string> m(
{GetTensorType<std::string>(), {1, 1, 1, 1, 1, 1, 1}},
{GetTensorType<std::string>(), {1}}, {});
m.SetStringInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), IsEmpty());
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({"a"}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/squeeze.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/squeeze_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
853575ff-8d14-42fe-819a-ae8c76631c2d | cpp | tensorflow/tensorflow | logging_ops | tensorflow/core/ops/logging_ops.cc | tensorflow/core/kernels/logging_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/dataset_stateful_op_allowlist.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
REGISTER_OP("Assert")
.Input("condition: bool")
.Input("data: T")
.SetIsStateful()
.Attr("T: list(type)")
.Attr("summarize: int = 3")
.SetShapeFn(shape_inference::NoOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Assert");
REGISTER_OP("Print")
.Input("input: T")
.Input("data: U")
.Output("output: T")
.SetIsStateful()
.Attr("T: type")
.Attr("U: list(type) >= 0")
.Attr("message: string = ''")
.Attr("first_n: int = -1")
.Attr("summarize: int = 3")
.SetShapeFn(shape_inference::UnchangedShape);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Print");
REGISTER_OP("PrintV2")
.Input("input: string")
.SetIsStateful()
.Attr("output_stream: string = 'stderr'")
.Attr("end: string = '\n'")
.SetShapeFn([](InferenceContext* c) {
if (!c->RankKnown(c->input(0))) return absl::OkStatus();
if (c->Rank(c->input(0)) != 0) {
return errors::InvalidArgument("input must be a scalar, but has rank: ",
c->Rank(c->input(0)));
}
return absl::OkStatus();
});
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("PrintV2");
REGISTER_OP("TensorSummaryV2")
.Input("tag: string")
.Input("tensor: T")
.Input("serialized_summary_metadata: string")
.Output("summary: string")
.Attr("T: type")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("TensorSummary")
.Input("tensor: T")
.Output("summary: string")
.Attr("T: type")
.Attr("description: string = ''")
.Attr("labels: list(string) = []")
.Attr("display_name: string = ''")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("ImageSummary")
.Input("tag: string")
.Input("tensor: T")
.Output("summary: string")
.Attr("max_images: int >= 1 = 3")
.Attr("T: {uint8, float, half, float64} = DT_FLOAT")
.Attr(
"bad_color: tensor = { dtype: DT_UINT8 "
"tensor_shape: { dim { size: 4 } } "
"int_val: 255 int_val: 0 int_val: 0 int_val: 255 }")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AudioSummaryV2")
.Input("tag: string")
.Input("tensor: float")
.Input("sample_rate: float")
.Output("summary: string")
.Attr("max_outputs: int >= 1 = 3")
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("AudioSummary")
.Input("tag: string")
.Input("tensor: float")
.Output("summary: string")
.Attr("sample_rate: float")
.Attr("max_outputs: int >= 1 = 3")
.SetShapeFn(shape_inference::ScalarShape)
.Deprecated(15, "Use AudioSummaryV2.");
REGISTER_OP("Timestamp")
.Output("ts: float64")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("Timestamp");
} | #include <chrono>
#include <thread>
#include "xla/tsl/util/determinism_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/status_matchers.h"
namespace tensorflow {
namespace {
class PrintingV2GraphTest : public OpsTestBase {
protected:
Status Init(const string& output_stream = "log(warning)") {
TF_CHECK_OK(NodeDefBuilder("op", "PrintV2")
.Input(FakeInput(DT_STRING))
.Attr("output_stream", output_stream)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(PrintingV2GraphTest, StringSuccess) {
TF_ASSERT_OK(Init());
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
}
TEST_F(PrintingV2GraphTest, InvalidOutputStream) {
ASSERT_NE(absl::OkStatus(), (Init("invalid_output_stream")));
}
TEST_F(PrintingV2GraphTest, InvalidInputRank) {
TF_ASSERT_OK(Init());
AddInputFromArray<tstring>(TensorShape({2}), {"bar", "foo"});
ASSERT_NE(absl::OkStatus(), RunOpKernel());
}
class PrintingGraphTest : public OpsTestBase {
protected:
Status Init(DataType input_type1, DataType input_type2, string msg = "",
int first_n = -1, int summarize = 3) {
TF_CHECK_OK(NodeDefBuilder("op", "Print")
.Input(FakeInput(input_type1))
.Input(FakeInput(2, input_type2))
.Attr("message", msg)
.Attr("first_n", first_n)
.Attr("summarize", summarize)
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(PrintingGraphTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, Int32Success_Summarize6) {
TF_ASSERT_OK(Init(DT_INT32, DT_INT32, "", -1, 6));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, MsgSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "Message: "));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(PrintingGraphTest, FirstNSuccess) {
TF_ASSERT_OK(Init(DT_INT32, DT_STRING, "", 3));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
AddInputFromArray<tstring>(TensorShape({}), {"foo"});
AddInputFromArray<tstring>(TensorShape({}), {"bar"});
for (int i = 0; i < 4; i++) TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
class TimestampTest : public OpsTestBase {
protected:
Status Init() {
TF_CHECK_OK(NodeDefBuilder("op", "Timestamp").Finalize(node_def()));
return InitOp();
}
};
TEST_F(TimestampTest, WaitAtLeast) {
TF_ASSERT_OK(Init());
TF_ASSERT_OK(RunOpKernel());
double ts1 = *((*GetOutput(0)).flat<double>().data());
std::this_thread::sleep_for(std::chrono::seconds(1));
TF_ASSERT_OK(RunOpKernel());
double ts2 = *((*GetOutput(0)).flat<double>().data());
EXPECT_LE(1.0, ts2 - ts1);
}
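// With deterministic ops enabled, the Timestamp kernel must refuse to run and
// report FAILED_PRECONDITION instead of returning a wall-clock value.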
TEST_F(TimestampTest, DeterminismError) {
tsl::test::DeterministicOpsScope det_scope;
TF_ASSERT_OK(Init());
EXPECT_THAT(RunOpKernel(),
testing::StatusIs(
error::FAILED_PRECONDITION,
"Timestamp cannot be called when determinism is enabled"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/logging_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/logging_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f350a9ab-b4e3-4c7d-ac03-af724def09d0 | cpp | tensorflow/tensorflow | scan_loop_accumulator_input_unification | third_party/xla/xla/service/scan_loop_accumulator_input_unification.cc | third_party/xla/xla/service/scan_loop_accumulator_input_unification_test.cc | #include "xla/service/scan_loop_accumulator_input_unification.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/service/while_loop_unroller.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
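// Returns true if the loop-carried value at tuple index `idx` of `while_instr`
// is never modified by the loop: both the init value and the value produced by
// the while must resolve to one and the same dataflow value.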
bool LoopIndexIsReadOnly(const HloAliasAnalysis& alias_analysis,
HloInstruction* while_instr, int64_t idx) {
const HloDataflowAnalysis& dataflow_analysis =
alias_analysis.dataflow_analysis();
return !(
dataflow_analysis.GetValueSet(while_instr->while_init(), {idx})
.values()
.size() > 1 ||
dataflow_analysis.GetValueSet(while_instr, {idx}).values().size() > 1 ||
dataflow_analysis.GetUniqueValueAt(while_instr, {idx}) !=
dataflow_analysis.GetUniqueValueAt(while_instr->while_init(), {idx}));
}
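// Collects <accumulator, input> pairs for the scan loop `while_instr`. An
// accumulator is a loop-carried tensor whose only use is a shape-covering
// dynamic-update-slice feeding the loop root; the matching input is a
// read-only tensor threaded in from the outer loop that is only read through
// a shape-covering dynamic-slice. Such a pair can be collapsed into one
// buffer.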
std::vector<std::pair<HloInstruction*, HloInstruction*>>
FindAccumulatorInputPairs(const HloAliasAnalysis& alias_analysis,
HloInstruction* while_instr,
const WhileLoopConfig& config) {
HloComputation* computation = while_instr->while_body();
HloInstruction* body_param = computation->parameter_instruction(0);
std::vector<HloInstruction*> possible_acc;
for (int64_t param_idx = 0;
param_idx < while_instr->while_init()->operand_count(); ++param_idx) {
for (HloInstruction* gte : body_param->users()) {
if (!Match(gte, match::GetTupleElement().WithTupleIndex(param_idx))) {
continue;
}
if (gte->operand(0) != body_param) {
continue;
}
if (gte->user_count() > 1 || gte->user_count() == 0) {
continue;
}
HloInstruction* gte_user = gte->users().at(0);
if (MatchShapeCoveringDynamicIndexInstruction(
gte_user, gte, HloOpcode::kDynamicUpdateSlice, config)
.has_value()) {
if (computation->root_instruction()->mutable_operand(param_idx) ==
gte_user) {
possible_acc.push_back(gte);
VLOG(3) << "accumulator index: " << param_idx << " = " << gte->name();
}
}
}
}
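// Returns the operand index of `operand` within `instr`, or -1 if it is not
// an operand.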
auto operand_index = [](HloInstruction* instr,
HloInstruction* operand) -> int64_t {
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (operand == instr->operand(i)) {
return i;
}
}
return -1;
};
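// Returns the get-tuple-element instruction that reads `tuple` at index `idx`,
// or nullptr if there is none.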
auto find_gte_instr = [](HloInstruction* tuple,
int64_t idx) -> HloInstruction* {
for (HloInstruction* instr : tuple->parent()->MakeInstructionPostOrder()) {
HloInstruction* operand;
if (Match(instr, match::GetTupleElement()
.WithOperand(0, match::Op(&operand))
.WithTupleIndex(idx))) {
if (operand != tuple) {
continue;
}
return instr;
}
}
return nullptr;
};
auto check_single_user_not_null = [](HloInstruction* instr) -> bool {
if (instr == nullptr || instr->user_count() != 1) {
return false;
}
return true;
};
std::vector<std::pair<HloInstruction*, HloInstruction*>> acc_input_pairs;
HloComputation* outer_while_body = while_instr->parent();
for (HloInstruction* acc : possible_acc) {
VLOG(3) << "Looking for corresponding input for " << acc->name();
HloInstruction* acc_gte_outer_body =
find_gte_instr(while_instr, acc->tuple_index());
if (acc_gte_outer_body == nullptr) {
continue;
}
int64_t idx =
operand_index(outer_while_body->root_instruction(), acc_gte_outer_body);
VLOG(3) << "Accumulator output of the scan in the outer body = "
<< acc_gte_outer_body->name() << ", index = " << idx;
if (idx == -1) {
continue;
}
HloInstruction* input_gte_outer =
find_gte_instr(outer_while_body->parameter_instruction(0), idx);
if (!check_single_user_not_null(input_gte_outer)) {
continue;
}
if (input_gte_outer->users().at(0) != while_instr->while_init()) {
continue;
}
VLOG(3) << "Input parameter outer body = " << input_gte_outer->name()
<< ", index = " << input_gte_outer->tuple_index();
int64_t input_idx_inner =
operand_index(while_instr->while_init(), input_gte_outer);
HloInstruction* input_gte_inner =
find_gte_instr(computation->parameter_instruction(0), input_idx_inner);
if (!LoopIndexIsReadOnly(alias_analysis, while_instr, input_idx_inner)) {
continue;
}
VLOG(3) << "Input parameter scan body = " << input_gte_inner->name()
<< ", index = " << input_gte_inner->tuple_index();
if (input_gte_inner->user_count() != 2) {
continue;
}
HloInstruction* gte_user = input_gte_inner->users().at(0);
VLOG(3) << "User of the inner loop input = " << gte_user->ToString();
if (MatchShapeCoveringDynamicIndexInstruction(
gte_user, input_gte_inner, HloOpcode::kDynamicSlice, config)
.has_value()) {
acc_input_pairs.emplace_back(acc, input_gte_inner);
}
}
return acc_input_pairs;
}
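// For each unrollable loop whose parent computation is itself a while-loop
// body, rewrites every <accumulator, input> pair: all uses of the input are
// redirected to the accumulator and the inner loop's init is rewired so the
// accumulator slot starts from the original input operand, letting the two
// slots share a single buffer.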
absl::StatusOr<bool> UnifyAccumulatorWithInput(
const HloAliasAnalysis& alias_analysis,
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops) {
std::unique_ptr<CallGraph> call_graph =
CallGraph::Build(&alias_analysis.dataflow_analysis().module());
auto is_while_body = [&](HloComputation* comp) {
std::vector<HloInstruction*> callers =
call_graph->GetComputationCallers(comp);
return !callers.empty() && callers.at(0)->opcode() == HloOpcode::kWhile;
};
std::vector<HloInstruction*> changed_loops;
bool unified = false;
for (auto& [while_instr, loop_config] : unrollable_loops) {
if (!is_while_body(while_instr->parent())) {
continue;
}
auto acc_input_pairs =
FindAccumulatorInputPairs(alias_analysis, while_instr, loop_config);
for (const auto& [acc, input] : acc_input_pairs) {
if (Match(while_instr->while_init()->mutable_operand(acc->tuple_index()),
match::GetTupleElement(match::Parameter()))) {
continue;
}
VLOG(3) << while_instr->name() << " -> " << "<accumulator_@"
<< acc->tuple_index() << ": " << acc->name() << ", " << "input_@"
<< input->tuple_index() << ": " << input->name() << ">";
TF_RETURN_IF_ERROR(input->ReplaceAllUsesWith(acc));
TF_RETURN_IF_ERROR(while_instr->while_init()->ReplaceOperandWith(
acc->tuple_index(),
while_instr->while_init()->mutable_operand(input->tuple_index())));
if (input->user_count() == 0) {
TF_RETURN_IF_ERROR(while_instr->while_body()->RemoveInstruction(input));
}
unified = true;
}
}
return unified;
}
}
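// Pass entry point: runs alias analysis, collects the unrollable loops,
// unifies accumulator/input pairs inside them, and if anything changed cleans
// up dead while parameters, simplifies tuples, and removes unused
// computations.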
absl::StatusOr<bool> ScanLoopAccumulatorInputUnification::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "HLO module before ScanLoopAccumulatorInputUnification:";
XLA_VLOG_LINES(2, module->ToString());
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
std::vector<std::pair<HloInstruction*, WhileLoopConfig>> unrollable_loops =
WhileLoopUnroller::GetUnrollableLoops(module, execution_threads,
std::nullopt);
TF_ASSIGN_OR_RETURN(bool changed, UnifyAccumulatorWithInput(
*alias_analysis, unrollable_loops));
if (changed) {
for (auto& [while_instr, loop_config] : unrollable_loops) {
TF_RETURN_IF_ERROR(TryRemoveDeadWhileParams(while_instr).status());
}
TF_RETURN_IF_ERROR(TupleSimplifier{}.Run(module).status());
TF_RETURN_IF_ERROR(module->RemoveUnusedComputations());
VLOG(2) << "HLO module after ScanLoopAccumulatorInputUnification:";
XLA_VLOG_LINES(2, module->ToString());
} else {
VLOG(2) << "HLO module unchanged after ScanLoopAccumulatorInputUnification";
}
return changed;
}
} | #include "xla/service/scan_loop_accumulator_input_unification.h"
#include <memory>
#include <optional>
#include <utility>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/copy_insertion.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ScanLoopAccumulatorInputUnificationTest = HloTestBase;
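// Returns the first kWhile instruction in the entry computation's post order
// (the outermost loop in these tests), or nullptr if there is none.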
HloInstruction* GetTopLevelWhileInstruction(HloModule* module) {
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
return instr;
}
}
return nullptr;
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
tuple.8 = (s32[], s32[], s32[8]) tuple(constant.3, init, array)
while = (s32[], s32[], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, UnifyAccumulatorInput2) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
add.2 = s32[] multiply(get-tuple-element.47, reshape.4)
reshape.5 = s32[1] reshape(add.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, broadcast2, get-tuple-element.54)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
EXPECT_EQ(instr->while_init()->operand(3)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, AccumulatorAllocateOutside) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, get-tuple-element.54, get-tuple-element.48)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.48, get-tuple-element.40)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
buffer = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, buffer)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=3
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, InputDifferentShape) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8,10] get-tuple-element(wide.arg_tuple.8), index=3
zero = s32[] constant(0)
dynamic-slice.0 = s32[1,10] dynamic-slice(get-tuple-element.54, get-tuple-element.46, zero), dynamic_slice_sizes={1,10}
reshape.2 = s32[10] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(reshape.2, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(get-tuple-element.47, reshape.3)
reshape.4 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8,10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8,10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8,10] parameter(0)
broadcast.5 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8,10]) tuple(constant.3, init, broadcast.5, array)
while = (s32[], s32[], s32[8], s32[8,10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.39 = s32[] get-tuple-element(while), index=1
ROOT get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_FALSE(simplified_loop);
}
TEST_F(ScanLoopAccumulatorInputUnificationTest, MultipleUsersInput) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[8] get-tuple-element(wide.arg_tuple.8), index=4
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=5
mult = s32[8] multiply(get-tuple-element.54, get-tuple-element.54)
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
add.1 = s32[] add(get-tuple-element.47, reshape.2)
reshape.3 = s32[1] reshape(add.1)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.3, get-tuple-element.46)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.56, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.4 = s32[] reshape(dynamic-slice.1)
add.2 = s32[] multiply(get-tuple-element.47, reshape.4)
reshape.5 = s32[1] reshape(add.2)
dynamic-update-slice.1 = s32[8] dynamic-update-slice(get-tuple-element.55, reshape.5, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, dynamic-update-slice.1, get-tuple-element.56)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.56 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
broadcast2 = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.54, broadcast2, get-tuple-element.56)
while = (s32[], s32[], s32[8], s32[8], s32[8], s32[8]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=4
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[8]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.41)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
array2 = s32[8] constant({10,20,30,40,50,60,70,80})
tuple.8 = (s32[], s32[], s32[8], s32[8]) tuple(constant.3, init, array, array2)
while = (s32[], s32[], s32[8], s32[8]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
get-tuple-element.41 = s32[8] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[8]) tuple(get-tuple-element.40, get-tuple-element.41)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
for (HloInstruction* instr :
module->entry_computation()->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kWhile) {
EXPECT_EQ(instr->while_init()->operand(2)->opcode(),
HloOpcode::kConstant);
}
}
EXPECT_TRUE(RunAndCompareTwoModules(
std::move(module), std::move(module_clone), {}, std::nullopt, true));
}
TEST_F(ScanLoopAccumulatorInputUnificationTest,
UnifyAccumulatorInputCheckCopy) {
[[maybe_unused]] constexpr char kModule[] = R"(
HloModule jit_scan
wide.region_0.7 {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.54 = s32[8] get-tuple-element(wide.arg_tuple.8), index=3
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=4
dynamic-slice.0 = s32[1] dynamic-slice(get-tuple-element.54, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.2 = s32[] reshape(dynamic-slice.0)
dynamic-slice.1 = s32[1] dynamic-slice(get-tuple-element.55, get-tuple-element.46), dynamic_slice_sizes={1}
reshape.3 = s32[] reshape(dynamic-slice.1)
add.1 = s32[] add(reshape.3, reshape.2)
add.2 = s32[] add(add.1, get-tuple-element.47)
reshape.4 = s32[1] reshape(add.2)
dynamic-update-slice.0 = s32[8] dynamic-update-slice(get-tuple-element.48, reshape.4, get-tuple-element.46)
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT tuple.10 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(add.0, add.1, dynamic-update-slice.0, get-tuple-element.54, get-tuple-element.55)
}
wide.region_1.29 {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
outer_body {
wide.arg_tuple.8 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.46 = s32[] get-tuple-element(wide.arg_tuple.8), index=0
get-tuple-element.47 = s32[] get-tuple-element(wide.arg_tuple.8), index=1
get-tuple-element.48 = s32[8] get-tuple-element(wide.arg_tuple.8), index=2
get-tuple-element.55 = s32[10] get-tuple-element(wide.arg_tuple.8), index=3
constant.3 = s32[] constant(0)
broadcast = s32[8] broadcast(constant.3), dimensions={}
tuple.8 = (s32[], s32[], s32[8], s32[8], s32[10]) tuple(constant.3, get-tuple-element.47, broadcast, get-tuple-element.48, get-tuple-element.55)
while = (s32[], s32[], s32[8], s32[8], s32[10]) while(tuple.8), condition=wide.region_1.29, body=wide.region_0.7
get-tuple-element.40 = s32[8] get-tuple-element(while), index=2
const = s32[] constant(1)
add.0 = s32[] add(get-tuple-element.46, const)
ROOT out = (s32[], s32[], s32[8], s32[10]) tuple(add.0, get-tuple-element.47, get-tuple-element.40, get-tuple-element.55)
}
outer_cond {
constant.5 = s32[] constant(8)
wide.arg_tuple.30 = (s32[], s32[], s32[8], s32[10]) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(wide.arg_tuple.30), index=0
ROOT compare.0 = pred[] compare(get-tuple-element.16, constant.5), direction=LT
}
ENTRY main.43 {
constant.3 = s32[] constant(0)
init = s32[] constant(0)
array = s32[8] constant({1,2,3,4,5,6,7,8})
other_input = s32[10] constant({10,20,30,40,50,60,70,80,90,100})
tuple.8 = (s32[], s32[], s32[8], s32[10]) tuple(constant.3, init, array, other_input)
while = (s32[], s32[], s32[8], s32[10]) while(tuple.8), condition=outer_cond, body=outer_body
get-tuple-element.39 = s32[8] get-tuple-element(while), index=2
get-tuple-element.40 = s32[10] get-tuple-element(while), index=3
ROOT out = (s32[8],s32[10]) tuple(get-tuple-element.39, get-tuple-element.40)
}
)";
auto module = ParseAndReturnVerifiedModule(kModule).value();
auto module_clone = module->Clone();
TF_ASSERT_OK_AND_ASSIGN(bool clone_copy_inserted,
CopyInsertion().Run(module_clone.get()));
EXPECT_TRUE(clone_copy_inserted);
HloInstruction* while_instruction =
GetTopLevelWhileInstruction(module_clone.get());
EXPECT_EQ(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
TF_ASSERT_OK_AND_ASSIGN(
bool simplified_loop,
ScanLoopAccumulatorInputUnification().Run(module.get()));
EXPECT_TRUE(simplified_loop);
TF_ASSERT_OK_AND_ASSIGN(bool copy_inserted,
CopyInsertion().Run(module.get()));
EXPECT_TRUE(copy_inserted);
VLOG(3) << "After copy_insertion:\n" << module->ToString();
while_instruction = GetTopLevelWhileInstruction(module.get());
EXPECT_NE(
while_instruction->while_body()->root_instruction()->operand(2)->opcode(),
HloOpcode::kCopy);
}
}
}

Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scan_loop_accumulator_input_unification.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scan_loop_accumulator_input_unification_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ID: 7ebac96d-5373-4efe-9bc1-d3e56b54150e
Language: cpp
Repository Name: google/leveldb
File Name: crc32c
File Path in Repository: util/crc32c.cc
File Path for Unit Test: util/crc32c_test.cc

Code:
#include "util/crc32c.h"
#include <cstddef>
#include <cstdint>
#include "port/port.h"
#include "util/coding.h"
namespace leveldb {
namespace crc32c {
namespace {
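// Byte-at-a-time lookup table for the CRC-32C (Castagnoli) polynomial.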
const uint32_t kByteExtensionTable[256] = {
0x00000000, 0xf26b8303, 0xe13b70f7, 0x1350f3f4, 0xc79a971f, 0x35f1141c,
0x26a1e7e8, 0xd4ca64eb, 0x8ad958cf, 0x78b2dbcc, 0x6be22838, 0x9989ab3b,
0x4d43cfd0, 0xbf284cd3, 0xac78bf27, 0x5e133c24, 0x105ec76f, 0xe235446c,
0xf165b798, 0x030e349b, 0xd7c45070, 0x25afd373, 0x36ff2087, 0xc494a384,
0x9a879fa0, 0x68ec1ca3, 0x7bbcef57, 0x89d76c54, 0x5d1d08bf, 0xaf768bbc,
0xbc267848, 0x4e4dfb4b, 0x20bd8ede, 0xd2d60ddd, 0xc186fe29, 0x33ed7d2a,
0xe72719c1, 0x154c9ac2, 0x061c6936, 0xf477ea35, 0xaa64d611, 0x580f5512,
0x4b5fa6e6, 0xb93425e5, 0x6dfe410e, 0x9f95c20d, 0x8cc531f9, 0x7eaeb2fa,
0x30e349b1, 0xc288cab2, 0xd1d83946, 0x23b3ba45, 0xf779deae, 0x05125dad,
0x1642ae59, 0xe4292d5a, 0xba3a117e, 0x4851927d, 0x5b016189, 0xa96ae28a,
0x7da08661, 0x8fcb0562, 0x9c9bf696, 0x6ef07595, 0x417b1dbc, 0xb3109ebf,
0xa0406d4b, 0x522bee48, 0x86e18aa3, 0x748a09a0, 0x67dafa54, 0x95b17957,
0xcba24573, 0x39c9c670, 0x2a993584, 0xd8f2b687, 0x0c38d26c, 0xfe53516f,
0xed03a29b, 0x1f682198, 0x5125dad3, 0xa34e59d0, 0xb01eaa24, 0x42752927,
0x96bf4dcc, 0x64d4cecf, 0x77843d3b, 0x85efbe38, 0xdbfc821c, 0x2997011f,
0x3ac7f2eb, 0xc8ac71e8, 0x1c661503, 0xee0d9600, 0xfd5d65f4, 0x0f36e6f7,
0x61c69362, 0x93ad1061, 0x80fde395, 0x72966096, 0xa65c047d, 0x5437877e,
0x4767748a, 0xb50cf789, 0xeb1fcbad, 0x197448ae, 0x0a24bb5a, 0xf84f3859,
0x2c855cb2, 0xdeeedfb1, 0xcdbe2c45, 0x3fd5af46, 0x7198540d, 0x83f3d70e,
0x90a324fa, 0x62c8a7f9, 0xb602c312, 0x44694011, 0x5739b3e5, 0xa55230e6,
0xfb410cc2, 0x092a8fc1, 0x1a7a7c35, 0xe811ff36, 0x3cdb9bdd, 0xceb018de,
0xdde0eb2a, 0x2f8b6829, 0x82f63b78, 0x709db87b, 0x63cd4b8f, 0x91a6c88c,
0x456cac67, 0xb7072f64, 0xa457dc90, 0x563c5f93, 0x082f63b7, 0xfa44e0b4,
0xe9141340, 0x1b7f9043, 0xcfb5f4a8, 0x3dde77ab, 0x2e8e845f, 0xdce5075c,
0x92a8fc17, 0x60c37f14, 0x73938ce0, 0x81f80fe3, 0x55326b08, 0xa759e80b,
0xb4091bff, 0x466298fc, 0x1871a4d8, 0xea1a27db, 0xf94ad42f, 0x0b21572c,
0xdfeb33c7, 0x2d80b0c4, 0x3ed04330, 0xccbbc033, 0xa24bb5a6, 0x502036a5,
0x4370c551, 0xb11b4652, 0x65d122b9, 0x97baa1ba, 0x84ea524e, 0x7681d14d,
0x2892ed69, 0xdaf96e6a, 0xc9a99d9e, 0x3bc21e9d, 0xef087a76, 0x1d63f975,
0x0e330a81, 0xfc588982, 0xb21572c9, 0x407ef1ca, 0x532e023e, 0xa145813d,
0x758fe5d6, 0x87e466d5, 0x94b49521, 0x66df1622, 0x38cc2a06, 0xcaa7a905,
0xd9f75af1, 0x2b9cd9f2, 0xff56bd19, 0x0d3d3e1a, 0x1e6dcdee, 0xec064eed,
0xc38d26c4, 0x31e6a5c7, 0x22b65633, 0xd0ddd530, 0x0417b1db, 0xf67c32d8,
0xe52cc12c, 0x1747422f, 0x49547e0b, 0xbb3ffd08, 0xa86f0efc, 0x5a048dff,
0x8ecee914, 0x7ca56a17, 0x6ff599e3, 0x9d9e1ae0, 0xd3d3e1ab, 0x21b862a8,
0x32e8915c, 0xc083125f, 0x144976b4, 0xe622f5b7, 0xf5720643, 0x07198540,
0x590ab964, 0xab613a67, 0xb831c993, 0x4a5a4a90, 0x9e902e7b, 0x6cfbad78,
0x7fab5e8c, 0x8dc0dd8f, 0xe330a81a, 0x115b2b19, 0x020bd8ed, 0xf0605bee,
0x24aa3f05, 0xd6c1bc06, 0xc5914ff2, 0x37faccf1, 0x69e9f0d5, 0x9b8273d6,
0x88d28022, 0x7ab90321, 0xae7367ca, 0x5c18e4c9, 0x4f48173d, 0xbd23943e,
0xf36e6f75, 0x0105ec76, 0x12551f82, 0xe03e9c81, 0x34f4f86a, 0xc69f7b69,
0xd5cf889d, 0x27a40b9e, 0x79b737ba, 0x8bdcb4b9, 0x988c474d, 0x6ae7c44e,
0xbe2da0a5, 0x4c4623a6, 0x5f16d052, 0xad7d5351};
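// Lookup tables used by the four-way interleaved stride loop in Extend().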
const uint32_t kStrideExtensionTable0[256] = {
0x00000000, 0x30d23865, 0x61a470ca, 0x517648af, 0xc348e194, 0xf39ad9f1,
0xa2ec915e, 0x923ea93b, 0x837db5d9, 0xb3af8dbc, 0xe2d9c513, 0xd20bfd76,
0x4035544d, 0x70e76c28, 0x21912487, 0x11431ce2, 0x03171d43, 0x33c52526,
0x62b36d89, 0x526155ec, 0xc05ffcd7, 0xf08dc4b2, 0xa1fb8c1d, 0x9129b478,
0x806aa89a, 0xb0b890ff, 0xe1ced850, 0xd11ce035, 0x4322490e, 0x73f0716b,
0x228639c4, 0x125401a1, 0x062e3a86, 0x36fc02e3, 0x678a4a4c, 0x57587229,
0xc566db12, 0xf5b4e377, 0xa4c2abd8, 0x941093bd, 0x85538f5f, 0xb581b73a,
0xe4f7ff95, 0xd425c7f0, 0x461b6ecb, 0x76c956ae, 0x27bf1e01, 0x176d2664,
0x053927c5, 0x35eb1fa0, 0x649d570f, 0x544f6f6a, 0xc671c651, 0xf6a3fe34,
0xa7d5b69b, 0x97078efe, 0x8644921c, 0xb696aa79, 0xe7e0e2d6, 0xd732dab3,
0x450c7388, 0x75de4bed, 0x24a80342, 0x147a3b27, 0x0c5c750c, 0x3c8e4d69,
0x6df805c6, 0x5d2a3da3, 0xcf149498, 0xffc6acfd, 0xaeb0e452, 0x9e62dc37,
0x8f21c0d5, 0xbff3f8b0, 0xee85b01f, 0xde57887a, 0x4c692141, 0x7cbb1924,
0x2dcd518b, 0x1d1f69ee, 0x0f4b684f, 0x3f99502a, 0x6eef1885, 0x5e3d20e0,
0xcc0389db, 0xfcd1b1be, 0xada7f911, 0x9d75c174, 0x8c36dd96, 0xbce4e5f3,
0xed92ad5c, 0xdd409539, 0x4f7e3c02, 0x7fac0467, 0x2eda4cc8, 0x1e0874ad,
0x0a724f8a, 0x3aa077ef, 0x6bd63f40, 0x5b040725, 0xc93aae1e, 0xf9e8967b,
0xa89eded4, 0x984ce6b1, 0x890ffa53, 0xb9ddc236, 0xe8ab8a99, 0xd879b2fc,
0x4a471bc7, 0x7a9523a2, 0x2be36b0d, 0x1b315368, 0x096552c9, 0x39b76aac,
0x68c12203, 0x58131a66, 0xca2db35d, 0xfaff8b38, 0xab89c397, 0x9b5bfbf2,
0x8a18e710, 0xbacadf75, 0xebbc97da, 0xdb6eafbf, 0x49500684, 0x79823ee1,
0x28f4764e, 0x18264e2b, 0x18b8ea18, 0x286ad27d, 0x791c9ad2, 0x49cea2b7,
0xdbf00b8c, 0xeb2233e9, 0xba547b46, 0x8a864323, 0x9bc55fc1, 0xab1767a4,
0xfa612f0b, 0xcab3176e, 0x588dbe55, 0x685f8630, 0x3929ce9f, 0x09fbf6fa,
0x1baff75b, 0x2b7dcf3e, 0x7a0b8791, 0x4ad9bff4, 0xd8e716cf, 0xe8352eaa,
0xb9436605, 0x89915e60, 0x98d24282, 0xa8007ae7, 0xf9763248, 0xc9a40a2d,
0x5b9aa316, 0x6b489b73, 0x3a3ed3dc, 0x0aecebb9, 0x1e96d09e, 0x2e44e8fb,
0x7f32a054, 0x4fe09831, 0xddde310a, 0xed0c096f, 0xbc7a41c0, 0x8ca879a5,
0x9deb6547, 0xad395d22, 0xfc4f158d, 0xcc9d2de8, 0x5ea384d3, 0x6e71bcb6,
0x3f07f419, 0x0fd5cc7c, 0x1d81cddd, 0x2d53f5b8, 0x7c25bd17, 0x4cf78572,
0xdec92c49, 0xee1b142c, 0xbf6d5c83, 0x8fbf64e6, 0x9efc7804, 0xae2e4061,
0xff5808ce, 0xcf8a30ab, 0x5db49990, 0x6d66a1f5, 0x3c10e95a, 0x0cc2d13f,
0x14e49f14, 0x2436a771, 0x7540efde, 0x4592d7bb, 0xd7ac7e80, 0xe77e46e5,
0xb6080e4a, 0x86da362f, 0x97992acd, 0xa74b12a8, 0xf63d5a07, 0xc6ef6262,
0x54d1cb59, 0x6403f33c, 0x3575bb93, 0x05a783f6, 0x17f38257, 0x2721ba32,
0x7657f29d, 0x4685caf8, 0xd4bb63c3, 0xe4695ba6, 0xb51f1309, 0x85cd2b6c,
0x948e378e, 0xa45c0feb, 0xf52a4744, 0xc5f87f21, 0x57c6d61a, 0x6714ee7f,
0x3662a6d0, 0x06b09eb5, 0x12caa592, 0x22189df7, 0x736ed558, 0x43bced3d,
0xd1824406, 0xe1507c63, 0xb02634cc, 0x80f40ca9, 0x91b7104b, 0xa165282e,
0xf0136081, 0xc0c158e4, 0x52fff1df, 0x622dc9ba, 0x335b8115, 0x0389b970,
0x11ddb8d1, 0x210f80b4, 0x7079c81b, 0x40abf07e, 0xd2955945, 0xe2476120,
0xb331298f, 0x83e311ea, 0x92a00d08, 0xa272356d, 0xf3047dc2, 0xc3d645a7,
0x51e8ec9c, 0x613ad4f9, 0x304c9c56, 0x009ea433};
const uint32_t kStrideExtensionTable1[256] = {
0x00000000, 0x54075546, 0xa80eaa8c, 0xfc09ffca, 0x55f123e9, 0x01f676af,
0xfdff8965, 0xa9f8dc23, 0xabe247d2, 0xffe51294, 0x03eced5e, 0x57ebb818,
0xfe13643b, 0xaa14317d, 0x561dceb7, 0x021a9bf1, 0x5228f955, 0x062fac13,
0xfa2653d9, 0xae21069f, 0x07d9dabc, 0x53de8ffa, 0xafd77030, 0xfbd02576,
0xf9cabe87, 0xadcdebc1, 0x51c4140b, 0x05c3414d, 0xac3b9d6e, 0xf83cc828,
0x043537e2, 0x503262a4, 0xa451f2aa, 0xf056a7ec, 0x0c5f5826, 0x58580d60,
0xf1a0d143, 0xa5a78405, 0x59ae7bcf, 0x0da92e89, 0x0fb3b578, 0x5bb4e03e,
0xa7bd1ff4, 0xf3ba4ab2, 0x5a429691, 0x0e45c3d7, 0xf24c3c1d, 0xa64b695b,
0xf6790bff, 0xa27e5eb9, 0x5e77a173, 0x0a70f435, 0xa3882816, 0xf78f7d50,
0x0b86829a, 0x5f81d7dc, 0x5d9b4c2d, 0x099c196b, 0xf595e6a1, 0xa192b3e7,
0x086a6fc4, 0x5c6d3a82, 0xa064c548, 0xf463900e, 0x4d4f93a5, 0x1948c6e3,
0xe5413929, 0xb1466c6f, 0x18beb04c, 0x4cb9e50a, 0xb0b01ac0, 0xe4b74f86,
0xe6add477, 0xb2aa8131, 0x4ea37efb, 0x1aa42bbd, 0xb35cf79e, 0xe75ba2d8,
0x1b525d12, 0x4f550854, 0x1f676af0, 0x4b603fb6, 0xb769c07c, 0xe36e953a,
0x4a964919, 0x1e911c5f, 0xe298e395, 0xb69fb6d3, 0xb4852d22, 0xe0827864,
0x1c8b87ae, 0x488cd2e8, 0xe1740ecb, 0xb5735b8d, 0x497aa447, 0x1d7df101,
0xe91e610f, 0xbd193449, 0x4110cb83, 0x15179ec5, 0xbcef42e6, 0xe8e817a0,
0x14e1e86a, 0x40e6bd2c, 0x42fc26dd, 0x16fb739b, 0xeaf28c51, 0xbef5d917,
0x170d0534, 0x430a5072, 0xbf03afb8, 0xeb04fafe, 0xbb36985a, 0xef31cd1c,
0x133832d6, 0x473f6790, 0xeec7bbb3, 0xbac0eef5, 0x46c9113f, 0x12ce4479,
0x10d4df88, 0x44d38ace, 0xb8da7504, 0xecdd2042, 0x4525fc61, 0x1122a927,
0xed2b56ed, 0xb92c03ab, 0x9a9f274a, 0xce98720c, 0x32918dc6, 0x6696d880,
0xcf6e04a3, 0x9b6951e5, 0x6760ae2f, 0x3367fb69, 0x317d6098, 0x657a35de,
0x9973ca14, 0xcd749f52, 0x648c4371, 0x308b1637, 0xcc82e9fd, 0x9885bcbb,
0xc8b7de1f, 0x9cb08b59, 0x60b97493, 0x34be21d5, 0x9d46fdf6, 0xc941a8b0,
0x3548577a, 0x614f023c, 0x635599cd, 0x3752cc8b, 0xcb5b3341, 0x9f5c6607,
0x36a4ba24, 0x62a3ef62, 0x9eaa10a8, 0xcaad45ee, 0x3eced5e0, 0x6ac980a6,
0x96c07f6c, 0xc2c72a2a, 0x6b3ff609, 0x3f38a34f, 0xc3315c85, 0x973609c3,
0x952c9232, 0xc12bc774, 0x3d2238be, 0x69256df8, 0xc0ddb1db, 0x94dae49d,
0x68d31b57, 0x3cd44e11, 0x6ce62cb5, 0x38e179f3, 0xc4e88639, 0x90efd37f,
0x39170f5c, 0x6d105a1a, 0x9119a5d0, 0xc51ef096, 0xc7046b67, 0x93033e21,
0x6f0ac1eb, 0x3b0d94ad, 0x92f5488e, 0xc6f21dc8, 0x3afbe202, 0x6efcb744,
0xd7d0b4ef, 0x83d7e1a9, 0x7fde1e63, 0x2bd94b25, 0x82219706, 0xd626c240,
0x2a2f3d8a, 0x7e2868cc, 0x7c32f33d, 0x2835a67b, 0xd43c59b1, 0x803b0cf7,
0x29c3d0d4, 0x7dc48592, 0x81cd7a58, 0xd5ca2f1e, 0x85f84dba, 0xd1ff18fc,
0x2df6e736, 0x79f1b270, 0xd0096e53, 0x840e3b15, 0x7807c4df, 0x2c009199,
0x2e1a0a68, 0x7a1d5f2e, 0x8614a0e4, 0xd213f5a2, 0x7beb2981, 0x2fec7cc7,
0xd3e5830d, 0x87e2d64b, 0x73814645, 0x27861303, 0xdb8fecc9, 0x8f88b98f,
0x267065ac, 0x727730ea, 0x8e7ecf20, 0xda799a66, 0xd8630197, 0x8c6454d1,
0x706dab1b, 0x246afe5d, 0x8d92227e, 0xd9957738, 0x259c88f2, 0x719bddb4,
0x21a9bf10, 0x75aeea56, 0x89a7159c, 0xdda040da, 0x74589cf9, 0x205fc9bf,
0xdc563675, 0x88516333, 0x8a4bf8c2, 0xde4cad84, 0x2245524e, 0x76420708,
0xdfbadb2b, 0x8bbd8e6d, 0x77b471a7, 0x23b324e1};
const uint32_t kStrideExtensionTable2[256] = {
0x00000000, 0x678efd01, 0xcf1dfa02, 0xa8930703, 0x9bd782f5, 0xfc597ff4,
0x54ca78f7, 0x334485f6, 0x3243731b, 0x55cd8e1a, 0xfd5e8919, 0x9ad07418,
0xa994f1ee, 0xce1a0cef, 0x66890bec, 0x0107f6ed, 0x6486e636, 0x03081b37,
0xab9b1c34, 0xcc15e135, 0xff5164c3, 0x98df99c2, 0x304c9ec1, 0x57c263c0,
0x56c5952d, 0x314b682c, 0x99d86f2f, 0xfe56922e, 0xcd1217d8, 0xaa9cead9,
0x020fedda, 0x658110db, 0xc90dcc6c, 0xae83316d, 0x0610366e, 0x619ecb6f,
0x52da4e99, 0x3554b398, 0x9dc7b49b, 0xfa49499a, 0xfb4ebf77, 0x9cc04276,
0x34534575, 0x53ddb874, 0x60993d82, 0x0717c083, 0xaf84c780, 0xc80a3a81,
0xad8b2a5a, 0xca05d75b, 0x6296d058, 0x05182d59, 0x365ca8af, 0x51d255ae,
0xf94152ad, 0x9ecfafac, 0x9fc85941, 0xf846a440, 0x50d5a343, 0x375b5e42,
0x041fdbb4, 0x639126b5, 0xcb0221b6, 0xac8cdcb7, 0x97f7ee29, 0xf0791328,
0x58ea142b, 0x3f64e92a, 0x0c206cdc, 0x6bae91dd, 0xc33d96de, 0xa4b36bdf,
0xa5b49d32, 0xc23a6033, 0x6aa96730, 0x0d279a31, 0x3e631fc7, 0x59ede2c6,
0xf17ee5c5, 0x96f018c4, 0xf371081f, 0x94fff51e, 0x3c6cf21d, 0x5be20f1c,
0x68a68aea, 0x0f2877eb, 0xa7bb70e8, 0xc0358de9, 0xc1327b04, 0xa6bc8605,
0x0e2f8106, 0x69a17c07, 0x5ae5f9f1, 0x3d6b04f0, 0x95f803f3, 0xf276fef2,
0x5efa2245, 0x3974df44, 0x91e7d847, 0xf6692546, 0xc52da0b0, 0xa2a35db1,
0x0a305ab2, 0x6dbea7b3, 0x6cb9515e, 0x0b37ac5f, 0xa3a4ab5c, 0xc42a565d,
0xf76ed3ab, 0x90e02eaa, 0x387329a9, 0x5ffdd4a8, 0x3a7cc473, 0x5df23972,
0xf5613e71, 0x92efc370, 0xa1ab4686, 0xc625bb87, 0x6eb6bc84, 0x09384185,
0x083fb768, 0x6fb14a69, 0xc7224d6a, 0xa0acb06b, 0x93e8359d, 0xf466c89c,
0x5cf5cf9f, 0x3b7b329e, 0x2a03aaa3, 0x4d8d57a2, 0xe51e50a1, 0x8290ada0,
0xb1d42856, 0xd65ad557, 0x7ec9d254, 0x19472f55, 0x1840d9b8, 0x7fce24b9,
0xd75d23ba, 0xb0d3debb, 0x83975b4d, 0xe419a64c, 0x4c8aa14f, 0x2b045c4e,
0x4e854c95, 0x290bb194, 0x8198b697, 0xe6164b96, 0xd552ce60, 0xb2dc3361,
0x1a4f3462, 0x7dc1c963, 0x7cc63f8e, 0x1b48c28f, 0xb3dbc58c, 0xd455388d,
0xe711bd7b, 0x809f407a, 0x280c4779, 0x4f82ba78, 0xe30e66cf, 0x84809bce,
0x2c139ccd, 0x4b9d61cc, 0x78d9e43a, 0x1f57193b, 0xb7c41e38, 0xd04ae339,
0xd14d15d4, 0xb6c3e8d5, 0x1e50efd6, 0x79de12d7, 0x4a9a9721, 0x2d146a20,
0x85876d23, 0xe2099022, 0x878880f9, 0xe0067df8, 0x48957afb, 0x2f1b87fa,
0x1c5f020c, 0x7bd1ff0d, 0xd342f80e, 0xb4cc050f, 0xb5cbf3e2, 0xd2450ee3,
0x7ad609e0, 0x1d58f4e1, 0x2e1c7117, 0x49928c16, 0xe1018b15, 0x868f7614,
0xbdf4448a, 0xda7ab98b, 0x72e9be88, 0x15674389, 0x2623c67f, 0x41ad3b7e,
0xe93e3c7d, 0x8eb0c17c, 0x8fb73791, 0xe839ca90, 0x40aacd93, 0x27243092,
0x1460b564, 0x73ee4865, 0xdb7d4f66, 0xbcf3b267, 0xd972a2bc, 0xbefc5fbd,
0x166f58be, 0x71e1a5bf, 0x42a52049, 0x252bdd48, 0x8db8da4b, 0xea36274a,
0xeb31d1a7, 0x8cbf2ca6, 0x242c2ba5, 0x43a2d6a4, 0x70e65352, 0x1768ae53,
0xbffba950, 0xd8755451, 0x74f988e6, 0x137775e7, 0xbbe472e4, 0xdc6a8fe5,
0xef2e0a13, 0x88a0f712, 0x2033f011, 0x47bd0d10, 0x46bafbfd, 0x213406fc,
0x89a701ff, 0xee29fcfe, 0xdd6d7908, 0xbae38409, 0x1270830a, 0x75fe7e0b,
0x107f6ed0, 0x77f193d1, 0xdf6294d2, 0xb8ec69d3, 0x8ba8ec25, 0xec261124,
0x44b51627, 0x233beb26, 0x223c1dcb, 0x45b2e0ca, 0xed21e7c9, 0x8aaf1ac8,
0xb9eb9f3e, 0xde65623f, 0x76f6653c, 0x1178983d};
const uint32_t kStrideExtensionTable3[256] = {
0x00000000, 0xf20c0dfe, 0xe1f46d0d, 0x13f860f3, 0xc604aceb, 0x3408a115,
0x27f0c1e6, 0xd5fccc18, 0x89e52f27, 0x7be922d9, 0x6811422a, 0x9a1d4fd4,
0x4fe183cc, 0xbded8e32, 0xae15eec1, 0x5c19e33f, 0x162628bf, 0xe42a2541,
0xf7d245b2, 0x05de484c, 0xd0228454, 0x222e89aa, 0x31d6e959, 0xc3dae4a7,
0x9fc30798, 0x6dcf0a66, 0x7e376a95, 0x8c3b676b, 0x59c7ab73, 0xabcba68d,
0xb833c67e, 0x4a3fcb80, 0x2c4c517e, 0xde405c80, 0xcdb83c73, 0x3fb4318d,
0xea48fd95, 0x1844f06b, 0x0bbc9098, 0xf9b09d66, 0xa5a97e59, 0x57a573a7,
0x445d1354, 0xb6511eaa, 0x63add2b2, 0x91a1df4c, 0x8259bfbf, 0x7055b241,
0x3a6a79c1, 0xc866743f, 0xdb9e14cc, 0x29921932, 0xfc6ed52a, 0x0e62d8d4,
0x1d9ab827, 0xef96b5d9, 0xb38f56e6, 0x41835b18, 0x527b3beb, 0xa0773615,
0x758bfa0d, 0x8787f7f3, 0x947f9700, 0x66739afe, 0x5898a2fc, 0xaa94af02,
0xb96ccff1, 0x4b60c20f, 0x9e9c0e17, 0x6c9003e9, 0x7f68631a, 0x8d646ee4,
0xd17d8ddb, 0x23718025, 0x3089e0d6, 0xc285ed28, 0x17792130, 0xe5752cce,
0xf68d4c3d, 0x048141c3, 0x4ebe8a43, 0xbcb287bd, 0xaf4ae74e, 0x5d46eab0,
0x88ba26a8, 0x7ab62b56, 0x694e4ba5, 0x9b42465b, 0xc75ba564, 0x3557a89a,
0x26afc869, 0xd4a3c597, 0x015f098f, 0xf3530471, 0xe0ab6482, 0x12a7697c,
0x74d4f382, 0x86d8fe7c, 0x95209e8f, 0x672c9371, 0xb2d05f69, 0x40dc5297,
0x53243264, 0xa1283f9a, 0xfd31dca5, 0x0f3dd15b, 0x1cc5b1a8, 0xeec9bc56,
0x3b35704e, 0xc9397db0, 0xdac11d43, 0x28cd10bd, 0x62f2db3d, 0x90fed6c3,
0x8306b630, 0x710abbce, 0xa4f677d6, 0x56fa7a28, 0x45021adb, 0xb70e1725,
0xeb17f41a, 0x191bf9e4, 0x0ae39917, 0xf8ef94e9, 0x2d1358f1, 0xdf1f550f,
0xcce735fc, 0x3eeb3802, 0xb13145f8, 0x433d4806, 0x50c528f5, 0xa2c9250b,
0x7735e913, 0x8539e4ed, 0x96c1841e, 0x64cd89e0, 0x38d46adf, 0xcad86721,
0xd92007d2, 0x2b2c0a2c, 0xfed0c634, 0x0cdccbca, 0x1f24ab39, 0xed28a6c7,
0xa7176d47, 0x551b60b9, 0x46e3004a, 0xb4ef0db4, 0x6113c1ac, 0x931fcc52,
0x80e7aca1, 0x72eba15f, 0x2ef24260, 0xdcfe4f9e, 0xcf062f6d, 0x3d0a2293,
0xe8f6ee8b, 0x1afae375, 0x09028386, 0xfb0e8e78, 0x9d7d1486, 0x6f711978,
0x7c89798b, 0x8e857475, 0x5b79b86d, 0xa975b593, 0xba8dd560, 0x4881d89e,
0x14983ba1, 0xe694365f, 0xf56c56ac, 0x07605b52, 0xd29c974a, 0x20909ab4,
0x3368fa47, 0xc164f7b9, 0x8b5b3c39, 0x795731c7, 0x6aaf5134, 0x98a35cca,
0x4d5f90d2, 0xbf539d2c, 0xacabfddf, 0x5ea7f021, 0x02be131e, 0xf0b21ee0,
0xe34a7e13, 0x114673ed, 0xc4babff5, 0x36b6b20b, 0x254ed2f8, 0xd742df06,
0xe9a9e704, 0x1ba5eafa, 0x085d8a09, 0xfa5187f7, 0x2fad4bef, 0xdda14611,
0xce5926e2, 0x3c552b1c, 0x604cc823, 0x9240c5dd, 0x81b8a52e, 0x73b4a8d0,
0xa64864c8, 0x54446936, 0x47bc09c5, 0xb5b0043b, 0xff8fcfbb, 0x0d83c245,
0x1e7ba2b6, 0xec77af48, 0x398b6350, 0xcb876eae, 0xd87f0e5d, 0x2a7303a3,
0x766ae09c, 0x8466ed62, 0x979e8d91, 0x6592806f, 0xb06e4c77, 0x42624189,
0x519a217a, 0xa3962c84, 0xc5e5b67a, 0x37e9bb84, 0x2411db77, 0xd61dd689,
0x03e11a91, 0xf1ed176f, 0xe215779c, 0x10197a62, 0x4c00995d, 0xbe0c94a3,
0xadf4f450, 0x5ff8f9ae, 0x8a0435b6, 0x78083848, 0x6bf058bb, 0x99fc5545,
0xd3c39ec5, 0x21cf933b, 0x3237f3c8, 0xc03bfe36, 0x15c7322e, 0xe7cb3fd0,
0xf4335f23, 0x063f52dd, 0x5a26b1e2, 0xa82abc1c, 0xbbd2dcef, 0x49ded111,
0x9c221d09, 0x6e2e10f7, 0x7dd67004, 0x8fda7dfa};
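// CRCs are pre- and post-conditioned by XOR-ing with all ones.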
static constexpr const uint32_t kCRC32Xor = static_cast<uint32_t>(0xffffffffU);
inline uint32_t ReadUint32LE(const uint8_t* buffer) {
return DecodeFixed32(reinterpret_cast<const char*>(buffer));
}
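// Rounds the pointer up to the next N-byte-aligned address (N must be a power
// of two).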
template <int N>
constexpr inline const uint8_t* RoundUp(const uint8_t* pointer) {
return reinterpret_cast<uint8_t*>(
(reinterpret_cast<uintptr_t>(pointer) + (N - 1)) &
~static_cast<uintptr_t>(N - 1));
}
}
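// Determines whether hardware-accelerated CRC32C is usable by checking that
// the accelerated implementation produces the expected CRC for a known input.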
static bool CanAccelerateCRC32C() {
static const char kTestCRCBuffer[] = "TestCRCBuffer";
static const char kBufSize = sizeof(kTestCRCBuffer) - 1;
static const uint32_t kTestCRCValue = 0xdcbc59fa;
return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
uint32_t Extend(uint32_t crc, const char* data, size_t n) {
static bool accelerate = CanAccelerateCRC32C();
if (accelerate) {
return port::AcceleratedCRC32C(crc, data, n);
}
const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
const uint8_t* e = p + n;
uint32_t l = crc ^ kCRC32Xor;
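  // STEP1 processes a single input byte. STEP4 advances one of the four
  // interleaved stride CRCs by 4 bytes, and STEP16 processes a 16-byte block
  // across all four strides. STEP4W processes 4 bytes that are already loaded
  // into the word w, folding the result back into l.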
#define STEP1 \
do { \
int c = (l & 0xff) ^ *p++; \
l = kByteExtensionTable[c] ^ (l >> 8); \
} while (0)
#define STEP4(s) \
do { \
crc##s = ReadUint32LE(p + s * 4) ^ kStrideExtensionTable3[crc##s & 0xff] ^ \
kStrideExtensionTable2[(crc##s >> 8) & 0xff] ^ \
kStrideExtensionTable1[(crc##s >> 16) & 0xff] ^ \
kStrideExtensionTable0[crc##s >> 24]; \
} while (0)
#define STEP16 \
do { \
STEP4(0); \
STEP4(1); \
STEP4(2); \
STEP4(3); \
p += 16; \
} while (0)
#define STEP4W(w) \
do { \
w ^= l; \
for (size_t i = 0; i < 4; ++i) { \
w = (w >> 8) ^ kByteExtensionTable[w & 0xff]; \
} \
l = w; \
} while (0)
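  // Point x at the first 4-byte-aligned address at or after p; process bytes
  // one at a time until p reaches it (or the end of the input).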
const uint8_t* x = RoundUp<4>(p);
if (x <= e) {
while (p != x) {
STEP1;
}
}
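  // Process 16 bytes at a time using four interleaved CRC strides.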
if ((e - p) >= 16) {
uint32_t crc0 = ReadUint32LE(p + 0 * 4) ^ l;
uint32_t crc1 = ReadUint32LE(p + 1 * 4);
uint32_t crc2 = ReadUint32LE(p + 2 * 4);
uint32_t crc3 = ReadUint32LE(p + 3 * 4);
p += 16;
while ((e - p) >= 16) {
STEP16;
}
while ((e - p) >= 4) {
STEP4(0);
uint32_t tmp = crc0;
crc0 = crc1;
crc1 = crc2;
crc2 = crc3;
crc3 = tmp;
p += 4;
}
l = 0;
STEP4W(crc0);
STEP4W(crc1);
STEP4W(crc2);
STEP4W(crc3);
}
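  // Process the remaining bytes one at a time.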
while (p != e) {
STEP1;
}
#undef STEP4W
#undef STEP16
#undef STEP4
#undef STEP1
return l ^ kCRC32Xor;
}
}
}

Unit Test - (Ground Truth):
#include "util/crc32c.h"
#include "gtest/gtest.h"
namespace leveldb {
namespace crc32c {
TEST(CRC, StandardResults) {
char buf[32];
memset(buf, 0, sizeof(buf));
ASSERT_EQ(0x8a9136aa, Value(buf, sizeof(buf)));
memset(buf, 0xff, sizeof(buf));
ASSERT_EQ(0x62a8ab43, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = i;
}
ASSERT_EQ(0x46dd794e, Value(buf, sizeof(buf)));
for (int i = 0; i < 32; i++) {
buf[i] = 31 - i;
}
ASSERT_EQ(0x113fdb5c, Value(buf, sizeof(buf)));
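  // Test vector from RFC 3720 (iSCSI) section B.4.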
uint8_t data[48] = {
0x01, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x18, 0x28, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
ASSERT_EQ(0xd9963a56, Value(reinterpret_cast<char*>(data), sizeof(data)));
}
TEST(CRC, Values) { ASSERT_NE(Value("a", 1), Value("foo", 3)); }
TEST(CRC, Extend) {
ASSERT_EQ(Value("hello world", 11), Extend(Value("hello ", 6), "world", 5));
}
TEST(CRC, Mask) {
uint32_t crc = Value("foo", 3);
ASSERT_NE(crc, Mask(crc));
ASSERT_NE(crc, Mask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Mask(crc)));
ASSERT_EQ(crc, Unmask(Unmask(Mask(Mask(crc)))));
}
}
}

Code Url: https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/crc32c.cc
Test Code Url: https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/crc32c_test.cc
Commit Hash: 23e35d792b9154f922b8b575b12596a4d8664c65
ID: 7f4f04cd-ce97-42db-8c4c-727b29e637dd
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: cudnn_fused_conv_rewriter
File Path in Repository: third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter.cc
File Path for Unit Test: third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter_test.cc

Code:
#include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/ml_dtypes.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
bool IsConvCustomCall(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kCustomCall &&
(instr->custom_call_target() == kCudnnConvForwardCallTarget ||
instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget);
}
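// A convolution is depthwise when each input feature forms its own group,
// i.e. feature_group_count equals the number of input features.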
bool IsConvDepthwise(const HloInstruction* instr) {
int64_t feature_group_count = instr->feature_group_count();
if (feature_group_count == 1) {
return false;
}
const HloInstruction* input = instr->operand(0);
int64_t input_feature_dimension =
instr->convolution_dimension_numbers().input_feature_dimension();
int64_t input_feature_count =
input->shape().dimensions(input_feature_dimension);
return input_feature_count == feature_group_count;
}
bool IsNonDepthwiseConvCustomCall(const HloInstruction* instr) {
return IsConvCustomCall(instr) && !IsConvDepthwise(instr);
}
bool IsROCm(se::GpuComputeCapability cc) {
return std::holds_alternative<se::RocmComputeCapability>(cc);
}
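// On CUDA, runtime fusion is used only when the xla_gpu_use_runtime_fusion
// debug option is set and the GPU is at least sm_75; on other platforms there
// is no such restriction.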
bool ShouldUseCudnnRuntimeFusion(const DebugOptions& debug_opts,
se::GpuComputeCapability cc) {
const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&cc);
if (cuda_cc != nullptr)
return debug_opts.xla_gpu_use_runtime_fusion() && cuda_cc->IsAtLeast(7, 5);
else
return true;
}
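// Runtime-fused convolutions support at most three operands (input, filter,
// bias), require F16 inputs, and need even input/output feature counts in the
// filter.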
bool IsSuitableForCudnnRuntimeFusion(HloInstruction* conv) {
if (conv->operands().size() > 3) {
return false;
}
if (conv->operand(0)->shape().element_type() != F16) {
return false;
}
const Shape& shape = conv->operand(1)->shape();
int64_t num_input_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_input_feature_dimension());
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().kernel_output_feature_dimension());
if (num_input_features % 2 != 0 || num_output_features % 2 != 0) {
return false;
}
return true;
}
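// Returns true if the values of `instr` are exactly representable in `dst_ty`:
// the type already matches, instr is a convert from dst_ty that preserved all
// values, a constant that round-trips through dst_ty unchanged, or a
// broadcast/reshape/transpose of such a value.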
bool IsLosslesslyConvertibleTo(const HloInstruction* instr,
PrimitiveType dst_ty) {
if (instr->shape().element_type() == dst_ty) {
return true;
}
if (Match(instr, m::Convert(m::Op().WithElementType(dst_ty)))) {
return primitive_util::CastPreservesValues(dst_ty,
instr->shape().element_type());
}
if (instr->opcode() == HloOpcode::kConstant) {
if (!instr->shape().IsArray()) {
return false;
}
PrimitiveType orig_ty = instr->shape().element_type();
absl::StatusOr<Literal> converted1 = instr->literal().Convert(dst_ty);
if (!converted1.ok()) {
return false;
}
absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty);
if (!converted2.ok()) {
return false;
}
return instr->literal() == *converted2;
}
if (instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose) {
return IsLosslesslyConvertibleTo(instr->operand(0), dst_ty);
}
return false;
}
bool IsLosslesslyConvertibleToS8(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, S8);
}
bool IsLosslesslyConvertibleToF16(const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, F16);
}
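// Turns a plain cudnn conv forward custom call into the bias-activation
// variant by appending an all-zero bias operand; returns the call unchanged if
// it is already the bias-activation form.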
absl::StatusOr<HloInstruction*> EnsureIsConvBiasActivation(
HloInstruction* conv) {
CHECK_EQ(conv->opcode(), HloOpcode::kCustomCall);
if (conv->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget) {
return conv;
}
if (conv->custom_call_target() == kCudnnConvForwardCallTarget) {
HloComputation* comp = conv->parent();
const Shape& shape = conv->shape().tuple_shapes(0);
int64_t num_output_features = shape.dimensions(
conv->convolution_dimension_numbers().output_feature_dimension());
PrimitiveType bias_ty;
if (primitive_util::IsIntegralType(shape.element_type())) {
bias_ty = F32;
} else {
bias_ty = shape.element_type();
}
auto bias = BroadcastZeros(comp, bias_ty, {num_output_features});
absl::InlinedVector<HloInstruction*, 3> new_operands(
conv->operands().begin(), conv->operands().end());
new_operands.push_back(bias);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv));
new_conv->set_custom_call_target(kCudnnConvBiasActivationForwardCallTarget);
comp->parent()->SetAndUniquifyInstrName(new_conv,
"cudnn-conv-bias-activation");
return new_conv;
}
return FailedPrecondition("Unsupported conv: %s", conv->ToString());
}
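// Folds convert(get-tuple-element(conv)) into the convolution by switching the
// element type of the conv's result from `conv_type` to `cvt_type`.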
absl::StatusOr<bool> FuseConvertTypeIntoConv(HloComputation* comp,
PrimitiveType conv_type,
PrimitiveType cvt_type) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
auto tuple_elem =
m::GetTupleElement(m::Op(&conv).WithPredicate(IsConvCustomCall), 0)
.WithElementType(conv_type);
auto pattern =
m::Convert(tuple_elem.WithOneUser()).WithElementType(cvt_type);
if (!Match(instr, pattern)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvertTypeIntoConv: ", conv->ToString());
})) {
continue;
}
Shape new_shape = conv->shape();
new_shape.mutable_tuple_shapes(0)->set_element_type(cvt_type);
HloInstruction* new_conv =
comp->AddInstruction(conv->CloneWithNewShape(new_shape));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(HloInstruction * new_gte,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_gte));
changed = true;
}
return changed;
}
struct ConvConvertTypes {
PrimitiveType convolution_type;
PrimitiveType conversion_type;
};
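// Folds the int8/int32/float conversions that surround a convolution into the
// convolution itself.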
absl::StatusOr<bool> FuseRemoveConvertInConv(HloComputation* comp) {
bool changed = false;
std::array<ConvConvertTypes, 3> types{{
{S32, F32},
{S8, F32},
{F32, S8},
}};
for (auto [conv_type, cvt_type] : types) {
TF_ASSIGN_OR_RETURN(bool curr_change,
FuseConvertTypeIntoConv(comp, conv_type, cvt_type));
changed |= curr_change;
}
return changed;
}
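// Fuses multiplication of a convolution result by a broadcast scalar into the
// conv_result_scale field of the conv's backend config.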
absl::StatusOr<bool> FuseConvAlpha(HloComputation* comp) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
HloInstruction* gte = nullptr;
HloInstruction* alpha = nullptr;
auto pattern = m::MultiplyAnyOrder(
m::GetTupleElement(
>e, m::Op(&conv).WithPredicate(IsNonDepthwiseConvCustomCall), 0)
.WithOneUse(),
m::Broadcast(m::ConstantEffectiveScalar(&alpha)));
if (!Match(instr, pattern)) {
continue;
}
PrimitiveType alpha_ty = gte->shape().element_type() == F64 ? F64 : F32;
if (!IsLosslesslyConvertibleTo(alpha, alpha_ty)) {
continue;
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.conv_result_scale() != 1) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvAlpha: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64));
config.set_conv_result_scale(alpha_f64.GetFirstElement<double>());
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(conv->parent()->ReplaceInstruction(instr, gte));
changed = true;
}
return changed;
}
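// Textual description of the op graph fused into an FP8 convolution, in the
// form "uid:[output_type]op_name(operand_uid);...", stored in the
// serialized_graph field of the conv's backend config.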
class GraphString {
public:
GraphString() = default;
bool AppendOp(std::string op_name, HloInstruction* op,
std::vector<HloInstruction*> operands = {}) {
std::optional<int64_t> operand_uid;
int num_operands_in_graph = 0;
for (HloInstruction* operand : operands) {
if (OpInGraph(operand->unique_id())) {
num_operands_in_graph++;
if (num_operands_in_graph > 1) {
return false;
}
operand_uid = operand->unique_id();
}
}
graph_.emplace_back(OpDescriptor(
{op->unique_id(), op->shape().element_type(), op_name, operand_uid}));
return true;
}
void ChangeDataType(PrimitiveType type) {
DCHECK(!graph_.empty());
graph_.back().output_type = type;
}
std::string Graph() const {
std::string graph;
for (OpDescriptor op : graph_) {
graph.append(std::to_string(op.uid));
graph.append(":[" +
primitive_util::LowercasePrimitiveTypeName(op.output_type) +
"]");
graph.append(op.name);
graph.append("(");
if (op.operand.has_value()) {
graph.append(std::to_string(*op.operand));
}
graph.append(");");
}
return graph;
}
bool OpInGraph(int64_t uid, std::string op_name = "") const {
auto op_filter = [&](OpDescriptor op) -> bool {
if (op_name.empty()) {
return op.uid == uid;
} else {
return op.uid == uid && op.name == op_name;
}
};
return std::find_if(graph_.begin(), graph_.end(), op_filter) !=
graph_.end();
}
private:
struct OpDescriptor {
int64_t uid;
PrimitiveType output_type;
std::string name;
std::optional<int64_t> operand;
};
std::vector<OpDescriptor> graph_;
};
bool IsF8Type(const HloInstruction* instr) {
return primitive_util::IsF8Type(instr->shape().element_type());
}
bool IsScalar(const HloInstruction* instr) {
return ShapeUtil::IsScalar(instr->shape());
}
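// Matches a conversion to F8E4M3FN or F8E5M2 that is preceded by a clamp to
// the full range of the target type, and returns that type.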
std::optional<PrimitiveType> IsSaturatingCastToF8(HloInstruction* instr) {
HloInstruction *op, *clamp_lower, *clamp_upper;
if (Match(instr,
m::Convert(
&op,
m::Clamp(m::Broadcast(m::ConstantScalar(&clamp_lower)), m::Op(),
m::Broadcast(m::ConstantScalar(&clamp_upper))))) &&
((op->shape().element_type() == F8E4M3FN &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e4m3fn>::max()))) ||
(op->shape().element_type() == F8E5M2 &&
clamp_lower->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::lowest())) &&
clamp_upper->literal().IsAllFloat(static_cast<float>(
std::numeric_limits<tsl::float8_e5m2>::max()))))) {
return op->shape().element_type();
}
return std::nullopt;
}
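// Returns true if `op` is a scalar reduction that computes a maximum starting
// from a non-positive constant init value.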
bool AppliesMaxReduce(HloInstruction* op) {
HloComputation* reduce_comp = op->to_apply();
HloInstruction* reduce_comp_root = reduce_comp->root_instruction();
return ShapeUtil::IsScalar(op->shape()) &&
ShapeUtil::IsScalar(op->operand(1)->shape()) &&
op->operand(1)->IsConstant() &&
op->operand(1)->literal().GetAsDouble({}) <= 0. &&
reduce_comp_root->opcode() == HloOpcode::kMaximum &&
reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter &&
reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter;
}
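// Recursively walks the users of `instr`, recording ops (add, scale, invscale,
// relu, amax, saturating FP8 casts) that cuDNN can execute as part of the
// convolution graph. Extra operands and auxiliary outputs are collected along
// the way; the traversal rolls back to its previous state when an
// instruction's users cannot all be captured.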
void CaptureConvGraphRecursive(HloInstruction* instr,
std::vector<HloInstruction*>& operands,
std::vector<HloInstruction*>& aux_outputs,
GraphString& graph_string,
absl::flat_hash_set<int>& visited_instrs,
HloInstruction*& final_instr) {
if (!visited_instrs.emplace(instr->unique_id()).second) {
return;
}
final_instr = instr;
GraphString init_graph_string = graph_string;
std::vector<HloInstruction*> init_operands = operands,
init_aux_outputs = aux_outputs;
int num_linear_users = 0, num_nonlinear_users = 0;
for (HloInstruction* user : instr->users()) {
HloInstruction *op, *operand0, *operand1;
if (Match(user, m::AddAnyOrder(&op, m::Op(&operand0), m::Op(&operand1)))) {
if (graph_string.AppendOp("add", op, {operand0, operand1})) {
operands.push_back(operand0 == instr ? operand1 : operand0);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MultiplyAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("scale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Divide(&op, m::Op(&operand0),
m::Broadcast(m::Op(&operand1)))) &&
ShapeUtil::IsScalar(operand1->shape())) {
if (graph_string.AppendOp("invscale", op, {operand0, operand1})) {
operands.push_back(operand1);
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::MaximumAnyOrder(&op, m::Op(&operand0),
m::Broadcast(m::ConstantScalar(0))))) {
if (graph_string.AppendOp("relu", op, {operand0})) {
num_linear_users++;
CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
}
continue;
}
if (Match(user, m::Reduce(&op, m::Op(&operand0), m::Op())) &&
graph_string.OpInGraph(operand0->unique_id(), "relu") &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
if (!user->users().empty()) {
HloInstruction* users_user = user->users()[0];
std::optional<PrimitiveType> f8_type = IsSaturatingCastToF8(users_user);
if (f8_type.has_value()) {
graph_string.ChangeDataType(f8_type.value());
num_linear_users++;
CaptureConvGraphRecursive(users_user, operands, aux_outputs,
graph_string, visited_instrs, final_instr);
continue;
}
if (Match(users_user,
m::Reduce(&op, m::Abs(m::Op(&operand0)), m::Op())) &&
AppliesMaxReduce(op)) {
if (graph_string.AppendOp("amax", op, {operand0})) {
aux_outputs.emplace_back(op);
num_nonlinear_users++;
}
continue;
}
}
}
if (num_linear_users > 1 || num_nonlinear_users > 1 ||
num_linear_users + num_nonlinear_users < instr->user_count()) {
graph_string = init_graph_string;
operands = init_operands;
aux_outputs = init_aux_outputs;
final_instr = instr;
}
}
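// Re-applies the input and filter scales as explicit ops on the convolution
// result and then captures the graph of fusable ops rooted at the conv.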
absl::StatusOr<
std::tuple<std::vector<HloInstruction*>, std::vector<HloInstruction*>,
GraphString, HloInstruction*>>
CaptureConvGraph(HloInstruction* instr, HloInstruction* convolution,
HloInstruction* wide_input, HloInstruction* wide_filter,
HloInstruction* input_scale, HloInstruction* filter_scale,
bool x_mult_scale, bool w_mult_scale) {
GraphString graph_string;
graph_string.AppendOp("conv", instr);
HloInstruction *input_scaled_conv, *filter_scaled_conv;
if (input_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(0, wide_input));
HloInstruction* bcast_input_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), input_scale, {}));
input_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
x_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide, instr,
bcast_input_scale));
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(input_scaled_conv));
}
if (filter_scale) {
TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(1, wide_filter));
HloInstruction* bcast_filter_scale = instr->AddInstruction(
HloInstruction::CreateBroadcast(instr->shape(), filter_scale, {}));
filter_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary(
instr->shape(),
w_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide,
input_scale ? input_scaled_conv : instr, bcast_filter_scale));
TF_RETURN_IF_ERROR((input_scale ? input_scaled_conv : instr)
->ReplaceAllUsesWith(filter_scaled_conv));
}
std::vector<HloInstruction*> operands, aux_outputs;
absl::flat_hash_set<int> visited_instrs;
HloInstruction* final_instr;
CaptureConvGraphRecursive(instr, operands, aux_outputs, graph_string,
visited_instrs, final_instr);
return std::make_tuple(operands, aux_outputs, graph_string, final_instr);
}
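// Rewrites FP8 convolutions into cuDNN "forward graph" custom calls. Requires
// cuDNN 8.9+, CUDA 12+ and Hopper or newer. Matches a conv custom call whose
// operands are (optionally scaled) converts of FP8 values, captures the
// fusible user graph, records its serialized description in the backend
// config, and replaces the matched instructions with a new custom call whose
// tuple output carries the conv result, any auxiliary outputs (e.g. amax) and
// the original call's trailing workspace element.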
absl::StatusOr<bool> F8GraphConv(HloComputation* comp,
se::CudaComputeCapability cc,
se::dnn::VersionInfo dnn_version,
const se::SemanticVersion& toolkit_version) {
bool changed = false;
if (dnn_version < se::dnn::VersionInfo(8, 9, 0)) {
return false;
}
if (toolkit_version < se::SemanticVersion{12, 0, 0}) {
return false;
}
if (!cc.IsAtLeast(se::CudaComputeCapability::HOPPER)) {
return false;
}
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction *convolution, *gte, *input, *filter,
*input_scale = nullptr, *filter_scale = nullptr,
*input_scale_op = nullptr, *filter_scale_op = nullptr,
*wide_input = nullptr, *wide_filter = nullptr;
auto conv_operand_maybe_scaled = [](HloInstruction** operand,
HloInstruction** wide_operand,
HloInstruction** scale_op,
HloInstruction** scale) {
return m::AnyOf<HloInstruction>(
m::Op(operand).WithPredicate(IsF8Type),
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Divide(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))),
m::MultiplyAnyOrder(
scale_op,
m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)),
m::Broadcast(m::Op(scale).WithPredicate(IsScalar))));
};
auto pattern = m::GetTupleElement(
>e,
m::CustomCall(
&convolution,
conv_operand_maybe_scaled(&input, &wide_input, &input_scale_op,
&input_scale),
conv_operand_maybe_scaled(&filter, &wide_filter, &filter_scale_op,
&filter_scale))
.WithPredicate(IsConvCustomCall),
0);
if (Match(instr, pattern)) {
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("F8GraphConv: ", convolution->ToString());
})) {
continue;
}
std::vector<HloInstruction*> operands, aux_outputs;
GraphString graph_string;
HloInstruction* final_instr;
TF_ASSIGN_OR_RETURN(
std::tie(operands, aux_outputs, graph_string, final_instr),
CaptureConvGraph(
instr, convolution, wide_input, wide_filter, input_scale,
filter_scale,
input_scale_op ? input_scale_op->opcode() == HloOpcode::kMultiply
: false,
filter_scale_op
? filter_scale_op->opcode() == HloOpcode::kMultiply
: false));
TF_ASSIGN_OR_RETURN(auto gpu_config,
convolution->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
config.set_serialized_graph(graph_string.Graph());
operands.insert(operands.begin(), input);
operands.insert(operands.begin() + 1, filter);
std::vector<Shape> output_shapes;
output_shapes.emplace_back(ShapeUtil::ChangeElementType(
ShapeUtil::GetTupleElementShape(convolution->shape(), 0),
final_instr->shape().element_type()));
for (HloInstruction* aux_output : aux_outputs) {
output_shapes.emplace_back(aux_output->shape());
}
output_shapes.emplace_back(
ShapeUtil::GetTupleElementShape(convolution->shape(), 1));
HloInstruction* new_convolution =
comp->AddInstruction(convolution->CloneWithNewOperands(
ShapeUtil::MakeTupleShape(output_shapes), operands));
new_convolution->set_custom_call_target(kCudnnConvForwardGraphCallTarget);
TF_RETURN_IF_ERROR(new_convolution->set_backend_config(gpu_config));
TF_ASSIGN_OR_RETURN(HloInstruction * new_gte,
MakeGetTupleElementHlo(new_convolution, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(final_instr, new_gte));
for (int i = 0; i < aux_outputs.size(); ++i) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_gte,
MakeGetTupleElementHlo(new_convolution, i + 1));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(aux_outputs[i], new_gte));
}
changed = true;
}
}
return changed;
}
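// Fuses add(gte(conv), addend) into the convolution custom call. The addend
// becomes the bias when it is a broadcast along the output-feature dimension
// (or of a scalar) and the conv's current bias is an all-zeros broadcast;
// otherwise it becomes the side input if the conv does not already have one.
// Convs that already have an activation fused are skipped.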
absl::StatusOr<bool> FuseBiasOrSideInput(HloComputation* comp) {
bool changed = false;
for (auto instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv = nullptr;
HloInstruction* gte = nullptr;
HloInstruction* addend = nullptr;
auto pattern = m::AddAnyOrder(
m::GetTupleElement(>e,
m::Op(&conv)
.WithPredicate(IsNonDepthwiseConvCustomCall)
.WithOneUse(),
0)
.WithOneUse(),
m::Op(&addend));
if (!Match(instr, pattern)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseBiasOrSideInput: ", conv->ToString());
})) {
continue;
}
if (conv->custom_call_target() == kCudnnConvForwardCallTarget) {
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.activation_mode() != se::dnn::kNone) {
continue;
}
bool can_accept_bias =
Match(conv->operand(2), m::Broadcast(m::ConstantEffectiveScalar(0)));
bool can_accept_side_input = conv->operand_count() < 4;
PrimitiveType conv_ty = gte->shape().element_type();
PrimitiveType bias_ty =
primitive_util::IsFloatingPointType(conv_ty) ? conv_ty : F32;
bool addend_may_be_rank1_bias =
addend->opcode() == HloOpcode::kBroadcast &&
addend->dimensions().size() == 1 &&
addend->dimensions(0) ==
conv->convolution_dimension_numbers().output_feature_dimension() &&
IsLosslesslyConvertibleTo(addend, bias_ty);
bool addend_may_be_rank0_bias = addend->opcode() == HloOpcode::kBroadcast &&
addend->dimensions().empty() &&
IsLosslesslyConvertibleTo(addend, bias_ty);
absl::InlinedVector<HloInstruction*, 4> new_operands(
conv->operands().begin(), conv->operands().end());
if (can_accept_bias && addend_may_be_rank1_bias) {
new_operands[2] = MakeConvertToHlo(addend->mutable_operand(0), bias_ty,
&addend->operand(0)->metadata());
} else if (can_accept_bias && addend_may_be_rank0_bias) {
new_operands[2] = MakeBroadcastHlo(
MakeConvertToHlo(addend->mutable_operand(0), bias_ty,
&addend->operand(0)->metadata()),
{},
{gte->shape().dimensions(conv->convolution_dimension_numbers()
.output_feature_dimension())});
} else if (can_accept_side_input) {
CHECK_EQ(new_operands.size(), 3);
new_operands.push_back(addend);
config.set_side_input_scale(1);
} else {
continue;
}
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), new_operands));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_RETURN_IF_ERROR(new_conv->set_backend_config(gpu_config));
TF_ASSIGN_OR_RETURN(HloInstruction * new_instr,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr));
changed = true;
}
return changed;
}
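// When the side input (operand 3) of a fused conv is a multiply by a
// broadcast scalar, possibly behind reshapes/transposes, moves that scalar
// into the backend config's side_input_scale and rewires the side input to
// the unscaled value, cloning any intervening reshapes/transposes.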
absl::StatusOr<bool> FuseSideInputAlpha(HloComputation* comp) {
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction* conv;
HloInstruction* side_input;
auto pattern = m::Op(&conv)
.WithPredicate(IsConvCustomCall)
.WithOperand(3, m::Op(&side_input));
if (!Match(instr, pattern)) {
continue;
}
TF_ASSIGN_OR_RETURN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.side_input_scale() != 1) {
continue;
}
HloInstruction* before_reshape = side_input;
while (before_reshape->opcode() == HloOpcode::kReshape ||
before_reshape->opcode() == HloOpcode::kTranspose) {
before_reshape = before_reshape->mutable_operand(0);
}
PrimitiveType conv_ty = conv->shape().tuple_shapes(0).element_type();
PrimitiveType alpha_ty = conv_ty == F64 ? F64 : F32;
HloInstruction* base;
HloInstruction* alpha;
if (!Match(
before_reshape,
m::MultiplyAnyOrder(
m::Op(&base),
m::Broadcast(m::ConstantEffectiveScalar(&alpha).WithPredicate(
[&](const HloInstruction* instr) {
return IsLosslesslyConvertibleTo(instr, alpha_ty);
}))))) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseSideInputAlpha: ", conv->ToString());
})) {
continue;
}
std::function<HloInstruction*(const HloInstruction*)> clone =
[&](const HloInstruction* instr) {
if (instr == before_reshape) {
return base;
}
CHECK(instr->opcode() == HloOpcode::kReshape ||
instr->opcode() == HloOpcode::kTranspose)
<< "Must be reshape or transpose: " << instr->ToString();
return comp->AddInstruction(instr->CloneWithNewOperands(
instr->shape(), {clone(instr->operand(0))}));
};
absl::InlinedVector<HloInstruction*, 4> new_operands(
conv->operands().begin(), conv->operands().end());
new_operands[3] = clone(side_input);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(conv->shape(), new_operands));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64));
config.set_side_input_scale(alpha_f64.GetFirstElement<double>());
TF_RETURN_IF_ERROR(new_conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv));
changed = true;
}
return changed;
}
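// Fuses select(conv_result > 0, conv_result, expm1(conv_result)) on an f16
// conv into the cuDNN Elu activation. Only applies when cuDNN runtime fusion
// is enabled for this GPU and the conv is suitable for it.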
absl::StatusOr<bool> FuseElu(HloComputation* comp,
se::GpuComputeCapability cc) {
if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(),
cc)) {
return false;
}
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction *gte1, *gte2, *gte3;
HloInstruction* conv;
HloInstruction* expm1;
if (!Match(instr,
m::Select(m::Compare(m::GetTupleElement(>e1, m::Op()),
m::Broadcast(m::ConstantEffectiveScalar(0)))
.WithComparisonDirection(ComparisonDirection::kGt)
.WithOneUse(),
m::GetTupleElement(
>e2,
m::Op(&conv)
.WithPredicate(IsNonDepthwiseConvCustomCall)
.WithOneUse(),
0)
.WithElementType(F16),
m::Op(&expm1)
.WithOpcode(HloOpcode::kExpm1)
.WithOperand(0, m::GetTupleElement(>e3, m::Op()))
.WithOneUse()))) {
continue;
}
if (gte1 != gte2 || gte2 != gte3 || gte1->user_count() != 3) {
continue;
}
if (!IsSuitableForCudnnRuntimeFusion(conv)) {
continue;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.activation_mode() != se::dnn::kNone) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseElu: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
config.set_activation_mode(se::dnn::kElu);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte1));
changed = true;
}
return changed;
}
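// Fuses maximum(0, conv_result) into the conv custom call by setting the
// ReLU activation mode.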
absl::StatusOr<bool> FuseRelu(HloComputation* comp) {
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction* gte;
HloInstruction* conv;
if (!Match(instr,
m::MaximumAnyOrder(
m::Broadcast(m::ConstantEffectiveScalar(0)),
m::GetTupleElement(
>e, m::Op(&conv)
.WithPredicate(IsNonDepthwiseConvCustomCall)
.WithOneUse())
.WithOneUse()))) {
continue;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.activation_mode() != se::dnn::kNone) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseRelu: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
config.set_activation_mode(se::dnn::kRelu);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte));
changed = true;
}
return changed;
}
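// Fuses clamp(0, conv_result, 6) on an f16 conv into the cuDNN Relu6
// activation; requires cuDNN runtime fusion.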
absl::StatusOr<bool> FuseRelu6(HloComputation* comp,
se::GpuComputeCapability cc) {
if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(),
cc)) {
return false;
}
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction *gte, *conv;
if (!Match(
instr,
m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(0)),
m::GetTupleElement(
>e, m::Op(&conv)
.WithPredicate(IsNonDepthwiseConvCustomCall)
.WithOneUse())
.WithElementType(F16)
.WithOneUse(),
m::Broadcast(m::ConstantEffectiveScalar(6))))) {
continue;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.activation_mode() != se::dnn::kNone) {
continue;
}
if (!IsSuitableForCudnnRuntimeFusion(conv)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseRelu6: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
config.set_activation_mode(se::dnn::kRelu6);
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte));
changed = true;
}
return changed;
}
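// Fuses select(conv_result > 0, conv_result, conv_result * alpha) on an f16
// conv into the cuDNN LeakyRelu activation, recording alpha in the backend
// config; requires cuDNN runtime fusion.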
absl::StatusOr<bool> FuseLeakyRelu(HloComputation* comp,
se::GpuComputeCapability cc) {
if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(),
cc)) {
return false;
}
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction *gte1, *gte2, *gte3, *conv, *alpha;
if (!Match(instr,
m::Select(
m::Compare(m::GetTupleElement(>e1, m::Op()),
m::Broadcast(m::ConstantEffectiveScalar(0)))
.WithComparisonDirection(ComparisonDirection::kGt)
.WithOneUse(),
m::GetTupleElement(
>e2, m::Op(&conv)
.WithPredicate(IsNonDepthwiseConvCustomCall)
.WithOneUse())
.WithElementType(F16),
m::Multiply(m::GetTupleElement(>e3, m::Op()),
m::Broadcast(m::ConstantEffectiveScalar(&alpha)))
.WithOneUse()))) {
continue;
}
if (gte1 != gte2 || gte2 != gte3 || gte1->user_count() != 3) {
continue;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
conv->backend_config<GpuBackendConfig>());
CudnnConvBackendConfig& config =
*gpu_config.mutable_cudnn_conv_backend_config();
if (config.activation_mode() != se::dnn::kNone) {
continue;
}
if (!IsSuitableForCudnnRuntimeFusion(conv)) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseLeakyRelu: ", conv->ToString());
})) {
continue;
}
TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv));
config.set_activation_mode(se::dnn::kLeakyRelu);
TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64));
config.set_leakyrelu_alpha(alpha_f64.GetFirstElement<double>());
TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte1));
changed = true;
}
return changed;
}
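// Rewrites convert(f32_conv_result) -> f16 into an f16 convolution when every
// conv operand is losslessly convertible to f16.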
absl::StatusOr<bool> FuseConvertToF16(HloComputation* comp) {
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction* gte = nullptr;
HloInstruction* conv = nullptr;
auto f32_convertible_to_f16_pat =
m::Op().WithElementType(F32).WithPredicate(
IsLosslesslyConvertibleToF16);
if (!MatchAndLogIfFailed(
instr, "f16 conv",
m::Convert(
m::GetTupleElement(
>e,
m::Op(&conv)
.WithPredicate(IsConvCustomCall)
.WithOperand(0, f32_convertible_to_f16_pat)
.WithOperand(1, f32_convertible_to_f16_pat)
.WithOperandIfPresent(2, f32_convertible_to_f16_pat)
.WithOperandIfPresent(3, f32_convertible_to_f16_pat),
0)
.WithOneUse())
.WithElementType(F16),
VLOG_IS_ON(3),
m::Op().WithOperand(0, m::GetTupleElement(m::Op().WithPredicate(
IsConvCustomCall))))) {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvertToF16: ", conv->ToString());
})) {
continue;
}
VLOG(2) << "Matched fp16 conv: " << conv->ToString();
absl::InlinedVector<HloInstruction*, 4> new_operands;
for (HloInstruction* operand : conv->operands()) {
new_operands.push_back(
MakeConvertToHlo(operand, F16, &operand->metadata()));
}
Shape new_shape = conv->shape();
new_shape.mutable_tuple_shapes(0)->set_element_type(F16);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(new_shape, new_operands));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(HloInstruction * new_instr,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr));
changed = true;
}
return changed;
}
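// Rewrites s8->s8 (clamped to [-128, 127]) and s8->f32 convolution patterns
// so that the conv custom call itself takes s8 inputs and produces s8 or f32
// output. Not supported on ROCm.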
absl::StatusOr<bool> FuseConvertToS8(HloComputation* comp,
se::GpuComputeCapability cc) {
if (IsROCm(cc)) return false;
bool changed = false;
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
HloInstruction* gte = nullptr;
HloInstruction* conv = nullptr;
auto conv_pattern =
m::Op(&conv)
.WithPredicate(IsConvCustomCall)
.WithOperand(0, m::Op().WithPredicate(IsLosslesslyConvertibleToS8))
.WithOperand(1, m::Op().WithPredicate(IsLosslesslyConvertibleToS8));
PrimitiveType conv_output_ty;
if (MatchAndLogIfFailed(
instr, "s8->s8 conv",
m::Convert(m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(-128)),
m::GetTupleElement(
>e,
conv_pattern.WithOperandIfPresent(
3, m::Op().WithPredicate(
IsLosslesslyConvertibleToS8)),
0)
.WithOneUse(),
m::Broadcast(m::ConstantEffectiveScalar(127))))
.WithElementType(S8),
VLOG_IS_ON(3),
m::Convert(m::Clamp(m::Op(),
m::GetTupleElement(
m::Op().WithPredicate(IsConvCustomCall)),
m::Op()))
.WithElementType(S8))) {
conv_output_ty = S8;
} else if (MatchAndLogIfFailed(
instr, "s8->f32 conv",
m::GetTupleElement(>e,
conv_pattern.WithOperandIfPresent(
3, m::Op().WithElementType(F32)),
0)
.WithElementType(F32),
VLOG_IS_ON(3),
m::GetTupleElement(m::Op().WithPredicate(IsConvCustomCall))
.WithElementType(F32))) {
conv_output_ty = F32;
} else {
continue;
}
if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] {
return absl::StrCat("FuseConvertToS8: ", conv->ToString());
})) {
continue;
}
absl::InlinedVector<HloInstruction*, 4> new_operands(
conv->operands().begin(), conv->operands().end());
new_operands[0] =
MakeConvertToHlo(new_operands[0], S8, &new_operands[0]->metadata());
new_operands[1] =
MakeConvertToHlo(new_operands[1], S8, &new_operands[1]->metadata());
if (new_operands.size() >= 4) {
new_operands[3] = MakeConvertToHlo(new_operands[3], conv_output_ty,
&new_operands[3]->metadata());
}
Shape new_shape = conv->shape();
new_shape.mutable_tuple_shapes(0)->set_element_type(conv_output_ty);
HloInstruction* new_conv = comp->AddInstruction(
conv->CloneWithNewOperands(new_shape, new_operands));
comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name());
TF_ASSIGN_OR_RETURN(HloInstruction * new_instr,
MakeGetTupleElementHlo(new_conv, 0));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr));
changed = true;
}
return changed;
}
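// Returns an Unimplemented error if any remaining conv custom call still uses
// an integral type other than s8 for its output, input, filter, or side input.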
absl::Status CheckNoIllegalIntegerConvs(HloComputation* comp) {
auto is_integral_not_s8 = [](const Shape& s) {
return primitive_util::IsIntegralType(s.element_type()) &&
s.element_type() != S8;
};
std::vector<HloInstruction*> bad_convs;
for (HloInstruction* instr : comp->instructions()) {
if (!IsConvCustomCall(instr)) {
continue;
}
if (is_integral_not_s8(instr->shape().tuple_shapes(0)) ||
is_integral_not_s8(instr->operand(0)->shape()) ||
is_integral_not_s8(instr->operand(1)->shape()) ||
(instr->operand_count() >= 4 &&
is_integral_not_s8(instr->operand(3)->shape()))) {
bad_convs.push_back(instr);
}
}
if (bad_convs.empty()) {
return absl::OkStatus();
}
return Unimplemented(
R"(
Can't lower one or more integer convolutions to idioms supported by CuDNN.
CuDNN integer convolutions must have:
- s8 input and filter,
- f32 bias (if present),
- s8 or f32 output, and
- s8 side_input (if present) if output is s8.
For each of the unsupported convs below, we weren't able to lower one of the
operands or the output to the appropriate type.
See specific HLO idioms in cudnn_fused_conv_rewriter.h, and see cudnn semantics:
https:
https:
Unsupported convs:
%s
******* Full HLO module *******
%s
)",
absl::StrJoin(bad_convs, "\n",
[](std::string* out, HloInstruction* instr) {
absl::StrAppend(out, " - ", instr->ToString());
}),
comp->parent()->ToString());
}
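// At VLOG(1), prints a tally of the conv custom calls in the module, broken
// down by fusion kind, element types, presence of bias/side input, scales and
// activation mode.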
void VlogStats(HloModule* module) {
if (!VLOG_IS_ON(1)) {
return;
}
VLOG(1) << "Results of CudnnFusedConvRewriter for " << module->name();
absl::flat_hash_map<std::string, int> stats;
for (HloComputation* comp : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : comp->instructions()) {
if (!Match(instr, m::Op().WithPredicate(IsConvCustomCall))) {
continue;
}
VLOG(3) << instr->ToString();
if (instr->custom_call_target() == kCudnnConvForwardCallTarget) {
++stats["01 non-fused forward convs"];
} else if (instr->custom_call_target() ==
kCudnnConvBiasActivationForwardCallTarget) {
++stats["02 fused forward convs"];
}
PrimitiveType conv_in_ty = instr->operand(0)->shape().element_type();
PrimitiveType conv_out_ty = instr->shape().tuple_shapes(0).element_type();
if (conv_in_ty == F32) {
++stats["10 f32 convs"];
} else if (conv_in_ty == F16) {
++stats["11 f16 convs"];
} else if (conv_in_ty == S8) {
if (conv_out_ty == S8) {
++stats["12 s8->s8 convs"];
} else if (conv_out_ty == F32) {
++stats["13 s8->f32 convs"];
} else {
LOG(ERROR) << "Unexpected conv: " << instr->ToString();
}
}
if (instr->operand_count() > 2) {
++stats["20 convs with bias"];
if (Match(instr->operand(2),
m::Broadcast(m::ConstantEffectiveScalar(0)))) {
++stats["21 convs with 0 bias"];
}
}
if (instr->operand_count() > 3) {
++stats["22 convs with side-input"];
}
auto gpu_config = instr->backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
LOG(ERROR) << "Couldn't parse backend config for " << instr->ToString();
continue;
}
const CudnnConvBackendConfig& config =
gpu_config->cudnn_conv_backend_config();
if (config.conv_result_scale() != 1) {
++stats["30 convs with result scale"];
}
if (config.side_input_scale() != 0 && config.side_input_scale() != 1) {
++stats["31 convs with side-input scale"];
}
++stats[absl::StrCat(
"32 convs with activation mode ",
se::dnn::ActivationMode_Name(config.activation_mode()))];
}
}
std::vector<std::pair<std::string, int>> stats_sorted(stats.begin(),
stats.end());
absl::c_sort(stats_sorted);
for (const auto& kv : stats_sorted) {
VLOG(1) << absl::StreamFormat("%4d %s", kv.second,
absl::string_view(kv.first).substr(3));
}
}
}
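// Runs the fusion passes over every non-fusion computation. The FP8 graph
// rewrite returns early as soon as it changes anything. Otherwise the
// bias/side-input, side-input-scale and activation fusions run once, then the
// f16/s8 conversion fusions, and then the same fusions run a second time so
// that patterns exposed by the type rewrites can also be fused. Finally the
// computation is checked for integer convs that cuDNN cannot handle.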
absl::StatusOr<bool> CudnnFusedConvRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool any_changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
bool changed = false;
if (!IsROCm(compute_capability_)) {
auto cc = std::get<se::CudaComputeCapability>(compute_capability_);
TF_ASSIGN_OR_RETURN(
changed, F8GraphConv(comp, cc, dnn_version_, toolkit_version_));
if (changed) {
return changed;
}
}
TF_ASSIGN_OR_RETURN(changed, FuseRemoveConvertInConv(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseConvAlpha(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseSideInputAlpha(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseRelu(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseElu(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseRelu6(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseLeakyRelu(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseConvertToF16(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseConvertToS8(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseSideInputAlpha(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseRelu(comp));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseElu(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseRelu6(comp, compute_capability_));
any_changed |= changed;
TF_ASSIGN_OR_RETURN(changed, FuseLeakyRelu(comp, compute_capability_));
any_changed |= changed;
TF_RETURN_IF_ERROR(CheckNoIllegalIntegerConvs(comp));
}
VlogStats(module);
return any_changed;
}
}
} | #include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h"
#include <array>
#include <initializer_list>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/convert_mover.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/gpu/transforms/conv_rewriter.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_mover.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/semantic_version.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = match;
using ::testing::HasSubstr;
using ::testing::Not;
static const std::initializer_list<absl::string_view> kf16f32f64{"f16", "f32",
"f64"};
static const std::initializer_list<absl::string_view> kf16f32{"f16", "f32"};
class CudnnFusedConvRewriterHloTest : public HloTestBase {
public:
bool IsCuda() const {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() const {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
se::SemanticVersion GetToolkitVersion() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.runtime_version();
}
CudnnFusedConvRewriterHloTest()
: HloTestBase(false,
false,
{}) {}
};
class CudnnFusedConvRewriterTest : public GpuCodegenTest {
public:
bool IsCuda() const {
return std::holds_alternative<se::CudaComputeCapability>(
backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability());
}
se::CudaComputeCapability GetCudaComputeCapability() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
stream_executor::dnn::VersionInfo GetDnnVersion() const {
return GetDnnVersionInfoOrDefault(backend().default_stream_executor());
}
stream_executor::SemanticVersion GetToolkitVersion() const {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.runtime_version();
}
protected:
std::string GetOptimizedHlo(absl::string_view hlo_string) {
HloModuleConfig config = GetModuleConfigForTest();
DebugOptions debug_opts = config.debug_options();
debug_opts.add_xla_disable_hlo_passes("cudnn_vectorize_convolutions");
debug_opts.set_xla_gpu_use_runtime_fusion(true);
config.set_debug_options(debug_opts);
auto result = backend().compiler()->RunHloPasses(
ParseAndReturnVerifiedModule(hlo_string, config).value(),
backend().default_stream_executor(), backend().memory_allocator());
if (!result.status().ok()) {
TF_EXPECT_OK(result.status())
<< "HLO compilation failed: " << result.status();
return "";
}
HloPrintOptions print_opts;
print_opts.set_print_operand_shape(false);
return (*result)->ToString(print_opts);
}
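  // Compiles the HLO once per supported element type and expects the fused
  // conv-bias-activation custom call to replace the plain forward conv; also
  // checks numerical equivalence against the reference within 1% error.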
void TestMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : IsCuda() ? kf16f32f64 : kf16f32) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvForwardCallTarget)))
<< optimized_hlo_string;
EXPECT_THAT(optimized_hlo_string,
HasSubstr(kCudnnConvBiasActivationForwardCallTarget));
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_with_new_type));
DebugOptions debug_opts = module->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
module->mutable_config().set_debug_options(debug_opts);
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01}))
<< optimized_hlo_string;
}
}
void TestClamp(absl::string_view pre_hlo_string,
absl::string_view post_hlo_string) {
std::string alpha_conv_scalar, alpha_side_input_scalar;
std::string elementwise_type;
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.01}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, post_hlo_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
}
void TestNotMatchWithAllTypes(absl::string_view hlo_string) {
for (absl::string_view type : IsCuda() ? kf16f32f64 : kf16f32) {
const std::string hlo_with_new_type =
absl::StrReplaceAll(hlo_string, {{"TYPE", type}});
std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type);
SCOPED_TRACE(optimized_hlo_string);
EXPECT_THAT(optimized_hlo_string, HasSubstr(kCudnnConvForwardCallTarget));
EXPECT_THAT(optimized_hlo_string,
Not(HasSubstr(kCudnnConvBiasActivationForwardCallTarget)));
}
}
void TestF8(std::string pre_hlo_string, std::string custom_call_string,
std::string serialized_graph_string) {
if (!IsCuda()) return;
if (GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::HOPPER)) {
std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string);
EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert")));
EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv"));
EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.15, 0.15}))
<< pre_hlo_string;
absl::StatusOr<bool> filecheck_result =
RunFileCheck(optimized_hlo_string, custom_call_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
filecheck_result =
RunFileCheck(optimized_hlo_string, serialized_graph_string);
ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status();
EXPECT_TRUE(*filecheck_result);
} else {
std::string::size_type p0 = custom_call_string.find(':');
std::string::size_type p1 = custom_call_string.find("custom-call");
custom_call_string.erase(p0 + 1, p1 - p0 - 2);
p0 = custom_call_string.find(", dim_labels");
custom_call_string.erase(p0);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(pre_hlo_string));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, RunHloPass(ConvRewriter(se::CudaComputeCapability{
se::CudaComputeCapability::HOPPER, 0}),
module.get()));
EXPECT_TRUE(changed);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
custom_call_string);
RunAndFilecheckHloRewrite(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
CudnnFusedConvRewriter(
se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0},
GetDnnVersion(), GetToolkitVersion()),
serialized_graph_string);
}
}
void TestF8Parameterized(std::string template_pre_hlo_string,
std::string template_custom_call_string,
std::string template_serialized_graph_string) {
std::array<absl::string_view, 2> types = {"f8e4m3fn", "f8e5m2"};
std::array<absl::string_view, 2> clamp_lower = {"-448.", "-57344."};
std::array<absl::string_view, 2> clamp_upper = {"448.", "57344."};
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
for (int i = 0; i < 2; ++i) {
replacements["<<InputType>>"] = types[i];
for (int j = 0; j < 2; ++j) {
replacements["<<FilterType>>"] = types[j];
for (int k = 0; k < 2; ++k) {
replacements["<<OutputType>>"] = types[k];
replacements["<<ClampLower>>"] = clamp_lower[k];
replacements["<<ClampUpper>>"] = clamp_upper[k];
TestF8(absl::StrReplaceAll(template_pre_hlo_string, replacements),
absl::StrReplaceAll(template_custom_call_string, replacements),
absl::StrReplaceAll(template_serialized_graph_string,
replacements));
}
}
}
}
};
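// Skips FP8 tests when CUDA 12 / cuDNN 8.9 are unavailable, and skips
// CUDA-only fusions when running on a non-CUDA backend.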
#define MAYBE_SKIP_TEST(CAUSE) \
do { \
if (absl::string_view(CAUSE) == "F8" && IsCuda() && \
(GetToolkitVersion() < se::SemanticVersion{12, 0, 0} || \
GetDnnVersion() < se::dnn::VersionInfo(8, 9, 0))) { \
GTEST_SKIP() << "FP8 convolutions require CUDA 12 and cuDNN 8.9."; \
} \
if (!IsCuda()) { \
GTEST_SKIP() << CAUSE " fusion is only supported on CUDA."; \
} \
} while (0)
TEST_F(CudnnFusedConvRewriterTest, TestConvOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseReluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
ROOT relu = TYPE[1,17,9,9] maximum(zeros, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, Test3D) {
std::string body = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,5,7,64] broadcast(zero), dimensions={}
input = TYPE[1,3,5,7,64] parameter(0)
filter = TYPE[3,3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,5,7,64] convolution(input, filter), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f, feature_group_count=1
broadcasted_bias = TYPE[1,3,5,7,64] broadcast(bias), dimensions={4}
add1 = TYPE[1,3,5,7,64] add(conv, broadcasted_bias)
)";
std::string relu = R"(
ROOT relu = TYPE[1,3,5,7,64] maximum(zeros, add1)
})";
std::string elu = R"(
cmp = pred[1,3,5,7,64] compare(add1, zeros), direction=GT
expm1 = TYPE[1,3,5,7,64] exponential-minus-one(add1)
ROOT elu = TYPE[1,3,5,7,64] select(cmp, add1, expm1)
})";
TestMatchWithAllTypes(body + relu);
if (!IsCuda()) TestMatchWithAllTypes(body + elu);
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasMultiCall) {
std::string code = R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,<<<format>>>,64] broadcast(zero), dimensions={}
input = TYPE[1,<<<format>>>,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,<<<format>>>,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,<<<format>>>,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,<<<format>>>,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,<<<format>>>,64] maximum(zeros, add1)
})";
absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "5,5";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
replacements["<<<format>>>"] = "3,3";
TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements));
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasNoRelu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
ROOT add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseBiasWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestElu) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseEluWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
expm1 = TYPE[1,3,3,64] exponential-minus-one(sum)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
six = TYPE[] constant(6)
sixes = TYPE[1,3,3,64] broadcast(six), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
ROOT relu6 = TYPE[1,3,3,64] clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestRelu6OddChannels) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with "
"the Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zeros = TYPE[1,384,1024,32] broadcast(TYPE[] constant(0)), dimensions={}
sixes = TYPE[1,384,1024,32] broadcast(TYPE[] constant(6)), dimensions={}
input = TYPE[1,769,2049,3] parameter(0)
filter = TYPE[32,3,3,3] parameter(1)
bias = TYPE[32] parameter(2)
conv = TYPE[1,384,1024,32] convolution(input, filter), window={size=3x3 stride=2x2}, dim_labels=b01f_o01i->b01f
broadcasted_bias = TYPE[1,384,1024,32] broadcast(bias), dimensions={3}
sum = add(conv, broadcasted_bias)
ROOT relu6 = clamp(zeros, sum, sixes)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestLeakyRelu) {
if (IsCuda() && !GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::AMPERE)) {
    GTEST_SKIP()
        << "Conv-Bias-LeakyRelu fusion is supported and recommended on "
           "Nvidia Ampere+ GPUs.";
}
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha = TYPE[] constant(0.2)
alphas = TYPE[1,3,3,64] broadcast(alpha), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
bias = TYPE[64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
sum = TYPE[1,3,3,64] add(conv, broadcasted_bias)
cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT
mul = TYPE[1,3,3,64] multiply(sum, alphas)
ROOT elu = TYPE[1,3,3,64] select(cmp, sum, mul)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestSideInputOnly) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseSideInputWithDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
add1 = TYPE[1,3,3,64] add(conv, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestBiasAndSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConv) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = TYPE[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,32,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseScaledDepthwiseConv) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,1,17] parameter(1)
conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17
alpha_conv = TYPE[1,17,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = TYPE[1,17,9,9] multiply(conv, alpha_conv)
ROOT relu = TYPE[1,17,9,9] maximum(zeros, scaled_conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestNoCrashOnInf) {
EXPECT_TRUE(RunAndCompare(R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(inf)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
alpha_conv = f32[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={}
scaled_conv = f32[1,32,9,9] multiply(conv, alpha_conv)
ROOT relu = f32[1,32,9,9] maximum(zeros, scaled_conv)
})",
ErrorSpec{0.01}));
}
TEST_F(CudnnFusedConvRewriterTest, TestConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, DontFuseDepthwiseConvWithScaledSideInput) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,1,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInput) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
add1 = TYPE[1,3,3,64] add(scaled_conv, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInputWithBias) {
TestMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
zero = TYPE[] constant(0)
zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = TYPE[] constant(0.999994934)
alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = TYPE[] constant(0.899994934)
alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = TYPE[1,3,3,64] parameter(0)
filter = TYPE[3,3,64,64] parameter(1)
side_input = TYPE[1,3,3,64] parameter(2)
bias = TYPE[64] parameter(3)
conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv)
scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input)
broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3}
add1 = TYPE[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = TYPE[1,3,3,64] add(add1, scaled_side_input)
ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2)
})");
}
TEST_F(CudnnFusedConvRewriterTest, TestMatchMaxZeroOnly) {
TestNotMatchWithAllTypes(R"(
HloModule Test
ENTRY Test {
point_one = TYPE[] constant(0.1)
point_ones = TYPE[1,32,9,9] broadcast(point_one), dimensions={}
input = TYPE[1,17,9,9] parameter(0)
filter = TYPE[3,3,17,32] parameter(1)
conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT relu = TYPE[1,32,9,9] maximum(point_ones, conv)
})");
}
TEST_F(CudnnFusedConvRewriterTest, PreservesMetadata) {
const char* kHloString = R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
input = f32[1,17,9,9] parameter(0)
filter = f32[3,3,17,32] parameter(1)
conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1, metadata={op_type="foo" op_name="bar"}
ROOT relu = f32[1,32,9,9] maximum(zeros, conv)
})";
const std::string optimized_hlo_string =
backend()
.compiler()
->RunHloPasses(
ParseAndReturnVerifiedModule(kHloString, GetModuleConfigForTest())
.value(),
backend().default_stream_executor(), backend().memory_allocator())
.value()
->ToString();
EXPECT_THAT(optimized_hlo_string,
::testing::ContainsRegex(
R"(custom-call.*metadata=\{op_type="foo" op_name="bar"\})"));
}
TEST_F(CudnnFusedConvRewriterTest, TestPreservesFeatureGroupCount) {
const char* kHloString = R"(
HloModule jaxpr_computation__6.19
primitive_computation__1.4 {
parameter.5 = f32[] parameter(0)
parameter.6 = f32[] parameter(1)
ROOT add.7 = f32[] add(parameter.5, parameter.6)
}
ENTRY jaxpr_computation__7.8 {
parameter.11 = f32[2,64,64,53]{3,2,1,0} parameter(1)
parameter.10 = f32[3,3,1,53]{3,2,1,0} parameter(0)
convolution.12 = f32[2,64,64,53]{3,2,1,0} convolution(parameter.11, parameter.10), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=53
constant.13 = f32[] constant(0)
broadcast.14 = f32[2,64,64,53]{3,2,1,0} broadcast(constant.13), dimensions={}
maximum.15 = f32[2,64,64,53]{3,2,1,0} maximum(convolution.12, broadcast.14)
ROOT reduce.17 = f32[] reduce(maximum.15, constant.13), dimensions={0,1,2,3}, to_apply=primitive_computation__1.4
}
)";
EXPECT_TRUE(RunAndCompare(kHloString, ErrorSpec{0.01}));
}
TEST_F(CudnnFusedConvRewriterTest, TestConvF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
ROOT conv_a = f8e4m3fn[1,16,6,6] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
})",
R"(
)",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_f32 = f32[1,128,6,6] convert(input)
filter_f32 = f32[3,3,128,16] convert(filter)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped)
})",
R"(
)",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, TestConvInvscaledOutputF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_f32 = f32[1,128,6,6] convert(input)
filter_f32 = f32[3,3,128,16] convert(filter)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_scaled = f32[1,16,6,6] divide(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped)
})",
R"(
)",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledF8Parameterized) {
MAYBE_SKIP_TEST("F8");
TestF8Parameterized(
R"(
HloModule Test
ENTRY Test {
input = <<InputType>>[1,128,6,6] parameter(0)
filter = <<FilterType>>[3,3,128,16] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,128,6,6] convert(input)
input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[3,3,128,16] convert(filter)
filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast)
z_scale = f32[] parameter(4)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(<<ClampLower>>)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(<<ClampUpper>>)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
ROOT conv_f8 = <<OutputType>>[1,16,6,6] convert(conv_a_clamped)
})",
R"(
)",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledBiasF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,128,6,6] convert(input)
input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[3,3,128,16] convert(filter)
filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast)
bias = f32[1,16,6,6] parameter(4)
z_scale = f32[] parameter(5)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_bias = f32[1,16,6,6] add(conv_a, bias)
conv_a_scaled = f32[1,16,6,6] multiply(conv_a_bias, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped)
})",
R"(
)",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledReluF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_f32 = f32[1,128,6,6] convert(input)
filter_f32 = f32[3,3,128,16] convert(filter)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
c = f32[] constant(0)
c_bcast = f32[1,16,6,6] broadcast(c), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
relu_a = f32[1,16,6,6] maximum(conv_a, c_bcast)
relu_a_scaled = f32[1,16,6,6] multiply(relu_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
relu_a_clamped = f32[1,16,6,6] clamp(c1_bcast, relu_a_scaled, c2_bcast)
ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(relu_a_clamped)
})",
R"(
)",
R"(
)");
}
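// Besides the scaled, clamped FP8 output, the graph reduces abs(conv) with a
// maximum reduction to compute an amax value and returns both in a tuple.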
TEST_F(CudnnFusedConvRewriterTest, TestConvAmaxF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,128,6,6] convert(input)
input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[3,3,128,16] convert(filter)
filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast)
conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
z_scale = f32[] parameter(4)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
conv_a_clamped_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped)
abs_conv_a = f32[1,16,6,6] abs(conv_a)
c0 = f32[] constant(-inf)
amax = f32[] reduce(abs_conv_a, c0), dimensions={0,1,2,3}, to_apply=apply
ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[]) tuple(conv_a_clamped_f8, amax)
})",
R"(
)",
R"(
)");
}
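// Same as TestConvAmaxF8, but the amax is taken over the ReLU-activated
// convolution result.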
TEST_F(CudnnFusedConvRewriterTest, TestConvReluAmaxF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
apply {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT c = f32[] maximum(a, b)
}
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_scale = f32[] parameter(2)
input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={}
filter_scale = f32[] parameter(3)
filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={}
input_f32 = f32[1,128,6,6] convert(input)
input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast)
filter_f32 = f32[3,3,128,16] convert(filter)
filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast)
c = f32[] constant(0)
c_bcast = f32[1,16,6,6] broadcast(c), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
relu_a = f32[1,16,6,6] maximum(conv_a, c_bcast)
z_scale = f32[] parameter(4)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
relu_a_scaled = f32[1,16,6,6] multiply(relu_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
relu_a_clamped = f32[1,16,6,6] clamp(c1_bcast, relu_a_scaled, c2_bcast)
relu_a_clamped_f8 = f8e4m3fn[1,16,6,6] convert(relu_a_clamped)
abs_relu_a = f32[1,16,6,6] abs(relu_a)
c0 = f32[] constant(-inf)
amax = f32[] reduce(abs_relu_a, c0), dimensions={0,1,2,3}, to_apply=apply
ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[]) tuple(relu_a_clamped_f8, amax)
})",
R"(
)",
R"(
)");
}
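// The convolution output feeds two independent scale/clamp/convert chains;
// both f8e4m3fn results are returned in a tuple.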
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputMultipleUsersF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_f32 = f32[1,128,6,6] convert(input)
filter_f32 = f32[3,3,128,16] convert(filter)
z_scale0 = f32[] parameter(2)
z_scale0_bcast = f32[1,16,6,6] broadcast(z_scale0), dimensions={}
z_scale1 = f32[] parameter(3)
z_scale1_bcast = f32[1,16,6,6] broadcast(z_scale1), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_scaled0 = f32[1,16,6,6] multiply(conv_a, z_scale0_bcast)
conv_a_scaled1 = f32[1,16,6,6] multiply(conv_a, z_scale1_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped0 = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled0, c2_bcast)
conv_a_clamped1 = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled1, c2_bcast)
conv_a_convert0 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped0)
conv_a_convert1 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped1)
ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f8e4m3fn[1,16,6,6]) tuple(conv_a_convert0, conv_a_convert1)
})",
R"(
)",
R"(
)");
}
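// The convolution result is also consumed by a cosine, which cannot be fused,
// in addition to the usual scale/clamp/convert chain.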
TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputUnsupportedUserF8) {
MAYBE_SKIP_TEST("F8");
TestF8(
R"(
HloModule Test
ENTRY Test {
input = f8e4m3fn[1,128,6,6] parameter(0)
filter = f8e4m3fn[3,3,128,16] parameter(1)
input_f32 = f32[1,128,6,6] convert(input)
filter_f32 = f32[3,3,128,16] convert(filter)
z_scale = f32[] parameter(2)
z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={}
conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
conv_a_cos = f32[1,16,6,6] cosine(conv_a)
conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast)
c1 = f32[] constant(-448.)
c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={}
c2 = f32[] constant(448.)
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={}
conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast)
conv_a_convert = f8e4m3fn[1,16,6,6] convert(conv_a_clamped)
ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[1,16,6,6]) tuple(conv_a_convert, conv_a_cos)
})",
R"(
)",
R"(
)");
}
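// int8 -> int8 convolution: the operands are converted to s32, and the s32
// result is clamped to [-128, 127] before being converted back to s8.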
TEST_F(CudnnFusedConvRewriterTest, TestConvInt8ToInt8) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = s8[] constant(0)
zeros = s8[1,32,9,9] broadcast(zero), dimensions={}
input = s8[1,17,9,9] parameter(0)
filter = s8[3,3,17,32] parameter(1)
inputs32 = s32[1,17,9,9] convert(input)
filters32 = s32[3,3,17,32] convert(filter)
conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
lower = s32[] constant(-128)
lowers = s32[1,32,9,9] broadcast(lower), dimensions={}
upper = s32[] constant(127)
uppers = s32[1,32,9,9] broadcast(upper), dimensions={}
clamp = s32[1,32,9,9] clamp(lowers, conv, uppers)
ROOT convert = s8[1,32,9,9] convert(clamp)
})",
R"(
)");
}
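// int8 inputs with an f32 output; after ConvRewriter and CudnnFusedConvRewriter
// the root should be a cuDNN forward-convolution custom call with an f32 result.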
TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToFloat) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s8[1,17,9,9] parameter(0)
filter = s8[3,3,17,32] parameter(1)
inputs32 = s32[1,17,9,9] convert(input)
filters32 = s32[3,3,17,32] convert(filter)
conv = s32[1,32,9,9] convolution(inputs32, filters32),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT convert = f32[1,32,9,9] convert(conv)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)
.WithShape(F32, {1, 32, 9, 9})));
}
TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToInt8BiasSideInput) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0))
filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1))
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
side_input = f32[1,32,9,9] convert(s8[1,32,9,9] parameter(3))
conv = s32[1,32,9,9] convolution(input, filter),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
conv_f32 = f32[1,32,9,9] convert(conv)
ROOT root = s8[1,32,9,9] convert(clamp(f32[1,32,9,9] broadcast(f32[] constant(-128)),
add(add(conv_f32, bias), side_input),
f32[1,32,9,9] broadcast(f32[] constant(127))))
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1),
m::Parameter(2), m::Parameter(3)),
0)
.WithShape(S8, {1, 32, 9, 9})));
}
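// A ReLU applied after the clamp/convert to s8 should still be fused: the
// custom call is expected to get a zero bias and activation_mode = kRelu.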
TEST_F(CudnnFusedConvRewriterHloTest, TestReluAfterConvert) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0))
filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1))
conv = s32[1,32,9,9] convolution(input, filter),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
conv_s8 = s8[1,32,9,9] convert(clamp(s32[1,32,9,9] broadcast(s32[] constant(-128)),
conv,
s32[1,32,9,9] broadcast(s32[] constant(127))))
zeros = s8[1,32,9,9] broadcast(s8[] constant(0)), dimensions={}
ROOT root = s8[1,32,9,9] maximum(conv_s8, zeros)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0),
m::Parameter(1),
m::Broadcast(
m::ConstantEffectiveScalar(0).WithElementType(F32))),
0)
.WithShape(S8, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu);
}
TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToFloatBiasSideInput) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s8[1,17,9,9] parameter(0)
filter = s8[3,3,17,32] parameter(1)
bias = f32[32] parameter(2)
bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
side_input_f32 = f32[1,32,9,9] parameter(3)
inputs32 = s32[1,17,9,9] convert(input)
filters32 = s32[3,3,17,32] convert(filter)
conv = s32[1,32,9,9] convolution(inputs32, filters32),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
conv_f32 = f32[1,32,9,9] convert(conv)
sum1 = add(conv_f32, bias_broadcast)
ROOT sum2 = add(sum1, side_input_f32)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1),
m::Parameter(2), m::Parameter(3)),
0)
.WithShape(F32, {1, 32, 9, 9})));
}
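// The s8 side input is converted, scaled by 0.25, and reshaped before the add;
// the rewriter should fuse it and record side_input_scale = 0.25.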
TEST_F(CudnnFusedConvRewriterHloTest, Int8SideInputWithScaleAndReshape) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0))
filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1))
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
side_input_scale = f32[2592] broadcast(f32[] constant(0.25)), dimensions={}
side_input = f32[1,32,9,9] reshape(multiply(f32[2592] convert(s8[2592] parameter(3)), side_input_scale))
conv = s32[1,32,9,9] convolution(input, filter),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = s8[1,32,9,9] convert(clamp(f32[1,32,9,9] broadcast(f32[] constant(-128)),
add(add(f32[1,32,9,9] convert(conv), bias), side_input),
f32[1,32,9,9] broadcast(f32[] constant(127))))
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
HloPassFix<HloPassPipeline> simplify("simplify");
simplify.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions{});
simplify.AddPass<ReshapeMover>();
simplify.AddPass<ConvertMover>();
TF_ASSERT_OK(RunHloPass(&simplify, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv = nullptr;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0),
m::Parameter(1),
m::Parameter(2),
m::Reshape(m::Parameter(3)).WithShape(S8, {1, 32, 9, 9})),
0)
.WithShape(S8, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.side_input_scale(), 0.25);
}
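// Multiplying the converted convolution output by the scalar constant 42
// should be absorbed into conv_result_scale of the fused custom call.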
TEST_F(CudnnFusedConvRewriterHloTest, FuseAlpha) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s8[1,17,9,9] parameter(0)
filter = s8[3,3,17,32] parameter(1)
inputs32 = s32[1,17,9,9] convert(input)
filters32 = s32[3,3,17,32] convert(filter)
alpha = f32[] constant(42)
alpha_broadcast = f32[1,32,9,9] broadcast(alpha), dimensions={}
conv = s32[1,32,9,9] convolution(inputs32, filters32),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
convert = f32[1,32,9,9] convert(conv)
ROOT root = multiply(convert, alpha_broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv = nullptr;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}),
0)
.WithShape(F32, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 42);
}
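// conv + bias followed by max(., 0) becomes a bias-activation custom call with
// activation_mode = kRelu.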
TEST_F(CudnnFusedConvRewriterHloTest, FuseRelu) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[32] parameter(2)
bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
zero = f32[] constant(0)
zeros = f32[1,32,9,9] broadcast(zero), dimensions={}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias_broadcast)
ROOT relu = maximum(sum, zeros)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F32, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu);
}
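// The pre-ReLU sum has another user (the minimum), so the ReLU must stay
// outside the custom call and the activation mode remains kNone.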
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseReluIfMultipleUses) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
zeros = f32[1,32,9,9] broadcast(f32[] constant(0)), dimensions={}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias)
relu = maximum(sum, zeros)
not_relu = minimum(sum, zeros)
ROOT root = tuple(relu, not_relu)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::MaximumAnyOrder(
m::Broadcast(m::ConstantEffectiveScalar(0)),
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F32, {1, 32, 9, 9})),
m::Minimum())));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
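// select(sum > 0, sum, expm1(sum)) is recognized as ELU when runtime fusion is
// enabled and the rewriter is given CudaComputeCapability(8, 0).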
TEST_F(CudnnFusedConvRewriterHloTest, FuseElu) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,16,9,9] parameter(0)
filters = f16[3,3,16,32] parameter(1)
bias = f16[32] parameter(2)
bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1}
zero = f16[] constant(0)
zeros = f16[1,32,9,9] broadcast(zero), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias_broadcast)
cmp = compare(sum, zeros), direction=GT
expm1 = exponential-minus-one(sum)
ROOT elu = select(cmp, sum, expm1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kElu);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseEluIfMultipleUses) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,16,9,9] parameter(0)
filters = f16[3,3,16,32] parameter(1)
bias = f16[32] parameter(2)
bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1}
zero = f16[] constant(0)
zeros = f16[1,32,9,9] broadcast(zero), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias_broadcast)
cmp = compare(sum, zeros), direction=GT
expm1 = exponential-minus-one(sum)
elu = select(cmp, sum, expm1)
not_elu = minimum(sum, zeros)
ROOT root = tuple(elu, not_elu)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
auto gte_pattern =
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9});
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Select(m::Compare(gte_pattern,
m::Broadcast(m::ConstantEffectiveScalar(0)))
.WithComparisonDirection(ComparisonDirection::kGt),
gte_pattern,
m::Op()
.WithPredicate(HloPredicateIsOp<HloOpcode::kExpm1>)
.WithOperand(0, gte_pattern)),
m::Minimum())));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
TEST_F(CudnnFusedConvRewriterHloTest, FuseRelu6) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,18,9,9] parameter(0)
filters = f16[3,3,18,32] parameter(1)
bias = f16[32] parameter(2)
bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1}
zero = f16[] constant(0)
zeros = f16[1,32,9,9] broadcast(zero), dimensions={}
sixes = f16[1,32,9,9] broadcast(f16[] constant(6)), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias_broadcast)
ROOT relu = clamp(zeros, sum, sixes)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu6);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseRelu6IfMultipleUses) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,18,9,9] parameter(0)
filters = f16[3,3,18,32] parameter(1)
bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1}
zeros = f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={}
sixes = f16[1,32,9,9] broadcast(f16[] constant(6)), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias)
relu = clamp(zeros, sum, sixes)
not_relu = minimum(sum, zeros)
ROOT root = tuple(relu, not_relu)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(0)),
m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9}),
m::Broadcast(m::ConstantEffectiveScalar(6))),
m::Minimum())));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
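// select(sum > 0, sum, 0.2 * sum) is recognized as leaky ReLU under the same
// runtime-fusion conditions.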
TEST_F(CudnnFusedConvRewriterHloTest, FuseLeakyRelu) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,16,9,9] parameter(0)
filters = f16[3,3,16,32] parameter(1)
bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1}
zeros = f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={}
alphas = f16[1,32,9,9] broadcast(f16[] constant(0.2)), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias)
cmp = compare(sum, zeros), direction=GT
mul = multiply(sum, alphas)
ROOT leaky_relu = select(cmp, sum, mul)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kLeakyRelu);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseLeakyReluIfMultipleUses) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,16,9,9] parameter(0)
filters = f16[3,3,16,32] parameter(1)
bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1}
zeros = f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={}
alphas = f16[1,32,9,9] broadcast(f16[] constant(0.2)), dimensions={}
conv = f16[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, bias)
cmp = compare(sum, zeros), direction=GT
mul = multiply(sum, alphas)
leaky_relu = select(cmp, sum, mul)
not_leaky_relu = minimum(sum, zeros)
ROOT root = tuple(leaky_relu, not_leaky_relu)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
DebugOptions debug_opts = m->config().debug_options();
debug_opts.set_xla_gpu_use_runtime_fusion(true);
m->mutable_config().set_debug_options(debug_opts);
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
auto gte_pattern =
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F16, {1, 32, 9, 9});
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Select(m::Compare(gte_pattern,
m::Broadcast(m::ConstantEffectiveScalar(0)))
.WithComparisonDirection(ComparisonDirection::kGt)
.WithOneUse(),
gte_pattern,
m::Multiply(gte_pattern,
m::Broadcast(m::ConstantEffectiveScalar()))),
m::Minimum())));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseAlphaIfMultipleUsers) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
alpha = f32[1,32,9,9] broadcast(f32[] parameter(3)), dimensions={}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(multiply(alpha, conv), bias)
ROOT root = tuple(conv, sum)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv1;
const HloInstruction* conv2;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(&conv1), 0),
m::AddAnyOrder(m::Broadcast(m::Parameter(2)),
m::MultiplyAnyOrder(
m::Broadcast(m::Parameter(3)),
m::GetTupleElement(m::CustomCall(&conv2), 0))))));
EXPECT_EQ(conv1, conv2);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv1->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseBiasIfMultipleUsers) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = tuple(conv, add(conv, bias))
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv1;
const HloInstruction* conv2;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(&conv1), 0),
m::AddAnyOrder(m::Broadcast(m::Parameter(2)),
m::GetTupleElement(m::CustomCall(&conv2), 0)))));
EXPECT_EQ(conv1, conv2);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv1->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
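// A side input added after the ReLU must not be pulled into the fused
// convolution; only the conv + ReLU is fused and the add stays outside.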
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseSideInputThroughRelu) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
side_input = f32[1,32,9,9] parameter(2)
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
relu = maximum(conv, f32[1,32,9,9] broadcast(f32[] constant(0)))
ROOT root = add(relu, side_input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::AddAnyOrder(
m::Parameter(2),
m::GetTupleElement(
m::CustomCall(&conv, m::Parameter(0), m::Parameter(1),
m::Broadcast(m::ConstantEffectiveScalar(0))),
0))));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseBiasThroughRelu) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
relu = maximum(conv, f32[1,32,9,9] broadcast(f32[] constant(0)))
ROOT root = add(relu, bias)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::AddAnyOrder(
m::Broadcast(m::Parameter(2)),
m::GetTupleElement(m::CustomCall(
&conv, m::Parameter(0), m::Parameter(1),
m::Broadcast(m::ConstantEffectiveScalar(0)))))));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseSideInputIfMultipleUsers) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
side_input = f32[1,32,9,9] parameter(2)
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = tuple(conv, add(conv, side_input))
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv1;
const HloInstruction* conv2;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(&conv1), 0),
m::AddAnyOrder(m::Parameter(2),
m::GetTupleElement(m::CustomCall(&conv2), 0)))));
EXPECT_EQ(conv1, conv2);
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv1->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.conv_result_scale(), 1);
EXPECT_EQ(config.activation_mode(), se::dnn::kNone);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseConvertToF16IfMultipleUsers) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] convert(f16[1,17,9,9] parameter(0))
filters = f32[3,3,17,32] convert(f16[3,3,17,32] parameter(1))
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = tuple(conv, f16[1,32,9,9] convert(conv))
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv1;
const HloInstruction* conv2;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(&conv1), 0),
m::Convert(m::GetTupleElement(m::CustomCall(&conv2), 0)))));
EXPECT_EQ(conv1, conv2);
}
TEST_F(CudnnFusedConvRewriterHloTest, DontFuseToS8IfMultipleUsers) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] convert(s8[1,17,9,9] parameter(0))
filters = f32[3,3,17,32] convert(s8[3,3,17,32] parameter(1))
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
conv_s8 = s8[1,32,9,9] convert(clamp(
f32[1,32,9,9] broadcast(f32[] constant(-128)),
conv,
f32[1,32,9,9] broadcast(f32[] constant(127))))
ROOT root = tuple(conv, conv_s8)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv1;
const HloInstruction* conv2;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::GetTupleElement(m::CustomCall(&conv1), 0),
m::Convert(m::Clamp(m::Op(),
m::GetTupleElement(m::CustomCall(&conv2), 0),
m::Op())))));
EXPECT_EQ(conv1, conv2);
}
TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingS32ToF32) {
MAYBE_SKIP_TEST("I8");
const std::string_view module_str = R"(
HloModule Test
ENTRY test_entry {
inputs = s8[1, 17, 9, 9] parameter(0)
filters = s8[3, 3, 17, 32] parameter(1)
mult_op = f32[1, 32, 9, 9] parameter(2)
conv = s32[1, 32, 9, 9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
ROOT ret = multiply(f32[1, 32, 9, 9] convert(conv), mult_op)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv1 = nullptr;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)),
m::Parameter(2))));
}
TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingS8ToF32) {
MAYBE_SKIP_TEST("I8");
const std::string_view module_str = R"(
HloModule Test
ENTRY test_entry {
inputs = s8[1, 17, 9, 9] parameter(0)
filters = s8[3, 3, 17, 32] parameter(1)
mult_op = f32[1, 32, 9, 9] parameter(2)
conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
ROOT ret = multiply(f32[1, 32, 9, 9] convert(conv), mult_op)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv1 = nullptr;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)),
m::Parameter(2))));
}
TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingF32ToS8) {
MAYBE_SKIP_TEST("I8");
const std::string_view module_str = R"(
HloModule Test
ENTRY test_entry {
inputs = f32[1, 17, 9, 9] parameter(0)
filters = f32[3, 3, 17, 32] parameter(1)
mult_op = s8[1, 32, 9, 9] parameter(2)
conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
ROOT ret = multiply(s8[1, 32, 9, 9] convert(conv), mult_op)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv1 = nullptr;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)),
m::Parameter(2))));
}
TEST_F(CudnnFusedConvRewriterHloTest, DontRemoveConvertDueToMultipleUsers) {
const std::string_view module_str = R"(
HloModule Test
ENTRY test_entry {
inputs = f32[1, 17, 9, 9] parameter(0)
filters = f32[3, 3, 17, 32] parameter(1)
mult_op = s8[1, 32, 9, 9] parameter(2)
sub_op = s8[1, 32, 9, 9] parameter(3)
conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
another = subtract(s8[1, 32, 9, 9] convert(conv), sub_op)
ROOT ret = multiply(s8[1, 32, 9, 9] convert(conv), mult_op)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
HloInstruction* conv1 = nullptr;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Multiply(
m::Convert(m::GetTupleElement(m::CustomCall(&conv1))),
m::Parameter(2))));
}
TEST_F(CudnnFusedConvRewriterHloTest, FuseBias) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[32] parameter(2)
bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = add(conv, bias_broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall({kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2)),
0)
.WithShape(F32, {1, 32, 9, 9})));
}
TEST_F(CudnnFusedConvRewriterHloTest, FuseSideInput) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
side_input = f32[1,32,9,9] parameter(2)
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = add(conv, side_input)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1),
m::Broadcast(m::ConstantEffectiveScalar(0))
.WithShape(F32, {32}),
m::Parameter(2)),
0)
.WithShape(F32, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
TEST_F(CudnnFusedConvRewriterHloTest, FuseScaledSideInput) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
side_input = f32[1,32,9,9] parameter(2)
side_input_scale = f32[] constant(42)
side_input_scale_broadcast = f32[1,32,9,9] broadcast(side_input_scale), dimensions={}
side_input_product = multiply(side_input, side_input_scale_broadcast)
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = add(conv, side_input_product)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1),
m::Broadcast(m::ConstantEffectiveScalar(0))
.WithShape(F32, {32}),
m::Parameter(2)),
0)
.WithShape(F32, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 42);
}
TEST_F(CudnnFusedConvRewriterHloTest, FuseBiasAndSideInput) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[32] parameter(2)
side_input = f32[1,32,9,9] parameter(3)
bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, side_input)
ROOT sum2 = add(sum, bias_broadcast)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2),
m::Parameter(3)),
0)
.WithShape(F32, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
TEST_F(CudnnFusedConvRewriterHloTest, EffectiveScalarBias) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] parameter(0)
filters = f32[3,3,17,32] parameter(1)
bias = f32[1,32,9,9] broadcast(f32[] parameter(2)), dimensions={}
conv = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
ROOT root = add(conv, bias)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1),
m::Broadcast(m::Parameter(2)).WithShape(F32, {32})),
0)
.WithShape(F32, {1, 32, 9, 9})));
}
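// An f32 convolution whose inputs, bias, and side input are all converts from
// f16 should be strength-reduced to a fully f16 fused convolution.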
TEST_F(CudnnFusedConvRewriterHloTest, StrengthReduceF32ToF16) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,17,9,9] parameter(0)
filters = f16[3,3,17,32] parameter(1)
bias = f16[32] parameter(2)
side_input = f16[1,32,9,9] parameter(3)
inputs_f32 = f32[1,17,9,9] convert(inputs)
filters_f32 = f32[3,3,17,32] convert(filters)
bias_f32 = f32[32] convert(bias)
bias_broadcast = f32[1,32,9,9] broadcast(bias_f32), dimensions={1}
side_input_f32 = f32[1,32,9,9] convert(side_input)
conv = f32[1,32,9,9] convolution(inputs_f32, filters_f32),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, side_input_f32)
sum2 = add(sum, bias_broadcast)
ROOT conv_f16 = f16[1,32,9,9] convert(sum2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Parameter(1), m::Parameter(2),
m::Parameter(3)),
0)
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
TEST_F(CudnnFusedConvRewriterHloTest, BroadcastReshapeTransposeAfterConvert) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f32[1,17,9,9] reshape(f32[1377] convert(f16[1377] parameter(0)))
filters = f32[3,3,17,32] transpose(f32[17,32,3,3] convert(f16[17,32,3,3] parameter(1))), dimensions={2,3,0,1}
bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1}
side_input = f16[1,32,9,9] reshape(f16[2592] parameter(3))
conv_f32 = f32[1,32,9,9] convolution(inputs, filters),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
conv_f16 = f16[1,32,9,9] convert(conv_f32)
ROOT root = f16[1,32,9,9] add(add(conv_f16, side_input), bias)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Convert(m::Reshape(m::Convert(m::Parameter(0))))
.WithElementType(F16),
m::Convert(m::Transpose(m::Convert(m::Parameter(1))))
.WithElementType(F16),
m::Parameter(2), m::Reshape(m::Parameter(3))),
0)
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
TEST_F(CudnnFusedConvRewriterHloTest, NoStrengthReduceF32ToF16IfBiasIsF32) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,17,9,9] parameter(0)
filters = f16[3,3,17,32] parameter(1)
bias = f32[32] parameter(2)
side_input = f16[1,32,9,9] parameter(3)
inputs_f32 = f32[1,17,9,9] convert(inputs)
filters_f32 = f32[3,3,17,32] convert(filters)
bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
side_input_f32 = f32[1,32,9,9] convert(side_input)
conv = f32[1,32,9,9] convolution(inputs_f32, filters_f32),
window={size=3x3 pad=1_1x1_1},
dim_labels=bf01_01io->bf01
sum = add(conv, side_input_f32)
sum2 = add(sum, bias_broadcast)
ROOT conv_f16 = f16[1,32,9,9] convert(sum2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Convert(m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Convert(m::Parameter(0)).WithElementType(F32),
m::Convert(m::Parameter(1)).WithElementType(F32),
m::Parameter(2),
m::Convert(m::Parameter(3)).WithElementType(F32)),
0))
.WithShape(F16, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
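// f32 filter and side-input constants that are losslessly representable in f16
// should be folded into f16 constants on the fused convolution.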
TEST_F(CudnnFusedConvRewriterHloTest, F32Constants) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,2,2,2] parameter(0)
filters_f32 = f32[1,1,2,2] constant({{{{1, 2},{3, 4}}}})
bias = f16[2] parameter(1)
bias_f32 = f32[2] convert(bias)
side_input_f32 = f32[1,2,2,2] constant({{
{{0.5, 0.25}, {0.125, 0.0625}},
{{0.5, 0.25}, {0.125, 0.0625}}
}})
inputs_f32 = f32[1,2,2,2] convert(inputs)
bias_broadcast = f32[1,2,2,2] broadcast(bias_f32), dimensions={1}
conv = f32[1,2,2,2] convolution(inputs_f32, filters_f32),
window={size=1x1}, dim_labels=bf01_01io->bf01
sum = add(conv, side_input_f32)
sum2 = add(sum, bias_broadcast)
ROOT conv_f16 = f16[1,2,2,2] convert(sum2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
HloConstantFolding constant_folding;
TF_ASSERT_OK(RunHloPass(&constant_folding, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0), m::Constant().WithElementType(F16),
m::Parameter(1), m::Constant().WithElementType(F16)),
0)
.WithShape(F16, {1, 2, 2, 2})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
TEST_F(CudnnFusedConvRewriterHloTest, F32ConstantsNotLosslesslyConvertible) {
const std::string module_str = R"(
HloModule Test
ENTRY Test {
inputs = f16[1,2,2,2] parameter(0)
filters_f32 = f32[1,1,2,2] constant({{{{1, 2.123456789},{3, 4}}}})
bias = f16[2] parameter(1)
bias_f32 = f32[2] convert(bias)
side_input_f32 = f32[1,2,2,2] constant({{
{{0.1, 0.2}, {0.3, 0.4}},
{{0.5, 0.6}, {0.7, 0.8}}
}})
inputs_f32 = f32[1,2,2,2] convert(inputs)
bias_broadcast = f32[1,2,2,2] broadcast(bias_f32), dimensions={1}
conv = f32[1,2,2,2] convolution(inputs_f32, filters_f32),
window={size=1x1}, dim_labels=bf01_01io->bf01
sum = add(conv, side_input_f32)
sum2 = add(sum, bias_broadcast)
ROOT conv_f16 = f16[1,2,2,2] convert(sum2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
HloConstantFolding constant_folding;
TF_ASSERT_OK(RunHloPass(&constant_folding, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Convert(m::GetTupleElement(
m::CustomCall(
&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Convert(m::Parameter(0)).WithElementType(F32),
m::Constant().WithElementType(F32),
m::Convert(m::Parameter(1)).WithElementType(F32),
m::Constant().WithElementType(F32)),
0)
.WithShape(F32, {1, 2, 2, 2}))
.WithElementType(F16)));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.side_input_scale(), 1);
}
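// Expects the relu applied before the clamp+convert-to-s8 to be fused as the
// custom call's activation, with a synthesized all-zero f32 bias operand.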
TEST_F(CudnnFusedConvRewriterHloTest, FuseReluBeforeConvert) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = s8[1,17,9,9] parameter(0)
filter = s8[3,3,17,32] parameter(1)
inputs32 = s32[1,17,9,9] convert(input)
filters32 = s32[3,3,17,32] convert(filter)
conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
zero = s32[] constant(0)
zeros = s32[1,32,9,9] broadcast(zero), dimensions={}
relu = maximum(conv, zeros)
lower = s32[] constant(-128)
lowers = s32[1,32,9,9] broadcast(lower), dimensions={}
upper = s32[] constant(127)
uppers = s32[1,32,9,9] broadcast(upper), dimensions={}
clamp = s32[1,32,9,9] clamp(lowers, relu, uppers)
ROOT convert = s8[1,32,9,9] convert(clamp)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0),
m::Parameter(1),
m::Broadcast(m::ConstantEffectiveScalar(0))
.WithShape(F32, {32})),
0)
.WithShape(S8, {1, 32, 9, 9})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
conv->backend_config<GpuBackendConfig>());
const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config();
EXPECT_EQ(config.activation_mode(), se::dnn::kRelu);
}
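// For floating-point convs the fused bias must match the conv element type:
// the f32 bias parameter is expected to be converted to f64 before being
// passed to the custom call.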
TEST_F(CudnnFusedConvRewriterHloTest, BiasTypeMatchesConvTypeIfFp) {
MAYBE_SKIP_TEST("F64");
const std::string module_str = R"(
HloModule Test
ENTRY Test {
input = f64[1,17,9,9] parameter(0)
filter = f64[3,3,17,32] parameter(1)
bias = f64[1,32,9,9] broadcast(f64[32] convert(f32[32] parameter(2))), dimensions={1}
conv = f64[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1
ROOT root = f64[1,32,9,9] add(conv, bias)
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ConvRewriter rewriter{GetCudaComputeCapability()};
TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
GetToolkitVersion()};
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{});
TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status());
SCOPED_TRACE(m->ToString());
const HloInstruction* conv;
ASSERT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::GetTupleElement(
m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget},
m::Parameter(0),
m::Parameter(1),
m::Convert(m::Parameter(2)).WithShape(F64, {32})),
0)
.WithShape(F64, {1, 32, 9, 9})));
}
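// End-to-end clamp test: an s8 conv with bias and relu whose result is
// clamped to [-128, 127] and converted back to s8.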
TEST_F(CudnnFusedConvRewriterTest, TestFusedConvInt8ToInt8) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,3,3,64] broadcast(zero), dimensions={}
input = s8[1,3,3,64] parameter(0)
filter = s8[3,3,64,64] parameter(1)
bias = f32[64] parameter(2)
inputs32 = s32[1,3,3,64] convert(input)
filters32 = s32[3,3,64,64] convert(filter)
conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
convfloat = f32[1,3,3,64] convert(conv)
broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3}
add1 = f32[1,3,3,64] add(convfloat, broadcasted_bias)
relu = f32[1,3,3,64] maximum(zeros, add1)
lower = f32[] constant(-128)
lowers = f32[1,3,3,64] broadcast(lower), dimensions={}
upper = f32[] constant(127)
uppers = f32[1,3,3,64] broadcast(upper), dimensions={}
clamp = f32[1,3,3,64] clamp(lowers, relu, uppers)
ROOT convert = s8[1,3,3,64] convert(clamp)
})",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest, DISABLED_TestFusedConvInt8ToFloat) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,3,3,64] broadcast(zero), dimensions={}
input = s8[1,3,3,64] parameter(0)
filter = s8[3,3,64,64] parameter(1)
bias = f32[64] parameter(2)
inputs32 = s32[1,3,3,64] convert(input)
filters32 = s32[3,3,64,64] convert(filter)
conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
convfloat = f32[1,3,3,64] convert(conv)
broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3}
add1 = f32[1,3,3,64] add(convfloat, broadcasted_bias)
ROOT relu = f32[1,3,3,64] maximum(zeros, add1)
})",
R"(
; CHECK-LABEL: ENTRY %Test (input: s8[1,3,3,64], filter: s8[3,3,64,64], bias: f32[64]) -> f32[1,3,3,64] {
; CHECK: [[custom_call_0:%[^ ]+]]{{(\.[0-9])?}} = (f32[1,3,3,64]{3,2,1,0}, u8[{{[0-9]*}}]{0}) custom-call([[input_1:%[^ ]+]], [[copy_2:%[^ ]+]]{{(\.[0-9])?}}, [[bias_3:%[^ ]+]]), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward", backend_config=
; CHECK-NEXT: ROOT [[get_tuple_element_4:%[^ ]+]]{{(\.[0-9])?}} = f32[1,3,3,64]{3,2,1,0} get-tuple-element([[custom_call_0]]{{(\.[0-9])?}}), index=0
)");
}
TEST_F(CudnnFusedConvRewriterTest,
TestFusedConvWithScaledInt8SideInputBiasInt8ToInt8) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
alpha_conv = f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = f32[] constant(0.899994934)
alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = s8[1,3,3,64] parameter(0)
filter = s8[3,3,64,64] parameter(1)
side_input = s8[1,3,3,64] parameter(2)
bias = f32[64] parameter(3)
inputs32 = s32[1,3,3,64] convert(input)
filters32 = s32[3,3,64,64] convert(filter)
side_input_f32 = f32[1,3,3,64] convert(side_input)
conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
convfloat = f32[1,3,3,64] convert(conv)
scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv)
scaled_side_input = f32[1,3,3,64] multiply(side_input_f32, alpha_side_input)
broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3}
add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = f32[1,3,3,64] add(add1, scaled_side_input)
relu = f32[1,3,3,64] maximum(zeros, add2)
lower = f32[] constant(-128)
lowers = f32[1,3,3,64] broadcast(lower), dimensions={}
upper = f32[] constant(127)
uppers = f32[1,3,3,64] broadcast(upper), dimensions={}
clamp = f32[1,3,3,64] clamp(lowers, relu, uppers)
ROOT convert = s8[1,3,3,64] convert(clamp)
})",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest,
TestFusedConvWithScaledFloatSideInputBiasInt8ToInt8) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
alpha_conv = f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = f32[] constant(0.899994934)
alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = s8[1,3,3,64] parameter(0)
filter = s8[3,3,64,64] parameter(1)
side_input = f32[1,3,3,64] parameter(2)
bias = f32[64] parameter(3)
inputs32 = s32[1,3,3,64] convert(input)
filters32 = s32[3,3,64,64] convert(filter)
conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
convfloat = f32[1,3,3,64] convert(conv)
scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv)
scaled_side_input = f32[1,3,3,64] multiply(side_input, alpha_side_input)
broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3}
add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = f32[1,3,3,64] add(add1, scaled_side_input)
relu = f32[1,3,3,64] maximum(zeros, add2)
lower = f32[] constant(-128)
lowers = f32[1,3,3,64] broadcast(lower), dimensions={}
upper = f32[] constant(127)
uppers = f32[1,3,3,64] broadcast(upper), dimensions={}
clamp = f32[1,3,3,64] clamp(lowers, relu, uppers)
ROOT convert = s8[1,3,3,64] convert(clamp)
})",
R"(
)");
}
TEST_F(CudnnFusedConvRewriterTest,
TestFusedConvWithScaledInt8SideInputBiasInt8ToFloat) {
MAYBE_SKIP_TEST("I8");
TestClamp(
R"(
HloModule Test
ENTRY Test {
zero = f32[] constant(0)
zeros = f32[1,3,3,64] broadcast(zero), dimensions={}
alpha_conv_scalar = f32[] constant(0.999994934)
alpha_conv = f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={}
alpha_side_input_scalar = f32[] constant(0.899994934)
alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={}
input = s8[1,3,3,64] parameter(0)
filter = s8[3,3,64,64] parameter(1)
side_input = s8[1,3,3,64] parameter(2)
bias = f32[64] parameter(3)
inputs32 = s32[1,3,3,64] convert(input)
filters32 = s32[3,3,64,64] convert(filter)
side_input_f32 = f32[1,3,3,64] convert(side_input)
conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1
convfloat = f32[1,3,3,64] convert(conv)
scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv)
scaled_side_input = f32[1,3,3,64] multiply(side_input_f32, alpha_side_input)
broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3}
add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias)
add2 = f32[1,3,3,64] add(add1, scaled_side_input)
relu = f32[1,3,3,64] maximum(zeros, add2)
lower = f32[] constant(-128)
lowers = f32[1,3,3,64] broadcast(lower), dimensions={}
upper = f32[] constant(127)
uppers = f32[1,3,3,64] broadcast(upper), dimensions={}
ROOT clamp = f32[1,3,3,64] clamp(lowers, relu, uppers)
})",
R"(
)");
}
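// Converting the s32 conv result straight to s8 without a preceding clamp is
// invalid, so the rewriter pass is expected to fail on this module.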
TEST_F(CudnnFusedConvRewriterTest, TestConvInt8ToInt8NoClamp) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test (input: s8[1,17,9,9], filter: s8[3,3,17,32]) -> s8[1,32,9,9] {
zero = s8[] constant(0)
zeros = s8[1,32,9,9]{3,2,1,0} broadcast(s8[] zero), dimensions={}
input = s8[1,17,9,9]{3,2,1,0} parameter(0)
filter = s8[3,3,17,32]{3,2,1,0} parameter(1)
custom-call = (s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,17,9,9]{3,2,1,0} input, s8[3,3,17,32]{3,2,1,0} filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config="{\"convResultScale\":1}"
get-tuple-element = s32[1,32,9,9]{3,2,1,0} get-tuple-element((s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call), index=0
convert = s8[1,32,9,9]{3,2,1,0} convert(s32[1,32,9,9]{3,2,1,0} get-tuple-element)
ROOT relu = s8[1,32,9,9]{3,2,1,0} maximum(s8[1,32,9,9]{3,2,1,0} zeros, s8[1,32,9,9]{3,2,1,0} convert)
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ASSERT_FALSE(CudnnFusedConvRewriter(GetCudaComputeCapability(),
GetDnnVersion(), GetToolkitVersion())
.Run(m.get())
.ok());
}
TEST_F(CudnnFusedConvRewriterTest, TestFusedConvInt8ToInt8NoClamp) {
MAYBE_SKIP_TEST("I8");
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test (input: s8[1,17,9,9], filter: s8[3,3,17,32]) -> s8[1,32,9,9] {
zero = s8[] constant(0)
zeros = s8[1,32,9,9]{3,2,1,0} broadcast(s8[] zero), dimensions={}
input = s8[1,17,9,9]{3,2,1,0} parameter(0)
filter = s8[3,3,17,32]{3,2,1,0} parameter(1)
custom-call = (s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,17,9,9]{3,2,1,0} input, s8[3,3,17,32]{3,2,1,0} filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config="{\"convResultScale\":1}"
get-tuple-element = s32[1,32,9,9]{3,2,1,0} get-tuple-element((s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call), index=0
convert = s8[1,32,9,9]{3,2,1,0} convert(s32[1,32,9,9]{3,2,1,0} get-tuple-element)
ROOT relu = s8[1,32,9,9]{3,2,1,0} maximum(s8[1,32,9,9]{3,2,1,0} zeros, s8[1,32,9,9]{3,2,1,0} convert)
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
ASSERT_FALSE(CudnnFusedConvRewriter(GetCudaComputeCapability(),
GetDnnVersion(), GetToolkitVersion())
.Run(m.get())
.ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bccc1600-dd25-4f2b-be39-df2417b732ee | cpp | google/quiche | event_forwarder | quiche/http2/adapter/event_forwarder.cc | quiche/http2/adapter/event_forwarder_test.cc | #include "quiche/http2/adapter/event_forwarder.h"
#include <string>
#include <utility>
namespace http2 {
namespace adapter {
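// EventForwarder forwards each SpdyFramerVisitorInterface callback to
// `receiver_` only while `can_forward_()` returns true. OnHeaderFrameStart is
// the exception: it is always forwarded because a headers handler must be
// returned to the framer.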
EventForwarder::EventForwarder(ForwardPredicate can_forward,
spdy::SpdyFramerVisitorInterface& receiver)
: can_forward_(std::move(can_forward)), receiver_(receiver) {}
void EventForwarder::OnError(Http2DecoderAdapter::SpdyFramerError error,
std::string detailed_error) {
if (can_forward_()) {
receiver_.OnError(error, std::move(detailed_error));
}
}
void EventForwarder::OnCommonHeader(spdy::SpdyStreamId stream_id, size_t length,
uint8_t type, uint8_t flags) {
if (can_forward_()) {
receiver_.OnCommonHeader(stream_id, length, type, flags);
}
}
void EventForwarder::OnDataFrameHeader(spdy::SpdyStreamId stream_id,
size_t length, bool fin) {
if (can_forward_()) {
receiver_.OnDataFrameHeader(stream_id, length, fin);
}
}
void EventForwarder::OnStreamFrameData(spdy::SpdyStreamId stream_id,
const char* data, size_t len) {
if (can_forward_()) {
receiver_.OnStreamFrameData(stream_id, data, len);
}
}
void EventForwarder::OnStreamEnd(spdy::SpdyStreamId stream_id) {
if (can_forward_()) {
receiver_.OnStreamEnd(stream_id);
}
}
void EventForwarder::OnStreamPadLength(spdy::SpdyStreamId stream_id,
size_t value) {
if (can_forward_()) {
receiver_.OnStreamPadLength(stream_id, value);
}
}
void EventForwarder::OnStreamPadding(spdy::SpdyStreamId stream_id, size_t len) {
if (can_forward_()) {
receiver_.OnStreamPadding(stream_id, len);
}
}
spdy::SpdyHeadersHandlerInterface* EventForwarder::OnHeaderFrameStart(
spdy::SpdyStreamId stream_id) {
return receiver_.OnHeaderFrameStart(stream_id);
}
void EventForwarder::OnHeaderFrameEnd(spdy::SpdyStreamId stream_id) {
if (can_forward_()) {
receiver_.OnHeaderFrameEnd(stream_id);
}
}
void EventForwarder::OnRstStream(spdy::SpdyStreamId stream_id,
spdy::SpdyErrorCode error_code) {
if (can_forward_()) {
receiver_.OnRstStream(stream_id, error_code);
}
}
void EventForwarder::OnSettings() {
if (can_forward_()) {
receiver_.OnSettings();
}
}
void EventForwarder::OnSetting(spdy::SpdySettingsId id, uint32_t value) {
if (can_forward_()) {
receiver_.OnSetting(id, value);
}
}
void EventForwarder::OnSettingsEnd() {
if (can_forward_()) {
receiver_.OnSettingsEnd();
}
}
void EventForwarder::OnSettingsAck() {
if (can_forward_()) {
receiver_.OnSettingsAck();
}
}
void EventForwarder::OnPing(spdy::SpdyPingId unique_id, bool is_ack) {
if (can_forward_()) {
receiver_.OnPing(unique_id, is_ack);
}
}
void EventForwarder::OnGoAway(spdy::SpdyStreamId last_accepted_stream_id,
spdy::SpdyErrorCode error_code) {
if (can_forward_()) {
receiver_.OnGoAway(last_accepted_stream_id, error_code);
}
}
bool EventForwarder::OnGoAwayFrameData(const char* goaway_data, size_t len) {
if (can_forward_()) {
return receiver_.OnGoAwayFrameData(goaway_data, len);
}
return false;
}
void EventForwarder::OnHeaders(spdy::SpdyStreamId stream_id,
size_t payload_length, bool has_priority,
int weight, spdy::SpdyStreamId parent_stream_id,
bool exclusive, bool fin, bool end) {
if (can_forward_()) {
receiver_.OnHeaders(stream_id, payload_length, has_priority, weight,
parent_stream_id, exclusive, fin, end);
}
}
void EventForwarder::OnWindowUpdate(spdy::SpdyStreamId stream_id,
int delta_window_size) {
if (can_forward_()) {
receiver_.OnWindowUpdate(stream_id, delta_window_size);
}
}
void EventForwarder::OnPushPromise(spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId promised_stream_id,
bool end) {
if (can_forward_()) {
receiver_.OnPushPromise(stream_id, promised_stream_id, end);
}
}
void EventForwarder::OnContinuation(spdy::SpdyStreamId stream_id,
size_t payload_length, bool end) {
if (can_forward_()) {
receiver_.OnContinuation(stream_id, payload_length, end);
}
}
void EventForwarder::OnAltSvc(
spdy::SpdyStreamId stream_id, absl::string_view origin,
const spdy::SpdyAltSvcWireFormat::AlternativeServiceVector& altsvc_vector) {
if (can_forward_()) {
receiver_.OnAltSvc(stream_id, origin, altsvc_vector);
}
}
void EventForwarder::OnPriority(spdy::SpdyStreamId stream_id,
spdy::SpdyStreamId parent_stream_id, int weight,
bool exclusive) {
if (can_forward_()) {
receiver_.OnPriority(stream_id, parent_stream_id, weight, exclusive);
}
}
void EventForwarder::OnPriorityUpdate(spdy::SpdyStreamId prioritized_stream_id,
absl::string_view priority_field_value) {
if (can_forward_()) {
receiver_.OnPriorityUpdate(prioritized_stream_id, priority_field_value);
}
}
bool EventForwarder::OnUnknownFrame(spdy::SpdyStreamId stream_id,
uint8_t frame_type) {
if (can_forward_()) {
return receiver_.OnUnknownFrame(stream_id, frame_type);
}
return false;
}
void EventForwarder::OnUnknownFrameStart(spdy::SpdyStreamId stream_id,
size_t length, uint8_t type,
uint8_t flags) {
if (can_forward_()) {
receiver_.OnUnknownFrameStart(stream_id, length, type, flags);
}
}
void EventForwarder::OnUnknownFramePayload(spdy::SpdyStreamId stream_id,
absl::string_view payload) {
if (can_forward_()) {
receiver_.OnUnknownFramePayload(stream_id, payload);
}
}
}
} | #include "quiche/http2/adapter/event_forwarder.h"
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/http2/adapter/http2_protocol.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/test_tools/mock_spdy_framer_visitor.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace adapter {
namespace test {
namespace {
constexpr absl::string_view some_data = "Here is some data for events";
constexpr spdy::SpdyStreamId stream_id = 1;
constexpr spdy::SpdyErrorCode error_code =
spdy::SpdyErrorCode::ERROR_CODE_ENHANCE_YOUR_CALM;
constexpr size_t length = 42;
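// With an always-true predicate, every framer event must be forwarded to the
// wrapped receiver.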
TEST(EventForwarderTest, ForwardsEventsWithTruePredicate) {
spdy::test::MockSpdyFramerVisitor receiver;
receiver.DelegateHeaderHandling();
EventForwarder event_forwarder([]() { return true; }, receiver);
EXPECT_CALL(
receiver,
OnError(Http2DecoderAdapter::SpdyFramerError::SPDY_STOP_PROCESSING,
std::string(some_data)));
event_forwarder.OnError(
Http2DecoderAdapter::SpdyFramerError::SPDY_STOP_PROCESSING,
std::string(some_data));
EXPECT_CALL(receiver,
OnCommonHeader(stream_id, length, 0x0, END_STREAM_FLAG));
  event_forwarder.OnCommonHeader(stream_id, length, 0x0, END_STREAM_FLAG);
EXPECT_CALL(receiver, OnDataFrameHeader(stream_id, length, true));
event_forwarder.OnDataFrameHeader(stream_id, length, true);
EXPECT_CALL(receiver,
OnStreamFrameData(stream_id, some_data.data(), some_data.size()));
event_forwarder.OnStreamFrameData(stream_id, some_data.data(),
some_data.size());
EXPECT_CALL(receiver, OnStreamEnd(stream_id));
event_forwarder.OnStreamEnd(stream_id);
EXPECT_CALL(receiver, OnStreamPadLength(stream_id, length));
event_forwarder.OnStreamPadLength(stream_id, length);
EXPECT_CALL(receiver, OnStreamPadding(stream_id, length));
event_forwarder.OnStreamPadding(stream_id, length);
EXPECT_CALL(receiver, OnHeaderFrameStart(stream_id));
spdy::SpdyHeadersHandlerInterface* handler =
event_forwarder.OnHeaderFrameStart(stream_id);
EXPECT_EQ(handler, receiver.ReturnTestHeadersHandler(stream_id));
EXPECT_CALL(receiver, OnHeaderFrameEnd(stream_id));
event_forwarder.OnHeaderFrameEnd(stream_id);
EXPECT_CALL(receiver, OnRstStream(stream_id, error_code));
event_forwarder.OnRstStream(stream_id, error_code);
EXPECT_CALL(receiver, OnSettings());
event_forwarder.OnSettings();
EXPECT_CALL(
receiver,
OnSetting(spdy::SpdyKnownSettingsId::SETTINGS_MAX_CONCURRENT_STREAMS,
100));
event_forwarder.OnSetting(
spdy::SpdyKnownSettingsId::SETTINGS_MAX_CONCURRENT_STREAMS, 100);
EXPECT_CALL(receiver, OnSettingsEnd());
event_forwarder.OnSettingsEnd();
EXPECT_CALL(receiver, OnSettingsAck());
event_forwarder.OnSettingsAck();
EXPECT_CALL(receiver, OnPing(42, false));
event_forwarder.OnPing(42, false);
EXPECT_CALL(receiver, OnGoAway(stream_id, error_code));
event_forwarder.OnGoAway(stream_id, error_code);
EXPECT_CALL(receiver, OnGoAwayFrameData(some_data.data(), some_data.size()));
event_forwarder.OnGoAwayFrameData(some_data.data(), some_data.size());
EXPECT_CALL(receiver,
OnHeaders(stream_id, 1234,
false, 42, stream_id + 2,
false, true, true));
  event_forwarder.OnHeaders(stream_id, 1234, false, 42, stream_id + 2, false,
                            true, true);
EXPECT_CALL(receiver, OnWindowUpdate(stream_id, 42));
event_forwarder.OnWindowUpdate(stream_id, 42);
EXPECT_CALL(receiver, OnPushPromise(stream_id, stream_id + 1, true));
event_forwarder.OnPushPromise(stream_id, stream_id + 1, true);
EXPECT_CALL(receiver,
OnContinuation(stream_id, 42, true));
  event_forwarder.OnContinuation(stream_id, 42, true);
const spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
EXPECT_CALL(receiver, OnAltSvc(stream_id, some_data, altsvc_vector));
event_forwarder.OnAltSvc(stream_id, some_data, altsvc_vector);
EXPECT_CALL(receiver, OnPriority(stream_id, stream_id + 2, 42,
false));
  event_forwarder.OnPriority(stream_id, stream_id + 2, 42, false);
EXPECT_CALL(receiver, OnPriorityUpdate(stream_id, some_data));
event_forwarder.OnPriorityUpdate(stream_id, some_data);
EXPECT_CALL(receiver, OnUnknownFrame(stream_id, 0x4D));
event_forwarder.OnUnknownFrame(stream_id, 0x4D);
EXPECT_CALL(receiver, OnUnknownFrameStart(stream_id, 42,
0x4D, 0x0));
  event_forwarder.OnUnknownFrameStart(stream_id, 42, 0x4D, 0x0);
}
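// With an always-false predicate, no event may reach the receiver except
// OnHeaderFrameStart, which still has to return the receiver's headers
// handler.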
TEST(EventForwarderTest, DoesNotForwardEventsWithFalsePredicate) {
spdy::test::MockSpdyFramerVisitor receiver;
receiver.DelegateHeaderHandling();
EventForwarder event_forwarder([]() { return false; }, receiver);
EXPECT_CALL(receiver, OnError).Times(0);
event_forwarder.OnError(
Http2DecoderAdapter::SpdyFramerError::SPDY_STOP_PROCESSING,
std::string(some_data));
EXPECT_CALL(receiver, OnCommonHeader).Times(0);
  event_forwarder.OnCommonHeader(stream_id, length, 0x0, END_STREAM_FLAG);
EXPECT_CALL(receiver, OnDataFrameHeader).Times(0);
event_forwarder.OnDataFrameHeader(stream_id, length, true);
EXPECT_CALL(receiver, OnStreamFrameData).Times(0);
event_forwarder.OnStreamFrameData(stream_id, some_data.data(),
some_data.size());
EXPECT_CALL(receiver, OnStreamEnd).Times(0);
event_forwarder.OnStreamEnd(stream_id);
EXPECT_CALL(receiver, OnStreamPadLength).Times(0);
event_forwarder.OnStreamPadLength(stream_id, length);
EXPECT_CALL(receiver, OnStreamPadding).Times(0);
event_forwarder.OnStreamPadding(stream_id, length);
EXPECT_CALL(receiver, OnHeaderFrameStart(stream_id));
spdy::SpdyHeadersHandlerInterface* handler =
event_forwarder.OnHeaderFrameStart(stream_id);
EXPECT_EQ(handler, receiver.ReturnTestHeadersHandler(stream_id));
EXPECT_CALL(receiver, OnHeaderFrameEnd).Times(0);
event_forwarder.OnHeaderFrameEnd(stream_id);
EXPECT_CALL(receiver, OnRstStream).Times(0);
event_forwarder.OnRstStream(stream_id, error_code);
EXPECT_CALL(receiver, OnSettings).Times(0);
event_forwarder.OnSettings();
EXPECT_CALL(receiver, OnSetting).Times(0);
event_forwarder.OnSetting(
spdy::SpdyKnownSettingsId::SETTINGS_MAX_CONCURRENT_STREAMS, 100);
EXPECT_CALL(receiver, OnSettingsEnd).Times(0);
event_forwarder.OnSettingsEnd();
EXPECT_CALL(receiver, OnSettingsAck).Times(0);
event_forwarder.OnSettingsAck();
EXPECT_CALL(receiver, OnPing).Times(0);
event_forwarder.OnPing(42, false);
EXPECT_CALL(receiver, OnGoAway).Times(0);
event_forwarder.OnGoAway(stream_id, error_code);
EXPECT_CALL(receiver, OnGoAwayFrameData).Times(0);
event_forwarder.OnGoAwayFrameData(some_data.data(), some_data.size());
EXPECT_CALL(receiver, OnHeaders).Times(0);
  event_forwarder.OnHeaders(stream_id, 1234, false, 42, stream_id + 2, false,
                            true, true);
EXPECT_CALL(receiver, OnWindowUpdate).Times(0);
event_forwarder.OnWindowUpdate(stream_id, 42);
EXPECT_CALL(receiver, OnPushPromise).Times(0);
event_forwarder.OnPushPromise(stream_id, stream_id + 1, true);
EXPECT_CALL(receiver, OnContinuation).Times(0);
  event_forwarder.OnContinuation(stream_id, 42, true);
EXPECT_CALL(receiver, OnAltSvc).Times(0);
const spdy::SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
event_forwarder.OnAltSvc(stream_id, some_data, altsvc_vector);
EXPECT_CALL(receiver, OnPriority).Times(0);
  event_forwarder.OnPriority(stream_id, stream_id + 2, 42, false);
EXPECT_CALL(receiver, OnPriorityUpdate).Times(0);
event_forwarder.OnPriorityUpdate(stream_id, some_data);
EXPECT_CALL(receiver, OnUnknownFrame).Times(0);
event_forwarder.OnUnknownFrame(stream_id, 0x4D);
EXPECT_CALL(receiver, OnUnknownFrameStart).Times(0);
  event_forwarder.OnUnknownFrameStart(stream_id, 42, 0x4D, 0x0);
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/event_forwarder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/adapter/event_forwarder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
ff8c6da2-f799-4128-be68-719d72d110fb | cpp | tensorflow/tensorflow | op_segment | tensorflow/core/framework/op_segment.cc | tensorflow/core/framework/op_segment_test.cc | #include "tensorflow/core/framework/op_segment.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
OpSegment::Item::~Item() {
for (const auto& kv : name_kernel) delete kv.second;
}
OpSegment::OpSegment() {}
OpSegment::~OpSegment() {
for (const auto& kv : sessions_) delete kv.second;
}
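// Looks up the kernel registered for (session_handle, node_name). If it is
// missing, the kernel is built via create_fn outside the lock and the table
// is re-checked before inserting; a kernel installed concurrently by another
// thread wins and the freshly created one is deleted.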
Status OpSegment::FindOrCreate(const string& session_handle,
const string& node_name, OpKernel** kernel,
CreateKernelFn create_fn) {
{
mutex_lock l(mu_);
auto item = gtl::FindPtrOrNull(sessions_, session_handle);
if (item == nullptr) {
return errors::NotFound("Session ", session_handle, " is not found.");
}
*kernel = gtl::FindPtrOrNull(item->name_kernel, node_name);
if (*kernel != nullptr) {
return absl::OkStatus();
}
}
Status s = create_fn(kernel);
if (!s.ok()) {
LOG(ERROR) << "Create kernel failed: " << s;
return s;
}
{
mutex_lock l(mu_);
auto item = gtl::FindPtrOrNull(sessions_, session_handle);
if (item == nullptr) {
return errors::NotFound("Session ", session_handle, " is not found.");
}
OpKernel** p_kernel = &(item->name_kernel[node_name]);
if (*p_kernel == nullptr) {
*p_kernel = *kernel;
} else {
delete *kernel;
*kernel = *p_kernel;
}
}
return absl::OkStatus();
}
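// Holds are per-session reference counts: AddHold creates the session entry
// or bumps its count, and the final RemoveHold erases the entry and deletes
// every kernel it owns.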
void OpSegment::AddHold(const string& session_handle) {
mutex_lock l(mu_);
Item** item = &sessions_[session_handle];
if (*item == nullptr) {
*item = new Item;
} else {
++((*item)->num_holds);
}
}
void OpSegment::RemoveHold(const string& session_handle) {
Item* item = nullptr;
{
mutex_lock l(mu_);
auto siter = sessions_.find(session_handle);
if (siter == sessions_.end()) {
VLOG(1) << "Session " << session_handle << " is not found.";
return;
}
item = siter->second;
if (--(item->num_holds) > 0) {
return;
} else {
sessions_.erase(siter);
}
}
delete item;
}
bool OpSegment::ShouldOwnKernel(FunctionLibraryRuntime* lib,
const string& node_op) {
return lib->IsStateful(node_op) &&
lib->GetFunctionLibraryDefinition()->Find(node_op) == nullptr &&
node_op != "PartitionedCall" && node_op != "StatefulPartitionedCall";
}
} | #include "tensorflow/core/framework/op_segment.h"
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
class OpSegmentTest : public ::testing::Test {
protected:
DeviceBase device_;
std::vector<NodeDef> int32_nodedefs_;
std::vector<NodeDef> float_nodedefs_;
OpSegmentTest() : device_(Env::Default()) {
for (int i = 0; i < 10; ++i) {
NodeDef def;
TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
.Input("x", 0, DT_INT32)
.Input("y", 0, DT_INT32)
.Finalize(&def));
int32_nodedefs_.push_back(def);
TF_CHECK_OK(NodeDefBuilder(strings::StrCat("op", i), "Mul")
.Input("x", 0, DT_FLOAT)
.Input("y", 0, DT_FLOAT)
.Finalize(&def));
float_nodedefs_.push_back(def);
}
}
void ValidateOpAndTypes(OpKernel* op, const NodeDef& expected, DataType dt) {
ASSERT_NE(op, nullptr);
EXPECT_EQ(expected.DebugString(), op->def().DebugString());
EXPECT_EQ(2, op->num_inputs());
EXPECT_EQ(dt, op->input_type(0));
EXPECT_EQ(dt, op->input_type(1));
EXPECT_EQ(1, op->num_outputs());
EXPECT_EQ(dt, op->output_type(0));
}
OpSegment::CreateKernelFn GetFn(const NodeDef* ndef) {
return [this, ndef](OpKernel** kernel) {
Status s;
auto created = CreateOpKernel(DEVICE_CPU, &device_, cpu_allocator(),
*ndef, TF_GRAPH_DEF_VERSION, &s);
if (s.ok()) {
*kernel = created.release();
}
return s;
};
}
};
TEST_F(OpSegmentTest, Basic) {
OpSegment opseg;
OpKernel* op;
opseg.AddHold("A");
opseg.AddHold("B");
for (int i = 0; i < 10; ++i) {
auto* ndef = &float_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("A", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_FLOAT);
ndef = &int32_nodedefs_[i];
TF_EXPECT_OK(opseg.FindOrCreate("B", ndef->name(), &op, GetFn(ndef)));
ValidateOpAndTypes(op, *ndef, DT_INT32);
}
auto reterr = [](OpKernel** kernel) {
return errors::Internal("Should not be called");
};
for (int i = 0; i < 10; ++i) {
TF_EXPECT_OK(
opseg.FindOrCreate("A", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, float_nodedefs_[i], DT_FLOAT);
TF_EXPECT_OK(
opseg.FindOrCreate("B", strings::StrCat("op", i), &op, reterr));
ValidateOpAndTypes(op, int32_nodedefs_[i], DT_INT32);
}
opseg.RemoveHold("A");
opseg.RemoveHold("B");
}
TEST_F(OpSegmentTest, SessionNotFound) {
OpSegment opseg;
OpKernel* op;
NodeDef def = float_nodedefs_[0];
Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
EXPECT_TRUE(errors::IsNotFound(s)) << s;
}
TEST_F(OpSegmentTest, CreateFailure) {
OpSegment opseg;
OpKernel* op;
NodeDef def = float_nodedefs_[0];
def.set_op("nonexistop");
opseg.AddHold("A");
Status s = opseg.FindOrCreate("A", def.name(), &op, GetFn(&def));
EXPECT_TRUE(errors::IsNotFound(s)) << s;
opseg.RemoveHold("A");
}
TEST_F(OpSegmentTest, AddRemoveHolds) {
OpSegment opseg;
OpKernel* op;
const auto& ndef = int32_nodedefs_[0];
opseg.RemoveHold("null");
opseg.AddHold("foo");
TF_EXPECT_OK(opseg.FindOrCreate("foo", ndef.name(), &op, GetFn(&ndef)));
opseg.AddHold("foo");
opseg.RemoveHold("foo");
ValidateOpAndTypes(op, ndef, DT_INT32);
opseg.RemoveHold("foo");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d2594947-6568-4ee6-a346-5acce0f5b176 | cpp | tensorflow/tensorflow | registration | third_party/xla/xla/mlir/tools/mlir_interpreter/framework/registration.cc | tensorflow/core/framework/registration/registration_test.cc | #include "xla/mlir/tools/mlir_interpreter/framework/registration.h"
#include <cassert>
#include <functional>
#include <utility>
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter.h"
#include "xla/mlir/tools/mlir_interpreter/framework/interpreter_value.h"
namespace mlir {
namespace interpreter {
namespace detail {
namespace {
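// Both registries are lazily heap-allocated and never freed, so lookups stay
// valid regardless of static initialization order.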
DenseMap<llvm::StringRef, llvm::StringRef>& GetOpAliases() {
static DenseMap<llvm::StringRef, llvm::StringRef>* aliases = nullptr;
if (!aliases) {
aliases = new DenseMap<llvm::StringRef, llvm::StringRef>();
}
return *aliases;
}
DenseMap<llvm::StringRef, InterpreterFunction>& GetFunctions() {
static DenseMap<llvm::StringRef, InterpreterFunction>* functions = nullptr;
if (!functions) {
functions = new DenseMap<llvm::StringRef, InterpreterFunction>();
}
return *functions;
}
}
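// Returns the handler registered under `name`, falling back to the alias
// table; returns nullptr when the op is unknown.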
InterpreterFunction GetFunction(llvm::StringRef name) {
const auto& fns = GetFunctions();
auto fn = fns.find(name);
if (fn != fns.end()) {
return fn->second;
}
const auto& aliases = GetOpAliases();
auto alias = aliases.find(name);
if (alias != aliases.end()) {
return fns.find(alias->second)->second;
}
return nullptr;
}
void RegisterInterpreterOp(llvm::StringRef name,
InterpreterValue (*fn)(const InterpreterValue&)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
assert(operands.size() == 1 && "unexpected number of operands");
return {fn(operands[0])};
});
}
void RegisterInterpreterOp(llvm::StringRef name,
InterpreterValue (*fn)(const InterpreterValue&,
const InterpreterValue&)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
assert(operands.size() == 2 && "unexpected number of operands");
return {fn(operands[0], operands[1])};
});
}
void RegisterInterpreterOp(
llvm::StringRef name,
InterpreterValue (*fn)(MutableArrayRef<InterpreterValue>)) {
RegisterInterpreterOp(
name,
[fn](MutableArrayRef<InterpreterValue> operands, mlir::Operation*,
InterpreterState&) -> SmallVector<InterpreterValue> {
return {fn(operands)};
});
}
void RegisterInterpreterOp(
llvm::StringRef name,
std::function<llvm::SmallVector<InterpreterValue>(
MutableArrayRef<InterpreterValue>, mlir::Operation*, InterpreterState&)>
fn) {
GetFunctions()[name] = std::move(fn);
}
void RegisterInterpreterOp(llvm::StringRef name, llvm::StringRef original) {
GetOpAliases()[name] = original;
}
}
}
} | #include "tensorflow/core/framework/registration/registration.h"
#include <gmock/gmock.h>
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::Eq;
#define STORE_NEXT_ID_IMPL(id, name) constexpr int name = id
#define STORE_NEXT_ID(name) TF_NEW_ID_FOR_INIT(STORE_NEXT_ID_IMPL, name)
STORE_NEXT_ID(kBaseId);
STORE_NEXT_ID(kNextId1);
STORE_NEXT_ID(kNextId2);
TEST(NewIdForInitTest, SequentialIds) {
static_assert(kBaseId >= 0, "kBaseId < 0");
static_assert(kNextId1 == kBaseId + 1, "kNextId1 != kBaseId+1");
static_assert(kNextId2 == kBaseId + 2, "kNextId2 != kBaseId+2");
}
int observed_unconditional_init;
InitOnStartupMarker const kUnconditionalInitMarker =
InitOnStartupMarker{} << []() {
observed_unconditional_init++;
return InitOnStartupMarker{};
};
TEST(InitOnStartupTest, Unconditional) {
EXPECT_THAT(observed_unconditional_init, Eq(1));
}
template <bool Enable>
int observed_conditional_init;
template <bool Enable>
InitOnStartupMarker const kConditionalInitMarker =
TF_INIT_ON_STARTUP_IF(Enable) << []() {
(observed_conditional_init<Enable>)++;
return InitOnStartupMarker{};
};
template InitOnStartupMarker const kConditionalInitMarker<true>;
template InitOnStartupMarker const kConditionalInitMarker<false>;
TEST(InitOnStartupTest, DISABLED_Conditional) {
EXPECT_THAT(observed_conditional_init<true>, Eq(1));
EXPECT_THAT(observed_conditional_init<false>, Eq(0));
}
template <bool Enable>
int observed_conditional_init_immediate;
template <bool Enable>
InitOnStartupMarker const kConditionalInitImmediateMarker =
TF_INIT_ON_STARTUP_IF(Enable) << ([]() {
(observed_conditional_init_immediate<Enable>)++;
return InitOnStartupMarker{};
})();
template InitOnStartupMarker const kConditionalInitImmediateMarker<true>;
template InitOnStartupMarker const kConditionalInitImmediateMarker<false>;
TEST(InitOnStartupTest, DISABLED_ConditionalImmediate) {
EXPECT_THAT(observed_conditional_init_immediate<true>, Eq(1));
EXPECT_THAT(observed_conditional_init_immediate<false>, Eq(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/mlir/tools/mlir_interpreter/framework/registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/registration/registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a08d2fce-61e5-473e-aede-407232bb0d24 | cpp | abseil/abseil-cpp | log_streamer | absl/log/log_streamer.h | absl/log/log_streamer_test.cc | #ifndef ABSL_LOG_LOG_STREAMER_H_
#define ABSL_LOG_LOG_STREAMER_H_
#include <ios>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/log/absl_log.h"
#include "absl/strings/internal/ostringstream.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
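// LogStreamer buffers everything written to stream() and emits it as a single
// log record attributed to `file_`:`line_` at `severity_` when the streamer
// is destroyed or overwritten by move-assignment. A moved-from streamer logs
// nothing.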
class LogStreamer final {
public:
explicit LogStreamer(absl::LogSeverity severity, absl::string_view file,
int line)
: severity_(severity),
line_(line),
file_(file),
stream_(absl::in_place, &buf_) {
stream_->setf(std::ios_base::showbase | std::ios_base::boolalpha);
}
LogStreamer(LogStreamer&& that) noexcept
: severity_(that.severity_),
line_(that.line_),
file_(std::move(that.file_)),
buf_(std::move(that.buf_)),
stream_(std::move(that.stream_)) {
if (stream_.has_value()) stream_->str(&buf_);
that.stream_.reset();
}
LogStreamer& operator=(LogStreamer&& that) {
ABSL_LOG_IF(LEVEL(severity_), stream_).AtLocation(file_, line_) << buf_;
severity_ = that.severity_;
file_ = std::move(that.file_);
line_ = that.line_;
buf_ = std::move(that.buf_);
stream_ = std::move(that.stream_);
if (stream_.has_value()) stream_->str(&buf_);
that.stream_.reset();
return *this;
}
~LogStreamer() {
ABSL_LOG_IF(LEVEL(severity_), stream_.has_value()).AtLocation(file_, line_)
<< buf_;
}
std::ostream& stream() { return *stream_; }
private:
absl::LogSeverity severity_;
int line_;
std::string file_;
std::string buf_;
absl::optional<absl::strings_internal::OStringStream> stream_;
};
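// Convenience factories that bind a canonical severity to the streamer.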
inline LogStreamer LogInfoStreamer(absl::string_view file, int line) {
return absl::LogStreamer(absl::LogSeverity::kInfo, file, line);
}
inline LogStreamer LogWarningStreamer(absl::string_view file, int line) {
return absl::LogStreamer(absl::LogSeverity::kWarning, file, line);
}
inline LogStreamer LogErrorStreamer(absl::string_view file, int line) {
return absl::LogStreamer(absl::LogSeverity::kError, file, line);
}
inline LogStreamer LogFatalStreamer(absl::string_view file, int line) {
return absl::LogStreamer(absl::LogSeverity::kFatal, file, line);
}
inline LogStreamer LogDebugFatalStreamer(absl::string_view file, int line) {
return absl::LogStreamer(absl::kLogDebugFatal, file, line);
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/log/log_streamer.h"
#include <ios>
#include <iostream>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/sysinfo.h"
#include "absl/base/log_severity.h"
#include "absl/log/internal/test_actions.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/strings/string_view.h"
namespace {
using ::absl::log_internal::DeathTestExpectedLogging;
using ::absl::log_internal::DeathTestUnexpectedLogging;
using ::absl::log_internal::DeathTestValidateExpectations;
#if GTEST_HAS_DEATH_TEST
using ::absl::log_internal::DiedOfFatal;
#endif
using ::absl::log_internal::InMatchWindow;
using ::absl::log_internal::LogSeverity;
using ::absl::log_internal::Prefix;
using ::absl::log_internal::SourceFilename;
using ::absl::log_internal::SourceLine;
using ::absl::log_internal::Stacktrace;
using ::absl::log_internal::TextMessage;
using ::absl::log_internal::ThreadID;
using ::absl::log_internal::Timestamp;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::IsTrue;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
void WriteToStream(absl::string_view data, std::ostream* os) {
*os << "WriteToStream: " << data;
}
void WriteToStreamRef(absl::string_view data, std::ostream& os) {
os << "WriteToStreamRef: " << data;
}
TEST(LogStreamerTest, LogInfoStreamer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kInfo)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), InMatchWindow(),
Eq(logging::proto::INFO), Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStream("foo", &absl::LogInfoStreamer("path/file.cc", 1234).stream());
}
TEST(LogStreamerTest, LogWarningStreamer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kWarning)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), InMatchWindow(),
Eq(logging::proto::WARNING), Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStream("foo",
&absl::LogWarningStreamer("path/file.cc", 1234).stream());
}
TEST(LogStreamerTest, LogErrorStreamer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), InMatchWindow(),
Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStream("foo", &absl::LogErrorStreamer("path/file.cc", 1234).stream());
}
#if GTEST_HAS_DEATH_TEST
TEST(LogStreamerDeathTest, LogFatalStreamer) {
EXPECT_EXIT(
{
absl::ScopedMockLog test_sink;
EXPECT_CALL(test_sink, Send)
.Times(AnyNumber())
.WillRepeatedly(DeathTestUnexpectedLogging());
EXPECT_CALL(test_sink,
Send(AllOf(SourceFilename(Eq("path/file.cc")),
SourceLine(Eq(1234)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234),
InMatchWindow(), Eq(logging::proto::FATAL),
Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(
R"pb(str: "WriteToStream: foo")pb")))))))
.WillOnce(DeathTestExpectedLogging());
test_sink.StartCapturingLogs();
WriteToStream("foo",
&absl::LogFatalStreamer("path/file.cc", 1234).stream());
},
DiedOfFatal, DeathTestValidateExpectations());
}
#endif
#ifdef NDEBUG
TEST(LogStreamerTest, LogDebugFatalStreamer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), InMatchWindow(),
Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStream("foo",
&absl::LogDebugFatalStreamer("path/file.cc", 1234).stream());
}
#elif GTEST_HAS_DEATH_TEST
TEST(LogStreamerDeathTest, LogDebugFatalStreamer) {
EXPECT_EXIT(
{
absl::ScopedMockLog test_sink;
EXPECT_CALL(test_sink, Send)
.Times(AnyNumber())
.WillRepeatedly(DeathTestUnexpectedLogging());
EXPECT_CALL(test_sink,
Send(AllOf(SourceFilename(Eq("path/file.cc")),
SourceLine(Eq(1234)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234),
InMatchWindow(), Eq(logging::proto::FATAL),
Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(
R"pb(str: "WriteToStream: foo")pb")))))))
.WillOnce(DeathTestExpectedLogging());
test_sink.StartCapturingLogs();
WriteToStream(
"foo", &absl::LogDebugFatalStreamer("path/file.cc", 1234).stream());
},
DiedOfFatal, DeathTestValidateExpectations());
}
#endif
TEST(LogStreamerTest, LogStreamer) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
Prefix(IsTrue()), LogSeverity(Eq(absl::LogSeverity::kError)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), InMatchWindow(),
Eq(logging::proto::ERROR), Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(R"pb(str: "WriteToStream: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStream(
"foo", &absl::LogStreamer(absl::LogSeverity::kError, "path/file.cc", 1234)
.stream());
}
#if GTEST_HAS_DEATH_TEST
TEST(LogStreamerDeathTest, LogStreamer) {
EXPECT_EXIT(
{
absl::ScopedMockLog test_sink;
EXPECT_CALL(test_sink, Send)
.Times(AnyNumber())
.WillRepeatedly(DeathTestUnexpectedLogging());
EXPECT_CALL(test_sink,
Send(AllOf(SourceFilename(Eq("path/file.cc")),
SourceLine(Eq(1234)), Prefix(IsTrue()),
LogSeverity(Eq(absl::LogSeverity::kFatal)),
Timestamp(InMatchWindow()),
ThreadID(Eq(absl::base_internal::GetTID())),
TextMessage(Eq("WriteToStream: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234),
InMatchWindow(), Eq(logging::proto::FATAL),
Eq(absl::base_internal::GetTID()),
ElementsAre(EqualsProto(
R"pb(str: "WriteToStream: foo")pb")))))))
.WillOnce(DeathTestExpectedLogging());
test_sink.StartCapturingLogs();
WriteToStream("foo", &absl::LogStreamer(absl::LogSeverity::kFatal,
"path/file.cc", 1234)
.stream());
},
DiedOfFatal, DeathTestValidateExpectations());
}
#endif
TEST(LogStreamerTest, PassedByReference) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
TextMessage(Eq("WriteToStreamRef: foo")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), _, _, _,
ElementsAre(EqualsProto(R"pb(str: "WriteToStreamRef: foo")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
WriteToStreamRef("foo", absl::LogInfoStreamer("path/file.cc", 1234).stream());
}
TEST(LogStreamerTest, StoredAsLocal) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
auto streamer = absl::LogInfoStreamer("path/file.cc", 1234);
WriteToStream("foo", &streamer.stream());
streamer.stream() << " ";
WriteToStreamRef("bar", streamer.stream());
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
TextMessage(Eq("WriteToStream: foo WriteToStreamRef: bar")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), _, _, _,
ElementsAre(EqualsProto(
R"pb(str: "WriteToStream: foo WriteToStreamRef: bar")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
}
#if GTEST_HAS_DEATH_TEST
TEST(LogStreamerDeathTest, StoredAsLocal) {
EXPECT_EXIT(
{
auto streamer = absl::LogFatalStreamer("path/file.cc", 1234);
std::cerr << "I'm still alive" << std::endl;
WriteToStream("foo", &streamer.stream());
},
DiedOfFatal, HasSubstr("I'm still alive"));
}
#endif
TEST(LogStreamerTest, LogsEmptyLine) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink,
Send(AllOf(SourceFilename(Eq("path/file.cc")),
SourceLine(Eq(1234)), TextMessage(Eq("")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), _, _, _,
ElementsAre(EqualsProto(R"pb(str: "")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
absl::LogInfoStreamer("path/file.cc", 1234);
}
#if GTEST_HAS_DEATH_TEST
TEST(LogStreamerDeathTest, LogsEmptyLine) {
EXPECT_EXIT(
{
absl::ScopedMockLog test_sink;
EXPECT_CALL(test_sink, Log)
.Times(AnyNumber())
.WillRepeatedly(DeathTestUnexpectedLogging());
EXPECT_CALL(
test_sink,
Send(AllOf(SourceFilename(Eq("path/file.cc")), TextMessage(Eq("")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), _, _, _, _,
ElementsAre(EqualsProto(R"pb(str: "")pb")))))))
.WillOnce(DeathTestExpectedLogging());
test_sink.StartCapturingLogs();
auto streamer = absl::LogFatalStreamer("path/file.cc", 1234);
},
DiedOfFatal, DeathTestValidateExpectations());
}
#endif
TEST(LogStreamerTest, MoveConstruction) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
LogSeverity(Eq(absl::LogSeverity::kInfo)),
TextMessage(Eq("hello 0x10 world 0x10")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), _, Eq(logging::proto::INFO), _,
ElementsAre(EqualsProto(R"pb(str: "hello 0x10 world 0x10")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
streamer1.stream() << "hello " << std::hex << 16;
absl::LogStreamer streamer2(std::move(streamer1));
streamer2.stream() << " world " << 16;
}
TEST(LogStreamerTest, MoveAssignment) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
testing::InSequence seq;
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file2.cc")), SourceLine(Eq(5678)),
LogSeverity(Eq(absl::LogSeverity::kWarning)),
TextMessage(Eq("something else")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file2.cc"), Eq(5678), _, Eq(logging::proto::WARNING), _,
ElementsAre(EqualsProto(R"pb(str: "something else")pb")))),
Stacktrace(IsEmpty()))));
EXPECT_CALL(
test_sink,
Send(AllOf(
SourceFilename(Eq("path/file.cc")), SourceLine(Eq(1234)),
LogSeverity(Eq(absl::LogSeverity::kInfo)),
TextMessage(Eq("hello 0x10 world 0x10")),
ENCODED_MESSAGE(MatchesEvent(
Eq("path/file.cc"), Eq(1234), _, Eq(logging::proto::INFO), _,
ElementsAre(EqualsProto(R"pb(str: "hello 0x10 world 0x10")pb")))),
Stacktrace(IsEmpty()))));
test_sink.StartCapturingLogs();
auto streamer1 = absl::LogInfoStreamer("path/file.cc", 1234);
streamer1.stream() << "hello " << std::hex << 16;
auto streamer2 = absl::LogWarningStreamer("path/file2.cc", 5678);
streamer2.stream() << "something else";
streamer2 = std::move(streamer1);
streamer2.stream() << " world " << 16;
}
TEST(LogStreamerTest, CorrectDefaultFlags) {
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(AllOf(TextMessage(Eq("false0xdeadbeef")))))
.Times(2);
test_sink.StartCapturingLogs();
absl::LogInfoStreamer("path/file.cc", 1234).stream()
<< false << std::hex << 0xdeadbeef;
LOG(INFO) << false << std::hex << 0xdeadbeef;
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_streamer.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/log_streamer_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
02bc84dc-502e-49cb-bc34-f1006c412337 | cpp | tensorflow/tensorflow | defuser | third_party/xla/xla/service/defuser.cc | third_party/xla/xla/service/defuser_test.cc | #include "xla/service/defuser.h"
#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
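// Replaces every fusion instruction in the module with the instructions of
// its fused computation: the call graph is walked and each fusion caller is
// expanded in place via HloInstruction::Defuse().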
absl::StatusOr<bool> Defuser::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Defusing module " << module->name();
XLA_VLOG_LINES(2, "Before defusion:\n" + module->ToString());
bool changed = false;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
TF_RETURN_IF_ERROR(call_graph->VisitNodes(
[&](const CallGraphNode& call_graph_node) -> absl::Status {
if (call_graph_node.computation()->IsFusionComputation()) {
TF_RET_CHECK(call_graph_node.caller_callsites().size() == 1);
HloInstruction* fusion_instruction =
call_graph_node.caller_callsites()[0].instruction();
TF_RETURN_IF_ERROR(fusion_instruction->Defuse());
changed = true;
}
return absl::OkStatus();
},
true));
XLA_VLOG_LINES(2, "After defusion:\n" + module->ToString());
return changed;
}
} | #include "xla/service/defuser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
class DefuserTest : public HloTestBase {
protected:
int FusionCount(const HloModule* m) {
int count = 0;
for (HloComputation* computation : m->computations()) {
if (computation->IsFusionComputation()) {
count++;
}
}
return count;
}
Defuser defuser_;
const Shape shape_ = ShapeUtil::MakeShape(F32, {2, 2});
};
TEST_F(DefuserTest, NoFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
m->AddEntryComputation(builder.Build());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_FALSE(defuser_.Run(m.get()).value());
}
TEST_F(DefuserTest, TrivialFusionInstructionAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Parameter(), op::Parameter()));
}
TEST_F(DefuserTest, TrivialFusionInstructionNotAsRoot) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Fusion()));
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Negate(op::Add(op::Parameter(), op::Parameter())));
}
TEST_F(DefuserTest, NonTrivialFusionInstruction) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction(
{add2, constant, div, mul, sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(1, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, MultipleFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto param3 =
builder.AddInstruction(HloInstruction::CreateParameter(2, shape_, "p2"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kSubtract, add, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kMultiply, sub, param3));
auto div = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kDivide, mul, param3));
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, constant, div));
auto computation = m->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({add2, constant, div, mul},
HloInstruction::FusionKind::kLoop);
computation->CreateFusionInstruction({sub, negate, add},
HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Constant(), op::Divide()));
}
TEST_F(DefuserTest, NestedFusionInstructions) {
auto m = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
auto param0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape_, "p0"));
auto param1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape_, "p1"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(shape_, HloOpcode::kAdd, param0, param1));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(shape_, HloOpcode::kNegate, add));
auto computation = m->AddEntryComputation(builder.Build());
auto outer_fusion = computation->CreateFusionInstruction(
{negate, add}, HloInstruction::FusionKind::kLoop);
HloInstruction* fused_negate = outer_fusion->fused_expression_root();
ASSERT_EQ(fused_negate->opcode(), HloOpcode::kNegate);
outer_fusion->fused_instructions_computation()->CreateFusionInstruction(
{fused_negate}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(computation->root_instruction(), op::Fusion());
EXPECT_EQ(2, FusionCount(m.get()));
EXPECT_TRUE(defuser_.Run(m.get()).value());
EXPECT_EQ(0, FusionCount(m.get()));
EXPECT_THAT(computation->root_instruction(), op::Negate(op::Add()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/defuser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ba84d55-f639-42ee-a830-911c1b3bb133 | cpp | google/tensorstore | zlib | tensorstore/internal/compression/zlib.cc | tensorstore/internal/compression/zlib_test.cc | #include "tensorstore/internal/compression/zlib.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/internal/compression/cord_stream_manager.h"
#include <zlib.h>
namespace tensorstore {
namespace zlib {
namespace {
struct InflateOp {
static int Init(z_stream* s, [[maybe_unused]] int level, int header_option) {
    return inflateInit2(s, 15 + header_option);
}
static int Process(z_stream* s, int flags) { return inflate(s, flags); }
static int Destroy(z_stream* s) { return inflateEnd(s); }
static constexpr bool kDataErrorPossible = true;
};
struct DeflateOp {
static int Init(z_stream* s, int level, int header_option) {
    return deflateInit2(s, level, Z_DEFLATED, 15 + header_option,
                        8, Z_DEFAULT_STRATEGY);
}
static int Process(z_stream* s, int flags) { return deflate(s, flags); }
static int Destroy(z_stream* s) { return deflateEnd(s); }
static constexpr bool kDataErrorPossible = false;
};
template <typename Op>
absl::Status ProcessZlib(const absl::Cord& input, absl::Cord* output, int level,
bool use_gzip_header) {
z_stream s = {};
internal::CordStreamManager<z_stream, 16 * 1024>
stream_manager(s, input, output);
  const int header_option = use_gzip_header ? 16 : 0;
int err = Op::Init(&s, level, header_option);
if (err != Z_OK) {
ABSL_CHECK(false);
}
struct StreamDestroyer {
z_stream* s;
~StreamDestroyer() { Op::Destroy(s); }
} stream_destroyer{&s};
while (true) {
const bool input_complete = stream_manager.FeedInputAndOutputBuffers();
err = Op::Process(&s, input_complete ? Z_FINISH : Z_NO_FLUSH);
const bool made_progress = stream_manager.HandleOutput();
if (err == Z_OK) continue;
if (err == Z_BUF_ERROR && made_progress) continue;
break;
}
switch (err) {
case Z_STREAM_END:
if (!stream_manager.has_input_remaining()) {
return absl::OkStatus();
}
[[fallthrough]];
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_BUF_ERROR:
if (!Op::kDataErrorPossible) {
ABSL_CHECK(false);
}
return absl::InvalidArgumentError("Error decoding zlib-compressed data");
default:
ABSL_CHECK(false);
}
ABSL_UNREACHABLE();
}
}
void Encode(const absl::Cord& input, absl::Cord* output,
const Options& options) {
ProcessZlib<DeflateOp>(input, output, options.level, options.use_gzip_header)
.IgnoreError();
}
absl::Status Decode(const absl::Cord& input, absl::Cord* output,
bool use_gzip_header) {
return ProcessZlib<InflateOp>(input, output, 0, use_gzip_header);
}
}
} | #include "tensorstore/internal/compression/zlib.h"
#include <cstddef>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
namespace zlib = tensorstore::zlib;
class ZlibCompressorTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(ZlibCompressorTestCases, ZlibCompressorTest,
::testing::Values(false, true));
TEST_P(ZlibCompressorTest, SmallRoundtrip) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result("abc"), decode_result("def");
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
TENSORSTORE_ASSERT_OK(
zlib::Decode(encode_result.Subcord(3, encode_result.size() - 3),
&decode_result, options.use_gzip_header));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST_P(ZlibCompressorTest, SmallRoundtripFragmented) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input = absl::MakeFragmentedCord(
{"The quick", " brown fox", " jumped over", " ", "the lazy dog."});
absl::Cord encode_result("abc"), decode_result("def");
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 3);
EXPECT_EQ("abc", encode_result.Subcord(0, 3));
std::vector<std::string> encode_result_fragments;
for (size_t i = 3; i < encode_result.size(); ++i) {
encode_result_fragments.push_back(std::string(encode_result.Subcord(i, 1)));
}
TENSORSTORE_ASSERT_OK(
zlib::Decode(absl::MakeFragmentedCord(encode_result_fragments),
&decode_result, options.use_gzip_header));
EXPECT_EQ("def" + std::string(input), decode_result);
}
TEST_P(ZlibCompressorTest, LargeRoundtrip) {
const bool use_gzip_header = GetParam();
std::string input(100000, '\0');
unsigned char x = 0;
for (auto& v : input) {
v = x;
x += 7;
}
zlib::Options options{6, use_gzip_header};
absl::Cord encode_result, decode_result;
zlib::Encode(absl::Cord(input), &encode_result, options);
ASSERT_EQ(absl::OkStatus(), zlib::Decode(encode_result, &decode_result,
options.use_gzip_header));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, NonDefaultLevel) {
const bool use_gzip_header = GetParam();
  zlib::Options options1{0, use_gzip_header};
zlib::Options options2{9, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
absl::Cord encode_result1, encode_result2;
zlib::Encode(input, &encode_result1, options1);
zlib::Encode(input, &encode_result2, options2);
EXPECT_NE(encode_result1, encode_result2);
absl::Cord decode_result;
TENSORSTORE_ASSERT_OK(
zlib::Decode(encode_result2, &decode_result, options2.use_gzip_header));
EXPECT_EQ(input, decode_result);
}
TEST_P(ZlibCompressorTest, DecodeCorruptData) {
const bool use_gzip_header = GetParam();
zlib::Options options{6, use_gzip_header};
const absl::Cord input("The quick brown fox jumped over the lazy dog.");
{
absl::Cord encode_result, decode_result;
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted[0] = 0;
EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
options.use_gzip_header),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
{
absl::Cord encode_result, decode_result;
zlib::Encode(input, &encode_result, options);
ASSERT_GE(encode_result.size(), 1);
std::string corrupted(encode_result);
corrupted.resize(corrupted.size() - 1);
EXPECT_THAT(zlib::Decode(absl::Cord(corrupted), &decode_result,
options.use_gzip_header),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zlib.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/zlib_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
737f2727-23b2-402e-a0a0-5a2b7d539d6c | cpp | tensorflow/tensorflow | edgeset | tensorflow/core/graph/edgeset.cc | tensorflow/core/graph/edgeset_test.cc | #include "tensorflow/core/graph/edgeset.h"
namespace tensorflow {
std::pair<EdgeSet::const_iterator, bool> EdgeSet::insert(value_type value) {
RegisterMutation();
const_iterator ci;
ci.Init(this);
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == value) {
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, false);
}
}
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == nullptr) {
ptrs_[i] = value;
ci.array_iter_ = &ptrs_[i];
return std::make_pair(ci, true);
}
}
s = new gtl::FlatSet<const Edge*>;
s->insert(reinterpret_cast<const Edge**>(std::begin(ptrs_)),
reinterpret_cast<const Edge**>(std::end(ptrs_)));
ptrs_[0] = this;
ptrs_[1] = s;
}
auto p = s->insert(value);
ci.tree_iter_ = p.first;
return std::make_pair(ci, p.second);
}
EdgeSet::size_type EdgeSet::erase(key_type key) {
RegisterMutation();
auto s = get_set();
if (!s) {
for (int i = 0; i < kInline; i++) {
if (ptrs_[i] == key) {
size_t n = size();
ptrs_[i] = ptrs_[n - 1];
ptrs_[n - 1] = nullptr;
return 1;
}
}
return 0;
} else {
return s->erase(key);
}
}
} | #include "tensorflow/core/graph/edgeset.h"
#include <set>
#include <vector>
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
class EdgeSetTest : public ::testing::Test {
public:
EdgeSetTest() : edges_(nullptr) {}
~EdgeSetTest() override { delete[] edges_; }
void MakeEdgeSet(int n) {
if (edges_) {
delete[] edges_;
}
edges_ = new Edge[n];
eset_.clear();
model_.clear();
for (int i = 0; i < n; i++) {
eset_.insert(&edges_[i]);
model_.insert(&edges_[i]);
}
}
void CheckSame() {
EXPECT_EQ(model_.size(), eset_.size());
EXPECT_EQ(model_.empty(), eset_.empty());
std::vector<const Edge*> modelv(model_.begin(), model_.end());
std::vector<const Edge*> esetv(eset_.begin(), eset_.end());
std::sort(modelv.begin(), modelv.end());
std::sort(esetv.begin(), esetv.end());
EXPECT_EQ(modelv.size(), esetv.size());
for (size_t i = 0; i < modelv.size(); i++) {
EXPECT_EQ(modelv[i], esetv[i]) << i;
}
}
static constexpr int kInline = 64 / sizeof(const void*);
Edge nonexistent_;
Edge* edges_;
EdgeSet eset_;
std::set<const Edge*> model_;
};
namespace {
TEST_F(EdgeSetTest, Ops) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
CheckSame();
EXPECT_EQ((n == 0), eset_.empty());
EXPECT_EQ(n, eset_.size());
eset_.clear();
model_.clear();
CheckSame();
eset_.insert(&edges_[0]);
model_.insert(&edges_[0]);
CheckSame();
}
}
TEST_F(EdgeSetTest, Exists) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
for (int pos = 0; pos < n; pos++) {
auto p = eset_.insert(&edges_[pos]);
EXPECT_FALSE(p.second);
EXPECT_EQ(&edges_[pos], *p.first);
EXPECT_EQ(1, eset_.erase(&edges_[pos]));
model_.erase(&edges_[pos]);
CheckSame();
}
}
}
TEST_F(EdgeSetTest, DoesNotExist) {
for (int n : {0, 1, 2, kInline + 1}) {
MakeEdgeSet(n);
EXPECT_EQ(0, eset_.erase(&nonexistent_));
auto p = eset_.insert(&nonexistent_);
EXPECT_TRUE(p.second);
EXPECT_EQ(&nonexistent_, *p.first);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/edgeset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/graph/edgeset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
179b2409-311b-4b45-94bb-c7d370c11b9f | cpp | tensorflow/tensorflow | conv_generic | tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_generic_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic.h"
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
namespace {
std::string GenerateUploadByThreads(
const std::string& local_ptr_name, const std::string& name, bool use_ptrs,
const std::string& global_offset_name, const std::string type_conversion,
const std::string& lid_name, int total_work_items, int elements_to_upload) {
std::string c;
std::string offset =
global_offset_name.empty() ? "" : global_offset_name + " + ";
const int groups = elements_to_upload / total_work_items;
const int reminder = elements_to_upload % total_work_items;
const std::string access_start = name + (use_ptrs ? "[" : ".Read(");
const std::string access_end = use_ptrs ? "]" : ")";
for (int i = 0; i < groups; ++i) {
const std::string value = access_start + offset + lid_name + " + " +
std::to_string(total_work_items * i) + access_end;
c += " " + local_ptr_name + "[" + lid_name + " + " +
std::to_string(total_work_items * i) +
"] = " + absl::Substitute(type_conversion, value) + ";\n";
}
if (reminder != 0) {
const std::string value = access_start + offset + lid_name + " + " +
std::to_string(total_work_items * groups) +
access_end;
c += " if (" + lid_name + " < " + std::to_string(reminder) + ") {\n";
c += " " + local_ptr_name + "[" + lid_name + " + " +
std::to_string(total_work_items * groups) +
"] = " + absl::Substitute(type_conversion, value) + ";\n";
c += " }\n";
}
return c;
}
std::string GenerateAsyncUpload(const std::string& local_ptr_name,
const std::string& global_ptr_name,
const std::string& global_offset_name,
int elements_to_upload) {
std::string c;
std::string offset =
global_offset_name.empty() ? "" : " + " + global_offset_name;
c += " async_work_group_copy(" + local_ptr_name + ", " + global_ptr_name +
offset + ", " + std::to_string(elements_to_upload) + ", 0);\n";
return c;
}
std::string GenerateBlockCoords(const int4& block_size,
const int3& work_group_launch_order,
bool linear_spatial, bool linear_all,
bool need_depth, bool need_batch) {
std::string c;
int3 launch_remap;
launch_remap[work_group_launch_order.x] = 0;
launch_remap[work_group_launch_order.y] = 1;
launch_remap[work_group_launch_order.z] = 2;
if (linear_all) {
c += " int linear_all = GLOBAL_ID_0;\n";
if (need_batch) {
c += " int B = linear_all % args.task_size_b;\n";
c += " linear_all = linear_all / args.task_size_b;\n";
}
c += " int DST_X = linear_all % args.task_size_x;\n";
c += " linear_all = linear_all / args.task_size_x;\n";
c += " int DST_Y = linear_all % args.task_size_y;\n";
c += " linear_all = linear_all / args.task_size_y;\n";
if (need_depth) {
c += " int DST_Z = linear_all % args.task_size_z;\n";
c += " linear_all = linear_all / args.task_size_z;\n";
}
c += " int DST_S = linear_all;\n";
} else if (linear_spatial) {
if (work_group_launch_order[0] == 0) {
c += " int linear_spatial = GLOBAL_ID_0;\n";
} else {
c += " int linear_spatial = GROUP_ID_" +
std::to_string(launch_remap[0]) + " * GROUP_SIZE_0 + LOCAL_ID_0;\n";
}
if (need_batch) {
c += " int B = linear_spatial % args.task_size_b;\n";
c += " linear_spatial = linear_spatial / args.task_size_b;\n";
}
c += " int DST_X = linear_spatial % args.task_size_x;\n";
c += " linear_spatial = linear_spatial / args.task_size_x;\n";
c += " int DST_Y = linear_spatial % args.task_size_y;\n";
c += " linear_spatial = linear_spatial / args.task_size_y;\n";
if (need_depth) {
c += " int DST_Z = linear_spatial;\n";
}
if (work_group_launch_order[1] == 1) {
c += " int DST_S = GLOBAL_ID_1;\n";
} else {
c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1;\n";
}
} else {
if (work_group_launch_order[0] == 0) {
c += " int DST_X = GLOBAL_ID_0;\n";
} else {
c += " int DST_X = GROUP_ID_" + std::to_string(launch_remap[0]) +
" * GROUP_SIZE_0 + LOCAL_ID_0;\n";
}
if (need_batch) {
c += " int B = DST_X % args.task_size_b;\n";
c += " DST_X = DST_X / args.task_size_b;\n";
}
std::string global_id_1;
if (work_group_launch_order[1] == 1) {
global_id_1 = "GLOBAL_ID_1";
} else {
global_id_1 = "GROUP_ID_" + std::to_string(launch_remap[1]) +
" * GROUP_SIZE_1 + LOCAL_ID_1";
}
if (need_depth) {
c += " int linear_id_1 = " + global_id_1 + ";\n";
c += " int DST_Y = linear_id_1 % args.task_size_y;\n";
c += " int DST_Z = linear_id_1 / args.task_size_y;\n";
} else {
c += " int DST_Y = " + global_id_1 + ";\n";
}
if (work_group_launch_order[2] == 2) {
c += " int DST_S = GLOBAL_ID_2;\n";
} else {
c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[2]) +
" * GROUP_SIZE_2 + LOCAL_ID_2;\n";
}
}
if (block_size.x != 1) {
c += " DST_X *= " + std::to_string(block_size.x) + ";\n";
}
if (block_size.y != 1) {
c += " DST_Y *= " + std::to_string(block_size.y) + ";\n";
}
if (need_depth && block_size.z != 1) {
c += " DST_Z *= " + std::to_string(block_size.z) + ";\n";
}
if (block_size.w != 1) {
c += " DST_S *= " + std::to_string(block_size.w) + ";\n";
}
return c;
}
}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution2DAttributes& attr,
const GpuInfo& gpu_info, const BHWC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, 1, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0),
kernel_size_(attr.weights.shape.w, attr.weights.shape.h, 1, 1),
dilation_(attr.dilations.w, attr.dilations.h, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {
const int src_slices = DivideRoundUp(attr.weights.shape.i, 4);
const int dst_slices = DivideRoundUp(attr.weights.shape.o, 4);
if (attr.groups != 1) {
conv_params_.groups_support = true;
const int dst_group_slices = dst_slices / attr.groups;
if (dst_group_slices % conv_params_.block_size.w != 0) {
if (conv_params_.block_size.w == 4 && dst_group_slices % 2 == 0) {
conv_params_.block_size.w = 2;
} else {
conv_params_.block_size.w = 1;
}
}
args_.AddInt("src_group_size", src_slices);
args_.AddInt("dst_group_size", dst_slices / attr.groups);
}
}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC& weights_shape, const GpuInfo& gpu_info,
const BHWC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, 1, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0),
kernel_size_(weights_shape.w, weights_shape.h, 1, 1),
dilation_(attr.dilations.w, attr.dilations.h, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, weights_shape,
dst_shape)) {}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const FullyConnectedAttributes& attr,
const GpuInfo& gpu_info, const BHWC* dst_shape)
: GPUOperation(definition),
stride_(1, 1, 1, 1),
padding_(0, 0, 0, 0),
kernel_size_(1, 1, 1, 1),
dilation_(1, 1, 1, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {}
ConvGeneric::ConvGeneric(const OperationDef& definition)
: GPUOperation(definition),
stride_(1, 1, 1, 1),
padding_(0, 0, 0, 0),
kernel_size_(1, 1, 1, 1),
dilation_(1, 1, 1, 1) {}
ConvGeneric::ConvGeneric(ConvGeneric&& operation)
: GPUOperation(std::move(operation)),
stride_(operation.stride_),
padding_(operation.padding_),
kernel_size_(operation.kernel_size_),
dilation_(operation.dilation_),
conv_params_(operation.conv_params_) {}
ConvGeneric::ConvGeneric(const OperationDef& definition,
const Convolution3DAttributes& attr,
const GpuInfo& gpu_info, const BHWDC* dst_shape)
: GPUOperation(definition),
stride_(attr.strides.w, attr.strides.h, attr.strides.d, 1),
padding_(-attr.padding.prepended.w, -attr.padding.prepended.h,
-attr.padding.prepended.d, 0),
kernel_size_(attr.weights.shape.w, attr.weights.shape.h,
attr.weights.shape.d, 1),
dilation_(attr.dilations.w, attr.dilations.h, attr.dilations.d, 1),
conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {}
ConvGeneric& ConvGeneric::operator=(ConvGeneric&& operation) {
if (this != &operation) {
std::swap(stride_, operation.stride_);
std::swap(padding_, operation.padding_);
std::swap(kernel_size_, operation.kernel_size_);
std::swap(dilation_, operation.dilation_);
std::swap(conv_params_, operation.conv_params_);
GPUOperation::operator=(std::move(operation));
}
return *this;
}
void ConvGeneric::GenerateCode(const GpuInfo& gpu_info) {
if (conv_params_.linear_all) {
grid_dimension_ = 1;
} else if (conv_params_.linear_spatial) {
grid_dimension_ = 2;
}
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
if (definition_.src_tensors.size() == 2) {
const DataType weights_type = definition_.GetDataType();
if (conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupI4O4 ||
conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupO4I4) {
definition_.src_tensors[1] = {weights_type, TensorStorageType::BUFFER,
Layout::HWC};
BufferDescriptor desc;
desc.element_type = weights_type;
desc.element_size = 4;
desc.memory_type = conv_params_.weights_upload_type ==
ConvGeneric::WeightsUploadType::CONSTANT_MEM
? MemoryType::CONSTANT
: MemoryType::GLOBAL;
AddSrcBuffer("weights", desc);
} else {
TensorDescriptor desc{weights_type, TensorStorageType::TEXTURE_2D,
Layout::HW};
definition_.src_tensors[1] = desc;
definition_.src_tensors.push_back(desc);
definition_.src_tensors.push_back(desc);
definition_.src_tensors.push_back(desc);
for (int i = 0; i < 4; ++i) {
const std::string name = "weights" + std::to_string(i);
AddSrcTensor(name, definition_.src_tensors[1 + i]);
}
}
}
code_ = GenerateConv(gpu_info, definition_, conv_params_);
if (definition_.precision == CalculationsPrecision::F16 &&
gpu_info.IsPowerVR()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
}
if (gpu_info.IsMali()) {
compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
compiler_options_.push_back(CompilerOptions::kClRegisterAllocation64);
}
if (conv_params_.IsPrivateMemBroadcast() &&
(gpu_info.IsCL20OrHigher() || gpu_info.opencl_info.IsCLVK())) {
compiler_options_.push_back(CompilerOptions::kCl20);
}
bool kernel_is_trivial =
conv_params_.x_kernel_is_1 && conv_params_.y_kernel_is_1;
if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) {
kernel_is_trivial = kernel_is_trivial & conv_params_.z_kernel_is_1;
}
if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx() &&
definition_.precision == CalculationsPrecision::F16 &&
kernel_is_trivial) {
compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd);
}
}
absl::Status ConvGeneric::BindArguments(ArgumentsBinder* args) {
const int task_size_b = dst_[0]->Batch();
const int task_size_x =
DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);
const int task_size_y =
DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);
const int task_size_z =
DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);
RETURN_IF_ERROR(args->SetInt("task_size_b", task_size_b));
RETURN_IF_ERROR(args->SetInt("task_size_x", task_size_x));
RETURN_IF_ERROR(args->SetInt("task_size_y", task_size_y));
RETURN_IF_ERROR(args->SetInt("task_size_z", task_size_z));
return absl::OkStatus();
}
int3 ConvGeneric::GetGridSize() const {
const int task_size_b = dst_[0]->Batch();
const int task_size_x =
DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x);
const int task_size_y =
DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y);
const int task_size_z =
DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z);
const int task_size_s =
DivideRoundUp(dst_[0]->Slices(), conv_params_.block_size.w);
int3 wg;
if (conv_params_.linear_all) {
return int3(
task_size_x * task_size_b * task_size_y * task_size_z * task_size_s, 1,
1);
} else if (conv_params_.linear_spatial) {
return int3(task_size_x * task_size_b * task_size_y * task_size_z,
task_size_s, 1);
} else {
return int3(task_size_x * task_size_b, task_size_y * task_size_z,
task_size_s);
}
}
void ConvGeneric::GetPossibleKernelWorkGroups(
TuningType tuning_type, const GpuInfo& gpu_info,
const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
if (conv_params_.weights_upload_type ==
WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP ||
conv_params_.weights_upload_type ==
WeightsUploadType::LOCAL_MEM_BY_THREADS ||
conv_params_.fixed_work_group_size) {
work_groups->push_back(work_group_size_);
return;
}
GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_,
work_groups);
}
std::string ConvGeneric::GenerateConv(const GpuInfo& gpu_info,
const OperationDef& op_def,
const ConvParams& conv_params) {
const auto& src_def = op_def.src_tensors[0];
auto generate_id = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string id;
if (src_def.HasAxis(Axis::WIDTH)) {
id += "_w" + x;
}
if (src_def.HasAxis(Axis::HEIGHT)) {
id += "_h" + y;
}
if (src_def.HasAxis(Axis::DEPTH)) {
id += "_d" + z;
}
return id;
};
auto generate_id_full = [&](const std::string& x, const std::string& y,
const std::string& z, const std::string& s) {
return generate_id(x, y, z) + "_s" + s;
};
auto generate_check = [&](const std::string& x, const std::string& y,
const std::string& z) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH};
const std::vector<std::string> names{"in_x", "in_y", "in_z"};
const std::vector<bool> is_1{conv_params_.x_kernel_is_1,
conv_params_.y_kernel_is_1,
conv_params_.z_kernel_is_1};
const std::vector<std::string> coords{x, y, z};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_def.HasAxis(axis) && !src_def.SupportsZeroClamp(axis, gpu_info) &&
!is_1[i]) {
if (!check.empty()) {
check += " && ";
}
check += names[i] + coords[i];
}
}
return check;
};
if (!conv_params_.x_kernel_is_1) {
args_.AddInt("stride_x", stride_.x);
args_.AddInt("padding_x", padding_.x);
args_.AddInt("kernel_size_x", kernel_size_.x);
args_.AddInt("dilation_x", dilation_.x);
}
if (!conv_params_.y_kernel_is_1) {
args_.AddInt("stride_y", stride_.y);
args_.AddInt("padding_y", padding_.y);
args_.AddInt("kernel_size_y", kernel_size_.y);
args_.AddInt("dilation_y", dilation_.y);
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
args_.AddInt("stride_z", stride_.z);
args_.AddInt("padding_z", padding_.z);
args_.AddInt("kernel_size_z", kernel_size_.z);
args_.AddInt("dilation_z", dilation_.z);
}
args_.AddInt("task_size_b");
args_.AddInt("task_size_x");
args_.AddInt("task_size_y");
args_.AddInt("task_size_z");
const int wg_total_size =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string barrier =
wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
? "SIMD_LOCAL_MEM_BARRIER"
: "LOCAL_MEM_BARRIER";
const bool need_local_mem =
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP;
const int local_mem_size =
conv_params.block_size.w * 4 * conv_params.src_depth_loop_size;
const bool use_simd_broadcast = conv_params.IsPrivateMemBroadcast();
const int simd_size = conv_params.simd_size;
const bool late_oob_check = need_local_mem || use_simd_broadcast;
const std::string weights_space =
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::CONSTANT_MEM
? "__constant"
: "__global";
std::string c;
if (use_simd_broadcast && gpu_info.IsApiOpenCl()) {
if (gpu_info.opencl_info.cl_version == OpenClVersion::kCl2_0 ||
gpu_info.SupportsExtension("cl_khr_subgroups")) {
c += "#pragma OPENCL EXTENSION cl_khr_subgroups : enable\n";
} else if (gpu_info.SupportsExtension("cl_intel_subgroups")) {
c += "#pragma OPENCL EXTENSION cl_intel_subgroups : enable\n";
}
}
const int4 block_size = conv_params.block_size;
if (conv_params.fixed_work_group_size && gpu_info.IsApiOpenCl()) {
c += "__attribute__((reqd_work_group_size(" +
std::to_string(work_group_size_.x) + ", " +
std::to_string(work_group_size_.y) + ", " +
std::to_string(work_group_size_.z) + ")))\n";
}
if (use_simd_broadcast && gpu_info.IsApiOpenCl() &&
gpu_info.SupportsExtension("cl_intel_required_subgroup_size")) {
c += "__attribute__((intel_reqd_sub_group_size(" +
std::to_string(simd_size) + ")))\n";
}
std::string dst_oob_check;
if (src_def.HasAxis(Axis::DEPTH)) {
if (conv_params.linear_all) {
dst_oob_check = "DST_S >= args.dst_tensor.Slices()";
} else if (conv_params.linear_spatial) {
dst_oob_check =
"DST_Z >= args.dst_tensor.Depth() || DST_S >= "
"args.dst_tensor.Slices()";
} else {
dst_oob_check =
"DST_X >= args.dst_tensor.Width() || DST_Z >= "
"args.dst_tensor.Depth() || DST_S >= args.dst_tensor.Slices()";
}
} else {
if (conv_params.linear_all) {
dst_oob_check = "DST_S >= args.dst_tensor.Slices()";
} else if (conv_params.linear_spatial) {
dst_oob_check =
"DST_Y >= args.dst_tensor.Height() || DST_S >= "
"args.dst_tensor.Slices()";
} else {
dst_oob_check =
"DST_X >= args.dst_tensor.Width() || DST_Y >= "
"args.dst_tensor.Height() || DST_S >= args.dst_tensor.Slices()";
}
}
c += "MAIN_FUNCTION($0) {\n";
c += GenerateBlockCoords(conv_params.block_size, work_group_launch_order_,
conv_params.linear_spatial, conv_params.linear_all,
src_def.HasAxis(Axis::DEPTH),
src_def.HasAxis(Axis::BATCH));
if (src_def.HasAxis(Axis::BATCH)) {
c += " args.src_tensor.SetBatchRef(B);\n";
c += " args.dst_tensor.SetBatchRef(B);\n";
}
if (!conv_params.need_dst_loop) {
c += " DST_S = 0;\n";
}
c += " if (DST_S >= args.dst_tensor.Slices()) return;\n";
if (!late_oob_check) {
c += " if (" + dst_oob_check + ") {\n";
c += " return;\n";
c += " }\n";
}
if (conv_params.groups_support) {
c += " int conv_group_id = DST_S / args.dst_group_size;\n";
c += " int src_start_slice = conv_group_id * args.src_group_size;\n";
c += " int src_end_slice = src_start_slice + args.src_group_size;\n";
}
const std::string src_group_start_slice =
conv_params.groups_support ? "src_start_slice" : "0";
const std::string src_group_end_slice =
conv_params.groups_support ? "src_end_slice" : "args.src_tensor.Slices()";
const std::string src_group_slices = conv_params.groups_support
? "args.src_group_size"
: "args.src_tensor.Slices()";
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
if (conv_params.linear_spatial) {
c += " int lid = LOCAL_ID_0;\n";
} else {
c += " int lid = LOCAL_ID_1 * " + std::to_string(work_group_size_.x) +
" + LOCAL_ID_0;\n";
}
}
if (use_simd_broadcast) {
c += " int simd_id = SUB_GROUP_LOCAL_ID;\n";
}
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
c += " ACCUM_FLT4 r" + generate_id_full(xind, yind, zind, sind) +
" = INIT_ACCUM_FLT4(0.0f);\n";
}
}
}
}
if (!conv_params_.x_kernel_is_1) {
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string xc = "(DST_X + " + xind + ")";
c += " int xc" + xind + " = " + xc +
" * args.stride_x + args.padding_x;\n";
}
} else {
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
c += " int xc" + xind + " = DST_X + " + xind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) {
c += " xc" + xind + " = clamp(xc" + xind +
", 0, args.src_tensor.Width() - 1);\n";
}
}
}
if (!conv_params_.y_kernel_is_1) {
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
const std::string yc = "(DST_Y + " + yind + ")";
c += " int yc" + yind + " = " + yc +
" * args.stride_y + args.padding_y;\n";
}
} else {
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
c += " int yc" + yind + " = DST_Y + " + yind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) {
c += " yc" + yind + " = clamp(yc" + yind +
", 0, args.src_tensor.Height() - 1);\n";
}
}
}
if (src_def.HasAxis(Axis::DEPTH)) {
if (!conv_params_.z_kernel_is_1) {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
const std::string zc = "(DST_Z + " + zind + ")";
c += " int zc" + zind + " = " + zc +
" * args.stride_z + args.padding_z;\n";
}
} else {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
c += " int zc" + zind + " = DST_Z + " + zind + ";\n";
if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) {
c += " zc" + zind + " = clamp(zc" + zind +
", 0, args.src_tensor.Depth() - 1);\n";
}
}
}
}
bool trivial_kernel_size =
conv_params_.x_kernel_is_1 && conv_params_.y_kernel_is_1;
if (src_def.HasAxis(Axis::DEPTH)) {
trivial_kernel_size = trivial_kernel_size && conv_params_.z_kernel_is_1;
}
const std::string weights_global_ptr =
weights_space + " " + ToCLDataType(conv_params.weights_data_type, 4) +
"*";
DataType summable_data_type = conv_params.weights_data_type;
if (gpu_info.IsPowerVR() &&
op_def.precision == CalculationsPrecision::F32_F16 &&
conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
summable_data_type = DataType::FLOAT32;
}
if (need_local_mem) {
c += " __local " + ToCLDataType(summable_data_type, 4) +
" weights_cache[" + std::to_string(local_mem_size) + "];\n";
} else if (conv_params.AreWeightsBuffer() &&
gpu_info.SupportsPointersInKernels()) {
c += " " + weights_global_ptr + " weights_cache;\n";
} else if (!trivial_kernel_size) {
c += " int filter_offset = 0;\n";
}
if (conv_params.AreWeightsBuffer()) {
std::string offset;
if (conv_params.different_weights_for_height) {
offset = "(DST_S * args.src_tensor.Height() + DST_Y * " +
std::to_string(block_size.w) +
") * 4 * args.src_tensor.Slices()";
} else {
std::string kernel_spatial_offset = "";
if (!conv_params_.x_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_x";
}
if (!conv_params_.y_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_y";
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
kernel_spatial_offset += " * args.kernel_size_z";
}
offset = "DST_S * 4 * " + src_group_slices + kernel_spatial_offset;
}
if (gpu_info.SupportsPointersInKernels()) {
c += " " + weights_global_ptr +
" filters_loc = args.weights.GetPtr() + " + offset + ";\n";
} else {
c += " int filters_offset = " + offset + ";\n";
}
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
c += " for (int kz = 0; kz < args.kernel_size_z; ++kz) {\n";
for (int z = 0; z < block_size.z; ++z) {
const std::string zck = "zck" + std::to_string(z);
c += " int zck" + std::to_string(z) + " = kz * args.dilation_z + zc" +
std::to_string(z) + ";\n";
if (!src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) {
c += " bool in_z" + std::to_string(z) + " = " + zck + " >= 0 && " +
zck + " < args.src_tensor.Depth();\n";
if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) {
c += " " + zck + " = clamp(" + zck +
", 0, args.src_tensor.Depth() - 1);\n";
}
}
}
}
if (!conv_params_.y_kernel_is_1) {
c += " for (int ky = 0; ky < args.kernel_size_y; ++ky) {\n";
for (int y = 0; y < block_size.y; ++y) {
const std::string yck = "yck" + std::to_string(y);
c += " int " + yck + " = ky * args.dilation_y + yc" + std::to_string(y) +
";\n";
if (!src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
c += " bool in_y" + std::to_string(y) + " = " + yck + " >= 0 && " +
yck + " < args.src_tensor.Height();\n";
if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) {
c += " " + yck + " = clamp(" + yck +
", 0, args.src_tensor.Height() - 1);\n";
}
}
}
}
if (!conv_params_.x_kernel_is_1) {
c += " for (int kx = 0; kx < args.kernel_size_x; ++kx) {\n";
for (int x = 0; x < block_size.x; ++x) {
const std::string xck = "xck" + std::to_string(x);
c += " int xck" + std::to_string(x) + " = kx * args.dilation_x + xc" +
std::to_string(x) + ";\n";
if (!src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
c += " bool in_x" + std::to_string(x) + " = " + xck + " >= 0 && " +
xck + " < args.src_tensor.Width();\n";
if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) {
c += " " + xck + " = clamp(" + xck +
", 0, args.src_tensor.Width() - 1);\n";
}
}
}
}
const bool need_multiple_slice_strides =
src_def.ReturnsZeroForNegOneRead(gpu_info) && !trivial_kernel_size;
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string xc = conv_params.x_kernel_is_1 ? "xc" + xind : "xck" + xind;
std::string yc = conv_params.y_kernel_is_1 ? "yc" + yind : "yck" + yind;
const std::string id = generate_id(xind, yind, zind);
std::string coords = "" + xc + ", " + yc;
if (src_def.HasAxis(Axis::DEPTH)) {
std::string zc =
conv_params.z_kernel_is_1 ? "zc" + zind : "zck" + zind;
coords += ", " + zc;
}
if (src_def.IsLinear()) {
c += " int addr" + id + " = args.src_tensor.GetAddress(" + coords +
", " + src_group_start_slice + ");\n";
if (need_multiple_slice_strides) {
const std::string check = generate_check(xind, yind, zind);
c += " addr" + id + " = select(-1, addr" + id + ", (" + check +
"));\n";
c += " int ds" + id +
" = select(0, args.src_tensor.SliceStride(), (" + check +
"));\n";
}
}
}
}
}
if (src_def.IsLinear() && !need_multiple_slice_strides) {
c += " int ds = args.src_tensor.SliceStride();\n";
}
auto declare_src = [&]() {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id(xind, yind, zind);
c += " " + ToCLDataType(summable_data_type, 4) + " src" + id +
";\n";
}
}
}
};
const bool conditional_read = gpu_info.IsMali();
auto read_src = [&]() {
const std::string read_as_type = ToCLDataType(summable_data_type);
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string id = generate_id(xind, yind, zind);
const std::string check = generate_check(xind, yind, zind);
std::string address;
if (src_def.IsLinear()) {
address = "addr" + id;
} else {
std::string xc =
conv_params.x_kernel_is_1 ? "xc" + xind : "xck" + xind;
std::string yc =
conv_params.y_kernel_is_1 ? "yc" + yind : "yck" + yind;
address = "" + xc + ", " + yc;
if (src_def.HasAxis(Axis::DEPTH)) {
std::string zc =
conv_params.z_kernel_is_1 ? "zc" + zind : "zck" + zind;
address += ", " + zc;
}
address += ", s";
}
if (src_def.ReturnsZeroForNegOneRead(gpu_info)) {
c += " src" + id + " = args.src_tensor.Read<" + read_as_type +
">(" + address + ");\n";
const std::string ds = trivial_kernel_size ? "ds" : "ds" + id;
c += " " + address + " += " + ds + ";\n";
} else {
if (!check.empty()) {
if (conditional_read) {
c += " src" + id + " = " + check +
" ? args.src_tensor.Read<" + read_as_type + ">(" +
address + ") : INIT_FLT4(0.0f);\n";
} else {
c += " src" + id + " = args.src_tensor.Read<" +
read_as_type + ">(" + address + ") * INIT_FLT(" + check +
");\n";
}
} else {
c += " src" + id + " = args.src_tensor.Read<" + read_as_type +
">(" + address + ");\n";
}
if (src_def.IsLinear()) {
c += " " + address + " += ds;\n";
}
}
}
}
}
};
bool use_fma = gpu_info.IsAMD() && gpu_info.IsApiOpenCl();
auto conv_core = [&](int shared_offset) {
const std::string channels[] = {"x", "y", "z", "w"};
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
if (op_def.precision != CalculationsPrecision::F32_F16 ||
summable_data_type == DataType::FLOAT32) {
for (int ch = 0; ch < 4; ++ch) {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string R = "r" + generate_id_full(xind, yind, zind, sind);
std::string S = "src" + generate_id(xind, yind, zind);
if (use_simd_broadcast) {
int simd_id = (s * 4 + ch + shared_offset) / simd_size;
int thread_id = (s * 4 + ch + shared_offset) % simd_size;
std::string w_val_x = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".x, " +
std::to_string(thread_id) + "u)";
std::string w_val_y = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".y, " +
std::to_string(thread_id) + "u)";
std::string w_val_z = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".z, " +
std::to_string(thread_id) + "u)";
std::string w_val_w = "SUB_GROUP_BROADCAST(simd_w" +
std::to_string(simd_id) + ".w, " +
std::to_string(thread_id) + "u)";
if (GetWeightsDescription().IsI4O4()) {
c += " " + R + ".x += " + w_val_x + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".y += " + w_val_y + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".z += " + w_val_z + " * " + S + "." +
channels[ch] + ";\n";
c += " " + R + ".w += " + w_val_w + " * " + S + "." +
channels[ch] + ";\n";
} else {
c += " " + R + "." + channels[ch] + " += " + w_val_x +
" * " + S + ".x;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_y +
" * " + S + ".y;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_z +
" * " + S + ".z;\n";
c += " " + R + "." + channels[ch] + " += " + w_val_w +
" * " + S + ".w;\n";
}
} else {
const std::string weight_id =
std::to_string(s * 4 + ch + shared_offset);
std::string w_val;
if (conv_params.AreWeightsBuffer()) {
if (need_local_mem ||
gpu_info.SupportsPointersInKernels()) {
w_val = "weights_cache[" + weight_id + "]";
} else {
w_val = "args.weights.Read(filters_offset + " +
weight_id + ")";
}
} else {
w_val = "f" + weight_id;
}
if (GetWeightsDescription().IsI4O4()) {
if (use_fma) {
c += " " + R + " = fma(" + w_val + ", " + S + "." +
channels[ch] + ", " + R + ");\n";
} else {
c += " " + R + " += " + w_val + " * " + S + "." +
channels[ch] + ";\n";
}
} else {
c += " " + R + "." + channels[ch] + " += dot(" + w_val +
", " + S + ");\n";
}
}
}
}
}
}
} else {
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
std::string R = "r" + generate_id_full(xind, yind, zind, sind);
std::string S = "src" + generate_id(xind, yind, zind);
std::vector<std::string> F(4);
for (int i = 0; i < 4; ++i) {
std::string weight_id =
std::to_string(s * 4 + i + shared_offset);
if (conv_params.AreWeightsBuffer()) {
if (need_local_mem || gpu_info.SupportsPointersInKernels()) {
F[i] = "weights_cache[" + weight_id + "]";
} else {
F[i] =
"args.weights.Read(filters_offset + " + weight_id + ")";
}
} else {
F[i] = "f" + weight_id;
}
}
if (GetWeightsDescription().IsI4O4()) {
c += " " + R + " += TO_ACCUM_TYPE(" + S + ".x * " + F[0] +
" + " + S + ".y * " + F[1] + " + " + S + ".z * " + F[2] +
" + " + S + ".w * " + F[3] + ");\n";
} else {
c += " " + R + ".x += dot(" + S + ", " + F[0] + ");\n";
c += " " + R + ".y += dot(" + S + ", " + F[1] + ");\n";
c += " " + R + ".z += dot(" + S + ", " + F[2] + ");\n";
c += " " + R + ".w += dot(" + S + ", " + F[3] + ");\n";
}
}
}
}
}
}
};
c += " int s = " + src_group_start_slice + ";\n";
if (conv_params.need_src_loop) {
c += " do {\n";
}
declare_src();
const int total_work_items =
work_group_size_.x * work_group_size_.y * work_group_size_.z;
const std::string type_conversion = GetTypeConversion(
gpu_info, conv_params.weights_data_type, summable_data_type, 4);
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP) {
    c += GenerateAsyncUpload("weights_cache", "filters_loc",
                             /*global_offset_name=*/"", local_mem_size);
} else if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
if (gpu_info.IsApiMetal() && wg_total_size == 32 &&
gpu_info.IsWaveSizeEqualTo32()) {
c += " SIMDGROUP_BARRIER(mem_flags::mem_none);\n";
} else {
c += " " + barrier + ";\n";
}
if (gpu_info.SupportsPointersInKernels()) {
      c += GenerateUploadByThreads("weights_cache", "filters_loc",
                                   /*use_ptrs=*/true, /*global_offset_name=*/"",
                                   type_conversion, "lid", total_work_items,
                                   local_mem_size);
} else {
      c += GenerateUploadByThreads("weights_cache", "args.weights",
                                   /*use_ptrs=*/false,
                                   /*global_offset_name=*/"filters_offset",
                                   type_conversion, "lid", total_work_items,
                                   local_mem_size);
}
} else if (use_simd_broadcast) {
int parts = local_mem_size / simd_size;
int reminder = local_mem_size % simd_size;
const std::string read_start = gpu_info.SupportsPointersInKernels()
? "filters_loc["
: "args.weights.Read(filters_offset + ";
const std::string read_end =
gpu_info.SupportsPointersInKernels() ? "]" : ")";
for (int i = 0; i < parts; ++i) {
const std::string weights_index =
"simd_id + " + std::to_string(i * simd_size);
c += " FLT4 simd_w" + std::to_string(i) + " = " + read_start +
weights_index + read_end + ";\n";
}
if (reminder) {
const std::string weights_index =
"simd_id + " + std::to_string(parts * simd_size);
c += " FLT4 simd_w" + std::to_string(parts) + ";\n";
c += " if (simd_id < " + std::to_string(reminder) + ") {\n";
c += " simd_w" + std::to_string(parts) + " = " + read_start +
weights_index + read_end + ";\n";
c += " }\n";
}
} else if (conv_params.AreWeightsBuffer()) {
if (gpu_info.SupportsPointersInKernels()) {
c += " weights_cache = filters_loc;\n";
}
} else {
for (int dst_s = 0; dst_s < block_size.w; ++dst_s) {
std::string f_y = trivial_kernel_size ? "s" : "filter_offset";
if (trivial_kernel_size && conv_params.groups_support) {
f_y = "s - src_start_slice";
}
if (conv_params.different_weights_for_height) {
f_y = "DST_Y * args.src_tensor.Slices() + s";
}
c += absl::Substitute(
R"( FLT4 f$2 = args.weights0.Read(DST_S + $0, $1);
FLT4 f$3 = args.weights1.Read(DST_S + $0, $1);
FLT4 f$4 = args.weights2.Read(DST_S + $0, $1);
FLT4 f$5 = args.weights3.Read(DST_S + $0, $1);
)",
dst_s, f_y, dst_s * 4 + 0, dst_s * 4 + 1, dst_s * 4 + 2,
dst_s * 4 + 3);
}
if (!trivial_kernel_size) {
c += " filter_offset++;\n";
}
}
read_src();
c += " s += 1;\n";
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " " + barrier + ";\n";
}
conv_core(0);
for (int i = 1; i < conv_params.src_depth_loop_size; ++i) {
read_src();
conv_core(i * block_size.w * 4);
c += " s += 1;\n";
}
if (conv_params.AreWeightsBuffer()) {
if (gpu_info.SupportsPointersInKernels()) {
c += " filters_loc += " + std::to_string(local_mem_size) + ";\n";
} else {
c += " filters_offset += " + std::to_string(local_mem_size) + ";\n";
}
}
if (conv_params.need_src_loop) {
c += " } while (s < " + src_group_end_slice + ");\n";
}
if (!conv_params.x_kernel_is_1) {
c += " };\n";
}
if (!conv_params.y_kernel_is_1) {
c += " };\n";
}
if (src_def.HasAxis(Axis::DEPTH) && !conv_params_.z_kernel_is_1) {
c += " };\n";
}
if (conv_params.AreWeightsBuffer()) {
if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP) {
c += GenerateAsyncUpload("weights_cache", "args.biases.GetPtr()", "DST_S",
block_size.w);
} else if (conv_params.weights_upload_type ==
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
c += " " + barrier + ";\n";
      c += GenerateUploadByThreads("weights_cache", "args.biases",
                                   /*use_ptrs=*/false,
                                   /*global_offset_name=*/"DST_S",
                                   type_conversion, "lid", total_work_items,
                                   block_size.w);
c += " " + barrier + ";\n";
} else if (gpu_info.SupportsPointersInKernels()) {
c += " weights_cache = args.biases.GetPtr() + DST_S;\n";
}
}
if (late_oob_check) {
c += " if (" + dst_oob_check + ") {\n";
c += " return;\n";
c += " }\n";
}
auto generate_dst_check = [&](int x, int y, int z) {
std::string check;
const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH};
const std::vector<std::string> names{"Width()", "Height()", "Depth()"};
std::vector<std::string> coords(3);
coords[0] = "DST_X + " + std::to_string(x);
coords[1] = "DST_Y + " + std::to_string(y);
coords[2] = "DST_Z + " + std::to_string(z);
const std::vector<int> ids{x, y, z};
for (int i = 0; i < axes.size(); ++i) {
const auto& axis = axes[i];
if (src_def.HasAxis(axis) && ids[i] != 0) {
if (!check.empty()) {
check += " && ";
}
check += coords[i] + " < args.dst_tensor." + names[i];
}
}
return check;
};
for (int s = 0; s < block_size.w; ++s) {
const std::string sind = std::to_string(s);
c += " if (DST_S + " + sind + " >= args.dst_tensor.Slices()) return;\n";
c += " {\n";
if (conv_params.AreWeightsBuffer() &&
(need_local_mem || gpu_info.SupportsPointersInKernels())) {
c += " FLT4 bias_val = TO_FLT4(weights_cache[" + sind + "]);\n";
} else {
c += " FLT4 bias_val = args.biases.Read(DST_S + " + sind + ");\n";
}
for (int z = 0; z < block_size.z; ++z) {
const std::string zind = std::to_string(z);
for (int y = 0; y < block_size.y; ++y) {
const std::string yind = std::to_string(y);
for (int x = 0; x < block_size.x; ++x) {
const std::string xind = std::to_string(x);
const std::string id = generate_id_full(xind, yind, zind, sind);
const std::string check = generate_dst_check(x, y, z);
std::string coords = "DST_X + " + xind + ", DST_Y + " + yind;
if (src_def.HasAxis(Axis::DEPTH)) {
coords += ", DST_Z + " + zind;
}
coords += ", DST_S + " + sind;
if (!check.empty()) {
c += " if (" + check + ") {\n";
} else {
c += " {\n";
}
c += " FLT4 res = TO_FLT4(r" + id + ") + bias_val;\n";
c += " args.dst_tensor.Write(res, " + coords + ");\n";
c += " }\n";
}
}
}
c += " }\n";
}
c += "}\n";
return c;
}
int GetGroupsCount(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x, wg_size.x) * DivideRoundUp(grid_y, wg_size.y) *
DivideRoundUp(grid_z, wg_size.z);
}
int GetGroupsCountForLinearWH(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x * grid_y, wg_size.x) *
DivideRoundUp(grid_z, wg_size.y);
}
int GetGroupsCountForLinearWHS(const BHWC& dst_shape, const int3& wg_size,
const int4& block_size) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int grid_x = DivideRoundUp(dst_shape.w, block_size.x) * dst_shape.b;
int grid_y = DivideRoundUp(dst_shape.h, block_size.y);
int grid_z = DivideRoundUp(dst_slices, block_size.w);
return DivideRoundUp(grid_x * grid_y * grid_z, wg_size.x);
}
bool IsKernelXIs1(const Convolution2DAttributes& attr) {
return attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 && attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
}
bool IsKernelYIs1(const Convolution2DAttributes& attr) {
return attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 && attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
}
int GetMaximumPossibleWavesCount(const AppleInfo& apple_info,
const BHWC& dst_shape) {
if (apple_info.IsLocalMemoryPreferredOverGlobal()) {
return GetGroupsCountForLinearWH(dst_shape, {32, 1, 1}, int4(1, 1, 1, 1));
} else {
return GetGroupsCountForLinearWHS(dst_shape, {32, 1, 1}, int4(1, 1, 1, 1));
}
}
int GetRecommendedBlockSize(const AppleInfo& apple_info,
const BHWC& dst_shape) {
const int max_waves = GetMaximumPossibleWavesCount(apple_info, dst_shape);
const int cu_count = apple_info.GetComputeUnitsCount();
if (max_waves >= cu_count * 64) {
return 8;
} else if (max_waves >= cu_count * 32) {
return 4;
} else if (max_waves >= cu_count * 16) {
return 2;
} else {
return 1;
}
}
struct WorkGroupSizeOption {
enum class ThreadMapping { kDefault, kLinearSpatial, kLinearAll };
int3 work_group_size;
int work_groups_count;
ThreadMapping thread_mapping;
float penalty = 1.0f;
};
WorkGroupSizeOption CreateWorkGroupSizeOption(
const int3& work_group_size,
WorkGroupSizeOption::ThreadMapping mapping_type, float penalty,
const BHWC& dst_shape, const int4& block_size) {
WorkGroupSizeOption wg;
wg.work_group_size = work_group_size;
wg.thread_mapping = mapping_type;
wg.penalty = penalty;
if (mapping_type == WorkGroupSizeOption::ThreadMapping::kDefault) {
wg.work_groups_count =
GetGroupsCount(dst_shape, work_group_size, block_size);
} else if (mapping_type ==
WorkGroupSizeOption::ThreadMapping::kLinearSpatial) {
wg.work_groups_count =
GetGroupsCountForLinearWH(dst_shape, work_group_size, block_size);
} else if (mapping_type == WorkGroupSizeOption::ThreadMapping::kLinearAll) {
wg.work_groups_count =
GetGroupsCountForLinearWHS(dst_shape, work_group_size, block_size);
}
return wg;
}
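// Conv parameters for Apple GPUs that prefer local memory (A7/A8 class):
// weights are staged through threadgroup memory and the work group shape is
// picked by minimizing penalized total thread count over a fixed candidate set.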
ConvGeneric::ConvParams GetConvParamsForA7A8(const AppleInfo& apple_info,
bool x_kernel_is_1,
bool y_kernel_is_1, int src_slices,
const BHWC& dst_shape) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int blk_total_size = GetRecommendedBlockSize(apple_info, dst_shape);
int3 block_size = int3(1, 1, 1);
if (blk_total_size >= 4 && (dst_slices % 4 == 0 || dst_slices >= 16)) {
block_size.z = 4;
blk_total_size /= 4;
} else if (blk_total_size >= 2 && (dst_slices % 2 == 0 || dst_slices >= 4)) {
block_size.z = 2;
blk_total_size /= 2;
}
if (blk_total_size >= 4) {
block_size.x = 2;
block_size.y = 2;
blk_total_size /= 4;
} else if (blk_total_size >= 2) {
if (dst_shape.w % 2 != 0 && dst_shape.h % 2 == 0) {
block_size.y = 2;
} else {
block_size.x = 2;
}
blk_total_size /= 2;
}
ConvGeneric::ConvParams params;
params.weights_upload_type =
ConvGeneric::WeightsUploadType::LOCAL_MEM_BY_THREADS;
params.x_kernel_is_1 = x_kernel_is_1;
params.y_kernel_is_1 = y_kernel_is_1;
params.src_depth_loop_size = 1;
params.block_size.x = block_size.x;
params.block_size.y = block_size.y;
params.block_size.z = 1;
params.block_size.w = block_size.z;
params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
std::vector<WorkGroupSizeOption> options;
options.push_back(CreateWorkGroupSizeOption(
{8, 4, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.0f, dst_shape,
params.block_size));
if (!apple_info.IsFamilyApple1()) {
options.push_back(CreateWorkGroupSizeOption(
{4, 4, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{4, 2, 1}, WorkGroupSizeOption::ThreadMapping::kDefault, 1.25f,
dst_shape, params.block_size));
}
options.push_back(CreateWorkGroupSizeOption(
{32, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.0f,
dst_shape, params.block_size));
if (!apple_info.IsFamilyApple1()) {
options.push_back(CreateWorkGroupSizeOption(
{16, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{8, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearSpatial, 1.25f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{32, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.0f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{16, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.01f,
dst_shape, params.block_size));
options.push_back(CreateWorkGroupSizeOption(
{8, 1, 1}, WorkGroupSizeOption::ThreadMapping::kLinearAll, 3.1 * 1.25f,
dst_shape, params.block_size));
}
float optimum = options[0].work_groups_count * options[0].penalty *
options[0].work_group_size.x * options[0].work_group_size.y *
options[0].work_group_size.z;
int optimum_index = 0;
for (int i = 1; i < options.size(); ++i) {
float local_optimum = options[i].work_groups_count * options[i].penalty *
options[i].work_group_size.x *
options[i].work_group_size.y *
options[i].work_group_size.z;
if (local_optimum < optimum) {
optimum = local_optimum;
optimum_index = i;
}
}
WorkGroupSizeOption optimum_wg = options[optimum_index];
if (optimum_wg.thread_mapping ==
WorkGroupSizeOption::ThreadMapping::kLinearSpatial) {
params.linear_spatial = true;
params.linear_all = false;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(1, 0, 2);
} else if (optimum_wg.thread_mapping ==
WorkGroupSizeOption::ThreadMapping::kLinearAll) {
params.linear_spatial = false;
params.linear_all = true;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(0, 1, 2);
params.weights_upload_type = ConvGeneric::WeightsUploadType::GLOBAL_MEM;
} else {
params.linear_spatial = false;
params.linear_all = false;
params.work_group_size = optimum_wg.work_group_size;
params.work_group_launch_order = int3(2, 0, 1);
}
int total_elements = params.block_size.x * params.block_size.y *
params.block_size.z * params.block_size.w;
if (total_elements == 1) {
if (src_slices % 4 == 0) {
params.src_depth_loop_size = 4;
} else if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
} else if (total_elements == 2) {
if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
}
if (params.src_depth_loop_size == src_slices) {
params.need_src_loop = false;
}
if (params.block_size.w == dst_slices) {
params.need_dst_loop = false;
}
const bool use_filters_constants =
!params.need_dst_loop && !params.need_src_loop && params.x_kernel_is_1 &&
params.y_kernel_is_1;
if (use_filters_constants) {
params.weights_upload_type = ConvGeneric::WeightsUploadType::CONSTANT_MEM;
}
return params;
}
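// Conv parameters for A9 and newer Apple GPUs: weights are read from global
// memory and the thread mapping is chosen by comparing the work group counts
// of the default, linear-WH and linear-WHS mappings.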
ConvGeneric::ConvParams GetConvParamsForA9AndHigher(const AppleInfo& apple_info,
bool x_kernel_is_1,
bool y_kernel_is_1,
int src_slices,
const BHWC& dst_shape) {
const int dst_slices = DivideRoundUp(dst_shape.c, 4);
int blk_total_size = GetRecommendedBlockSize(apple_info, dst_shape);
int3 block_size = int3(1, 1, 1);
if (blk_total_size >= 2 && apple_info.IsBionic()) {
if (dst_shape.h % 2 != 0 && dst_shape.w % 2 == 0) {
block_size.x = 2;
} else {
block_size.y = 2;
}
blk_total_size /= 2;
}
if (blk_total_size >= 4 && (dst_slices % 4 == 0 || dst_slices >= 16)) {
block_size.z = 4;
blk_total_size /= 4;
} else if (blk_total_size >= 2 && (dst_slices % 2 == 0 || dst_slices >= 4)) {
block_size.z = 2;
blk_total_size /= 2;
}
if (blk_total_size >= 4 && dst_slices == 3) {
block_size.z = 3;
blk_total_size /= 4;
}
ConvGeneric::ConvParams params;
params.weights_upload_type = ConvGeneric::WeightsUploadType::GLOBAL_MEM;
params.x_kernel_is_1 = x_kernel_is_1;
params.y_kernel_is_1 = y_kernel_is_1;
params.src_depth_loop_size = 1;
params.block_size.x = block_size.x;
params.block_size.y = block_size.y;
params.block_size.z = 1;
params.block_size.w = block_size.z;
params.linear_spatial = false;
params.linear_all = false;
params.work_group_size = int3(8, 4, 1);
params.work_group_launch_order = int3(2, 0, 1);
params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
int g1 = GetGroupsCount(dst_shape, params.work_group_size, params.block_size);
int g2 = GetGroupsCountForLinearWH(dst_shape, {32, 1, 1}, params.block_size);
int g3 = GetGroupsCountForLinearWHS(dst_shape, {32, 1, 1}, params.block_size);
if (g2 < g1) {
params.linear_spatial = true;
params.work_group_size = int3(32, 1, 1);
params.work_group_launch_order = int3(0, 1, 2);
}
float precise_threshold = apple_info.IsBionic() ? 1.0f : 1.04f;
float precise_ratio = static_cast<float>(g2) / static_cast<float>(g3);
if (precise_ratio > precise_threshold) {
params.linear_spatial = false;
params.linear_all = true;
params.work_group_size = int3(32, 1, 1);
}
int total_elements = params.block_size.x * params.block_size.y *
params.block_size.z * params.block_size.w;
if (total_elements == 1) {
if (src_slices % 4 == 0) {
params.src_depth_loop_size = 4;
} else if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
} else if (total_elements == 2) {
if (src_slices % 2 == 0) {
params.src_depth_loop_size = 2;
}
}
if (params.src_depth_loop_size == src_slices) {
params.need_src_loop = false;
}
if (params.block_size.w == dst_slices) {
params.need_dst_loop = false;
}
const bool use_filters_constants =
!params.need_dst_loop && !params.need_src_loop && params.x_kernel_is_1 &&
params.y_kernel_is_1;
if (use_filters_constants) {
params.weights_upload_type = ConvGeneric::WeightsUploadType::CONSTANT_MEM;
}
return params;
}
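// Apple entry point: dispatches to the A7/A8 or A9+ heuristic depending on
// whether the GPU prefers local memory over global memory for weights.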
ConvGeneric::ConvParams ConvGeneric::GuessBestParamsApple(
const GpuInfo& gpu_info, const OperationDef& definition, int src_depth,
int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1,
bool different_weights_for_height, const BHWC& dst_shape) {
if (gpu_info.apple_info.IsLocalMemoryPreferredOverGlobal()) {
return GetConvParamsForA7A8(gpu_info.apple_info, x_kernel_is_1,
y_kernel_is_1, src_depth, dst_shape);
} else {
return GetConvParamsForA9AndHigher(gpu_info.apple_info, x_kernel_is_1,
y_kernel_is_1, src_depth, dst_shape);
}
}
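// Main heuristic: per-vendor branches (NVIDIA, PowerVR, AMD, Mali, Adreno,
// Intel, Apple, default) pick the block size, work group shape and launch
// order, weights upload type and src-depth loop size; the weights layout is
// derived at the end from the chosen upload type.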
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition, int src_depth,
int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1,
bool different_weights_for_height, const BHWC* dst_shape) {
ConvParams conv_params;
conv_params.linear_spatial = false;
conv_params.linear_all = false;
conv_params.block_size = int4(1, 1, 1, 1);
conv_params.weights_data_type =
DeduceDataTypeFromPrecision(definition.precision);
conv_params.x_kernel_is_1 = x_kernel_is_1;
conv_params.y_kernel_is_1 = y_kernel_is_1;
conv_params.different_weights_for_height = different_weights_for_height;
if (gpu_info.IsNvidia()) {
if (different_weights_for_height) {
work_group_size_ = int3(32, 1, 1);
work_group_launch_order_ = int3(2, 0, 1);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_size_ = int3(32, 1, 1);
work_group_launch_order_ = int3(1, 0, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(2, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::LOCAL_MEM_BY_THREADS;
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
float task_size_per_cu =
static_cast<float>(task_size) / gpu_info.GetComputeUnitsCount();
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
float threads_per_cu = task_size_per_cu / block_size;
      float warps_per_cu = threads_per_cu / 32;  // 32 = NVIDIA warp size
if (warps_per_cu < 8.0f) {
conv_params.block_size.x = 1;
}
if (warps_per_cu < 4.0f && conv_params.block_size.w >= 4) {
conv_params.block_size.w /= 2;
}
if (warps_per_cu < 2.0f && conv_params.block_size.w >= 2) {
conv_params.block_size.w /= 2;
}
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
} else if (gpu_info.IsPowerVR()) {
if (gpu_info.IsCL30OrHigher()) {
work_group_size_ =
int3(gpu_info.opencl_info.preferred_work_group_size_multiple, 1, 1);
} else {
work_group_size_ = int3(32, 1, 1);
}
if (different_weights_for_height) {
work_group_launch_order_ = int3(2, 0, 1);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_launch_order_ = int3(1, 0, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(1, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
if (!gpu_info.IsApiOpenCl() ||
(gpu_info.IsApiOpenCl() &&
gpu_info.opencl_info.dedicated_local_memory)) {
if (definition.precision == CalculationsPrecision::F32_F16) {
conv_params.weights_upload_type =
WeightsUploadType::LOCAL_MEM_BY_THREADS;
} else {
conv_params.weights_upload_type =
WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP;
}
} else {
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
}
if (dst_depth % 8 == 0 || dst_depth >= 32) {
conv_params.block_size.w = 8;
} else if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (definition.precision == CalculationsPrecision::F16) {
conv_params.block_size.w = std::min(4, conv_params.block_size.w);
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
if (conv_params.block_size.w == 1) {
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0) {
conv_params.src_depth_loop_size = 4;
}
if (src_depth <= 8) {
conv_params.src_depth_loop_size = src_depth;
}
}
conv_params.block_size.x = 2;
}
} else if (gpu_info.IsAMD()) {
work_group_size_ = int3(8, 4, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
if (gpu_info.IsApiOpenCl()) {
conv_params.weights_upload_type = WeightsUploadType::CONSTANT_MEM;
} else {
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
}
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size = int4(2, 2, 1, 4);
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size = int4(4, 2, 1, 2);
} else {
conv_params.block_size = int4(4, 4, 1, 1);
}
auto reduce_block_size_wzyx = [](int4* block_size) {
if (block_size->w % 2 == 0) {
block_size->w /= 2;
} else if (block_size->z % 2 == 0) {
block_size->z /= 2;
} else if (block_size->y % 2 == 0) {
block_size->y /= 2;
} else if (block_size->x % 2 == 0) {
block_size->x /= 2;
}
};
if (definition_.precision != CalculationsPrecision::F16) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
float task_size_per_cu =
static_cast<float>(task_size) / gpu_info.GetComputeUnitsCount();
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
float threads_per_cu = task_size_per_cu / block_size;
float warps_per_cu = threads_per_cu / 64;
if (warps_per_cu < 4.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 2.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 1.0f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
if (warps_per_cu < 0.5f) {
reduce_block_size_wzyx(&conv_params.block_size);
}
}
int block_size = conv_params.block_size.x * conv_params.block_size.y *
conv_params.block_size.w;
conv_params.src_depth_loop_size = 1;
if (block_size <= 4 && src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (block_size <= 2 && src_depth % 4 == 0) {
conv_params.src_depth_loop_size = 4;
}
if (block_size <= 1 && src_depth % 8 == 0) {
conv_params.src_depth_loop_size = 8;
}
} else if (gpu_info.IsMali()) {
int block_size = 2;
if (dst_shape) {
int task_size = dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
block_size = GetRecommendedBlockSizeForConv(
gpu_info, definition.precision, task_size);
}
if (!x_kernel_is_1 || !y_kernel_is_1) {
if (gpu_info.mali_info.IsMidgard() || gpu_info.mali_info.IsBifrost()) {
block_size = std::min(block_size, 4);
}
}
if (block_size == 8) {
if (dst_depth == 1 || dst_depth == 3) {
conv_params.block_size = int4(2, 2, 1, 1);
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
} else if (block_size == 4) {
if (dst_depth == 1 || dst_depth == 3) {
conv_params.block_size = int4(2, 2, 1, 1);
} else {
conv_params.block_size = int4(2, 1, 1, 1);
if (definition.precision == CalculationsPrecision::F32 &&
gpu_info.mali_info.IsValhall()) {
conv_params.block_size.y = 2;
} else {
conv_params.block_size.w = 2;
}
}
} else if (block_size == 2) {
conv_params.block_size = int4(2, 1, 1, 1);
} else {
conv_params.block_size = int4(1, 1, 1, 1);
}
if (dst_shape) {
if (dst_shape->w == 1) {
conv_params.block_size.y *= conv_params.block_size.x;
conv_params.block_size.x = 1;
}
if (dst_shape->h == 1) {
conv_params.block_size.x *= conv_params.block_size.y;
conv_params.block_size.y = 1;
}
}
conv_params.src_depth_loop_size = 1;
MaliInfo mali_info = gpu_info.mali_info;
if (src_depth % 2 == 0 && block_size <= 2 && !mali_info.IsMidgard()) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && block_size == 1 && !mali_info.IsMidgard() &&
definition.precision == CalculationsPrecision::F16) {
conv_params.src_depth_loop_size = 4;
}
work_group_size_ = int3(4, 4, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
} else if (gpu_info.IsAdreno()) {
if (dst_shape) {
const int wave_size = gpu_info.adreno_info.GetWaveSize(
definition.precision == CalculationsPrecision::F16);
const double task_size =
1.0 * dst_shape->w * dst_shape->b * dst_shape->h * dst_depth;
const double waves =
task_size / gpu_info.GetComputeUnitsCount() / wave_size;
if (waves <= 6.0f) {
conv_params.block_size = int4(1, 1, 1, 1);
} else if (waves <= 12.0f) {
conv_params.block_size = int4(2, 1, 1, 1);
} else if (waves <= 24.0f) {
conv_params.block_size = int4(2, 1, 1, 2);
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
} else {
conv_params.block_size = int4(2, 2, 1, 2);
}
if (gpu_info.adreno_info.IsAdreno3xx()) {
if (definition.precision == CalculationsPrecision::F16) {
conv_params.block_size = int4(2, 2, 1, 2);
} else if (definition.precision == CalculationsPrecision::F32_F16) {
conv_params.block_size = int4(2, 1, 1, 2);
} else {
conv_params.block_size = int4(2, 2, 1, 1);
}
}
work_group_size_ = int3(8, 2, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::TEXTURES_MEM_X4;
} else if (gpu_info.IsIntel()) {
if (different_weights_for_height) {
work_group_size_ = int3(16, 1, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = true;
} else {
conv_params.linear_spatial = true;
work_group_size_ = int3(16, 1, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = true;
}
conv_params.block_size = int4(1, 1, 1, 4);
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::LOCAL_MEM_BY_THREADS;
if (gpu_info.IsApiMetal() &&
definition.precision != CalculationsPrecision::F32_F16 &&
gpu_info.metal_info.IsMslVersionEqualOrHigher(2)) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = 8;
}
if (gpu_info.IsApiOpenCl() &&
definition.precision != CalculationsPrecision::F32_F16) {
const bool supports_subgroups =
gpu_info.SupportsExtension("cl_khr_subgroups") ||
gpu_info.SupportsExtension("cl_intel_subgroups") ||
gpu_info.opencl_info.IsCLVK();
if (supports_subgroups) {
const int kSubGroupSize = 16;
const bool supports_subgroup_size_control =
gpu_info.SupportsExtension("cl_intel_required_subgroup_size");
int min_subgroup_size;
auto min_subgroup_size_status =
gpu_info.GetMinSubGroupSize(min_subgroup_size);
if (supports_subgroup_size_control &&
gpu_info.SupportsSubGroupWithSize(kSubGroupSize)) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = kSubGroupSize;
} else if (supports_subgroup_size_control &&
min_subgroup_size_status.ok()) {
conv_params.weights_upload_type =
WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST;
conv_params.simd_size = min_subgroup_size;
work_group_size_ = int3(min_subgroup_size, 1, 1);
        } else {
          // No usable subgroup size is available; keep the default
          // LOCAL_MEM_BY_THREADS weights upload chosen above.
        }
}
}
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
} else if (gpu_info.IsApple()) {
BHWC output_shape = BHWC(1, 32, 32, 128);
if (dst_shape) {
output_shape = *dst_shape;
}
conv_params = GuessBestParamsApple(
gpu_info, definition, src_depth, dst_depth, x_kernel_is_1,
y_kernel_is_1, different_weights_for_height, output_shape);
conv_params.fixed_work_group_size = true;
work_group_size_ = conv_params.work_group_size;
work_group_launch_order_ = conv_params.work_group_launch_order;
conv_params.weights_data_type =
DeduceDataTypeFromPrecision(definition.precision);
conv_params.x_kernel_is_1 = x_kernel_is_1;
conv_params.y_kernel_is_1 = y_kernel_is_1;
conv_params.different_weights_for_height = different_weights_for_height;
} else {
conv_params.block_size = int4(1, 1, 1, 4);
work_group_size_ = int3(8, 2, 1);
work_group_launch_order_ = int3(0, 1, 2);
conv_params.fixed_work_group_size = false;
conv_params.src_depth_loop_size = 1;
conv_params.weights_upload_type = WeightsUploadType::GLOBAL_MEM;
if (dst_depth % 4 == 0 || dst_depth >= 8) {
conv_params.block_size.w = 4;
} else if (dst_depth % 2 == 0 || dst_depth >= 4) {
conv_params.block_size.w = 2;
} else {
conv_params.block_size.w = dst_depth;
}
if (src_depth % 2 == 0) {
conv_params.src_depth_loop_size = 2;
}
if (src_depth % 4 == 0 && conv_params.block_size.w <= 2) {
conv_params.src_depth_loop_size = 4;
}
}
if (conv_params.AreWeightsBuffer()) {
if (gpu_info.IsApple()) {
conv_params.weights_layout = WeightsLayout::kOSpatialIOGroupO4I4;
} else {
conv_params.weights_layout = WeightsLayout::kOSpatialIOGroupI4O4;
}
} else {
if (gpu_info.IsApple()) {
conv_params.weights_layout =
WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4;
} else {
conv_params.weights_layout =
WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4;
}
}
return conv_params;
}
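// Overload for 2D convolution attributes: derives slice counts and the
// 1x1-kernel flags before delegating to the generic heuristic.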
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution2DAttributes& attr, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const bool x_kernel_is_1 = attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
const bool y_kernel_is_1 = attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
return GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, dst_shape);
}
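// Overload for 3D convolution: depth is folded into the height of a temporary
// BHWC shape; z_kernel_is_1 is recorded separately on the result.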
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution3DAttributes& attr, const BHWDC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
const bool x_kernel_is_1 = attr.weights.shape.w == 1 && attr.strides.w == 1 &&
attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 &&
attr.padding.appended.w == 0;
const bool y_kernel_is_1 = attr.weights.shape.h == 1 && attr.strides.h == 1 &&
attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 &&
attr.padding.appended.h == 0;
const bool z_kernel_is_1 = attr.weights.shape.d == 1 && attr.strides.d == 1 &&
attr.dilations.d == 1 &&
attr.padding.prepended.d == 0 &&
attr.padding.appended.d == 0;
ConvGeneric::ConvParams result;
BHWC shape;
if (dst_shape) {
shape.b = dst_shape->b;
shape.h = dst_shape->h * dst_shape->d;
shape.w = dst_shape->w;
shape.c = dst_shape->c;
result = GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, &shape);
} else {
result = GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, nullptr);
}
result.z_kernel_is_1 = z_kernel_is_1;
return result;
}
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const Convolution2DAttributes& attr, const BHWC& weights_shape,
const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(weights_shape.b, 4);
const int src_depth = DivideRoundUp(weights_shape.c, 4);
const bool x_kernel_is_1 =
weights_shape.w == 1 && attr.strides.w == 1 && attr.dilations.w == 1 &&
attr.padding.prepended.w == 0 && attr.padding.appended.w == 0;
const bool y_kernel_is_1 =
weights_shape.h == 1 && attr.strides.h == 1 && attr.dilations.h == 1 &&
attr.padding.prepended.h == 0 && attr.padding.appended.h == 0;
return GuessBestParams(gpu_info, definition, src_depth, dst_depth,
x_kernel_is_1, y_kernel_is_1, false, dst_shape);
}
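// Fully connected layers are treated as a 1x1 convolution over a single
// spatial position; the Y block and work group dimensions are folded into X.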
ConvGeneric::ConvParams ConvGeneric::GuessBestParams(
const GpuInfo& gpu_info, const OperationDef& definition,
const FullyConnectedAttributes& attr, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4);
const int src_depth = DivideRoundUp(attr.weights.shape.i, 4);
ConvGeneric::ConvParams params = GuessBestParams(
gpu_info, definition, src_depth, dst_depth, true, true, false, dst_shape);
work_group_size_.x *= work_group_size_.y;
work_group_size_.y = 1;
params.block_size.x *= params.block_size.y;
params.block_size.y = 1;
return params;
}
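// Pointwise variant used for Winograd and batched MatMul: per-row weights
// (different_weights_for_height) with spatial dimensions folded into X.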
ConvGeneric::ConvParams ConvGeneric::GuessBestParamsPointwise(
const GpuInfo& gpu_info, const OperationDef& definition,
const OHWI& weights_shape, const BHWC* dst_shape) {
const int dst_depth = DivideRoundUp(weights_shape.o, 4);
const int src_depth = DivideRoundUp(weights_shape.i, 4);
ConvGeneric::ConvParams params = GuessBestParams(
gpu_info, definition, src_depth, dst_depth, true, true, true, dst_shape);
params.block_size.x *= params.block_size.y;
params.block_size.y = 1;
work_group_size_.x *= work_group_size_.y;
work_group_size_.y = 1;
return params;
}
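// Factory helpers: construct the operation, generate its device code and
// upload the constant weights/biases (or Winograd-transformed weights).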
ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadData(attr.weights, attr.bias);
return result;
}
ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info,
const OperationDef& definition,
const FullyConnectedAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadData(attr.weights, attr.bias);
return result;
}
ConvGeneric CreateConvGenericDynamicWeights(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC& weights_shape,
const BHWC* dst_shape) {
ConvGeneric result(definition, attr, weights_shape, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadBias(attr.bias);
return result;
}
ConvGeneric CreateConvGenericBatchedMatMul(const GpuInfo& gpu_info,
const OperationDef& definition,
const OHWI& weights_shape,
const BHWC* dst_shape) {
ConvGeneric result(definition);
result.conv_params_ = result.GuessBestParamsPointwise(
gpu_info, definition, weights_shape, dst_shape);
result.GenerateCode(gpu_info);
tflite::gpu::Tensor<Linear, DataType::FLOAT32> biases;
biases.shape = Linear(weights_shape.o);
biases.data.resize(weights_shape.o, 0.0f);
result.UploadBias(biases);
return result;
}
ConvGeneric CreateConvGenericWino4x4To6x6(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution2DAttributes& attr,
const BHWC* dst_shape) {
ConvGeneric result(definition);
result.conv_params_ = result.GuessBestParamsPointwise(
gpu_info, definition, attr.weights.shape, dst_shape);
result.GenerateCode(gpu_info);
result.UploadDataForWinograd4x4To6x6(attr.weights);
return result;
}
ConvGeneric CreateConvGeneric3D(const GpuInfo& gpu_info,
const OperationDef& definition,
const Convolution3DAttributes& attr,
const BHWDC* dst_shape) {
ConvGeneric result(definition, attr, gpu_info, dst_shape);
result.GenerateCode(gpu_info);
result.UploadWeights(attr.weights);
result.UploadBias(attr.bias);
return result;
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConvGeneric1x1SimpleWeights) {
const auto status = ConvGeneric1x1SimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGeneric1x1) {
const auto status = ConvGeneric1x1Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGenericSimpleWeights) {
const auto status = ConvGenericSimpleWeightsTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGeneric) {
const auto status = ConvGenericTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConvGenericGrouped) {
const auto status = ConvGenericGroupedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_generic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c0b342c4-0fc8-459b-b6f3-2ee8bbd33e58 | cpp | google/arolla | expr_operator | arolla/expr/expr_operator.cc | arolla/expr/expr_operator_test.cc | #include "arolla/expr/expr_operator.h"
#include <memory>
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr_node.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/util/demangle.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/meta.h"
#include "arolla/util/repr.h"
namespace arolla::expr {
absl::StatusOr<std::string> ExprOperator::GetDoc() const { return ""; }
absl::StatusOr<ExprNodePtr> ExprOperator::ToLowerLevel(
const ExprNodePtr& node) const {
return node;
}
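// Human-readable repr: operator name, Python-style hash and unqualified C++
// type, plus the QValue specialization key when one is set.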
ReprToken ExprOperator::GenReprToken() const {
const auto name = absl::CEscape(display_name_);
const auto hash = fingerprint_.PythonHash();
const auto cxx_type = TypeName(typeid(*this));
const auto short_cxx_type = cxx_type.substr(cxx_type.rfind(':') + 1);
const auto key = absl::CEscape(py_qvalue_specialization_key());
struct ReprToken result;
if (key.empty()) {
result.str =
absl::StrFormat("<Operator with name='%s', hash=0x%x, cxx_type='%s'>",
name, hash, short_cxx_type);
} else {
result.str = absl::StrFormat(
"<Operator with name='%s', hash=0x%x, cxx_type='%s', key='%s'>", name,
hash, short_cxx_type, key);
}
return result;
}
absl::string_view ExprOperator::py_qvalue_specialization_key() const {
return "";
}
bool IsBackendOperator(const ExprOperatorPtr& op,
absl::string_view name) {
return HasBackendExprOperatorTag(op) && op->display_name() == name;
}
}
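// Fingerprint, Repr and QType traits that let ExprOperatorPtr be hashed,
// printed and stored as a QValue.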
namespace arolla {
using ::arolla::expr::ExprOperatorPtr;
void FingerprintHasherTraits<ExprOperatorPtr>::operator()(
FingerprintHasher* hasher, const ExprOperatorPtr& value) const {
hasher->Combine(value->fingerprint());
}
ReprToken ReprTraits<ExprOperatorPtr>::operator()(
const ExprOperatorPtr& value) const {
DCHECK(value != nullptr);
if (value == nullptr) {
return ReprToken{"<Operator nullptr>"};
}
return value->GenReprToken();
}
QTypePtr QTypeTraits<ExprOperatorPtr>::type() {
struct ExprOperatorQType final : SimpleQType {
ExprOperatorQType()
: SimpleQType(meta::type<ExprOperatorPtr>(), "EXPR_OPERATOR") {}
absl::string_view UnsafePyQValueSpecializationKey(
const void* source) const final {
if (const auto& op = *static_cast<const ExprOperatorPtr*>(source)) {
return op->py_qvalue_specialization_key();
}
return "";
}
};
static const absl::NoDestructor<ExprOperatorQType> result;
return result.get();
}
} | #include "arolla/expr/expr_operator.h"
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/backend_wrapping_operator.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::MatchesRegex;
TEST(ExprOperatorTest, IsBackendOperator) {
{ EXPECT_FALSE(IsBackendOperator(nullptr, "math.add")); }
{
ASSERT_OK_AND_ASSIGN(auto op, LookupOperator("math.add"));
EXPECT_FALSE(IsBackendOperator(op, "math.add"));
}
{
BackendWrappingOperator::TypeMetaEvalStrategy dummy_strategy =
[](absl::Span<const QTypePtr> types) { return nullptr; };
auto op = std::make_shared<BackendWrappingOperator>(
"math.add", ExprOperatorSignature::MakeVariadicArgs(), dummy_strategy);
EXPECT_TRUE(IsBackendOperator(op, "math.add"));
EXPECT_FALSE(IsBackendOperator(op, "foo.bar"));
}
}
TEST(ExprOperatorTest, ReprWithoutPyQValueSpecializationKey) {
class OperatorWithoutPythonWrapperKey final : public BasicExprOperator {
public:
OperatorWithoutPythonWrapperKey()
: BasicExprOperator("op'name", ExprOperatorSignature{}, "",
Fingerprint{0x0123456701234567}) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr>) const final {
return GetQType<float>();
}
};
ExprOperatorPtr op = std::make_shared<OperatorWithoutPythonWrapperKey>();
EXPECT_THAT(
Repr(op),
MatchesRegex("<Operator with name='op\\\\'name', hash=0x[0-9a-f]+, "
"cxx_type='OperatorWithoutPythonWrapperKey'>"));
}
TEST(ExprOperatorTest, ReprWithPyQValueSpecializationKey) {
class OperatorWithPythonWrapperKey final : public BasicExprOperator {
public:
OperatorWithPythonWrapperKey()
: BasicExprOperator("op'name", ExprOperatorSignature{}, "",
Fingerprint{0x0123456701234567}) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr>) const final {
return GetQType<float>();
}
absl::string_view py_qvalue_specialization_key() const final {
return "foo'bar";
}
};
ExprOperatorPtr op = std::make_shared<OperatorWithPythonWrapperKey>();
EXPECT_THAT(
Repr(op),
MatchesRegex(
"<Operator with name='op\\\\'name', hash=0x[0-9a-f]+, "
"cxx_type='OperatorWithPythonWrapperKey', key='foo\\\\'bar'>"));
}
TEST(ExprOperatorTest, GetDoc) {
class OperatorWithoutGetDoc final : public ExprOperator {
public:
OperatorWithoutGetDoc()
: ExprOperator("op'name", Fingerprint{0x0123456701234567}) {}
absl::StatusOr<ExprOperatorSignature> GetSignature() const override {
return ExprOperatorSignature{};
}
absl::StatusOr<ExprAttributes> InferAttributes(
absl::Span<const ExprAttributes>) const override {
return ExprAttributes();
}
};
EXPECT_THAT(OperatorWithoutGetDoc().GetDoc(), IsOkAndHolds(""));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
fb25460a-918c-4ca3-b138-5b2aa44d6b57 | cpp | google/quiche | crypto_framer | quiche/quic/core/crypto/crypto_framer.cc | quiche/quic/core/crypto/crypto_framer_test.cc | #include "quiche/quic/core/crypto/crypto_framer.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace {
const size_t kQuicTagSize = sizeof(QuicTag);
const size_t kCryptoEndOffsetSize = sizeof(uint32_t);
const size_t kNumEntriesSize = sizeof(uint16_t);
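// Visitor that captures exactly one parsed handshake message (or an error);
// used by the static ParseMessage() helper.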
class OneShotVisitor : public CryptoFramerVisitorInterface {
public:
OneShotVisitor() : error_(false) {}
  void OnError(CryptoFramer*) override { error_ = true; }
void OnHandshakeMessage(const CryptoHandshakeMessage& message) override {
out_ = std::make_unique<CryptoHandshakeMessage>(message);
}
bool error() const { return error_; }
std::unique_ptr<CryptoHandshakeMessage> release() { return std::move(out_); }
private:
std::unique_ptr<CryptoHandshakeMessage> out_;
bool error_;
};
}
CryptoFramer::CryptoFramer()
: visitor_(nullptr),
error_detail_(""),
num_entries_(0),
values_len_(0),
process_truncated_messages_(false) {
Clear();
}
CryptoFramer::~CryptoFramer() {}
std::unique_ptr<CryptoHandshakeMessage> CryptoFramer::ParseMessage(
absl::string_view in) {
OneShotVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
if (!framer.ProcessInput(in) || visitor.error() ||
framer.InputBytesRemaining()) {
return nullptr;
}
return visitor.release();
}
QuicErrorCode CryptoFramer::error() const { return error_; }
const std::string& CryptoFramer::error_detail() const { return error_detail_; }
bool CryptoFramer::ProcessInput(absl::string_view input,
                                EncryptionLevel) {
return ProcessInput(input);
}
bool CryptoFramer::ProcessInput(absl::string_view input) {
QUICHE_DCHECK_EQ(QUIC_NO_ERROR, error_);
if (error_ != QUIC_NO_ERROR) {
return false;
}
error_ = Process(input);
if (error_ != QUIC_NO_ERROR) {
QUICHE_DCHECK(!error_detail_.empty());
visitor_->OnError(this);
return false;
}
return true;
}
size_t CryptoFramer::InputBytesRemaining() const { return buffer_.length(); }
bool CryptoFramer::HasTag(QuicTag tag) const {
if (state_ != STATE_READING_VALUES) {
return false;
}
for (const auto& it : tags_and_lengths_) {
if (it.first == tag) {
return true;
}
}
return false;
}
void CryptoFramer::ForceHandshake() {
QuicDataReader reader(buffer_.data(), buffer_.length(),
quiche::HOST_BYTE_ORDER);
for (const std::pair<QuicTag, size_t>& item : tags_and_lengths_) {
absl::string_view value;
if (reader.BytesRemaining() < item.second) {
break;
}
reader.ReadStringPiece(&value, item.second);
message_.SetStringPiece(item.first, value);
}
visitor_->OnHandshakeMessage(message_);
}
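// Serializes a message as: tag, entry count, zero padding, a (tag, end-offset)
// index in tag order, then the concatenated values. A PAD entry is inserted
// when the encoding would otherwise be smaller than minimum_size().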
std::unique_ptr<QuicData> CryptoFramer::ConstructHandshakeMessage(
const CryptoHandshakeMessage& message) {
size_t num_entries = message.tag_value_map().size();
size_t pad_length = 0;
bool need_pad_tag = false;
bool need_pad_value = false;
size_t len = message.size();
if (len < message.minimum_size()) {
need_pad_tag = true;
need_pad_value = true;
num_entries++;
size_t delta = message.minimum_size() - len;
const size_t overhead = kQuicTagSize + kCryptoEndOffsetSize;
if (delta > overhead) {
pad_length = delta - overhead;
}
len += overhead + pad_length;
}
if (num_entries > kMaxEntries) {
return nullptr;
}
std::unique_ptr<char[]> buffer(new char[len]);
QuicDataWriter writer(len, buffer.get(), quiche::HOST_BYTE_ORDER);
if (!writer.WriteTag(message.tag())) {
QUICHE_DCHECK(false) << "Failed to write message tag.";
return nullptr;
}
if (!writer.WriteUInt16(static_cast<uint16_t>(num_entries))) {
QUICHE_DCHECK(false) << "Failed to write size.";
return nullptr;
}
if (!writer.WriteUInt16(0)) {
QUICHE_DCHECK(false) << "Failed to write padding.";
return nullptr;
}
uint32_t end_offset = 0;
for (auto it = message.tag_value_map().begin();
it != message.tag_value_map().end(); ++it) {
if (it->first == kPAD && need_pad_tag) {
QUICHE_DCHECK(false)
<< "Message needed padding but already contained a PAD tag";
return nullptr;
}
if (it->first > kPAD && need_pad_tag) {
need_pad_tag = false;
if (!WritePadTag(&writer, pad_length, &end_offset)) {
return nullptr;
}
}
if (!writer.WriteTag(it->first)) {
QUICHE_DCHECK(false) << "Failed to write tag.";
return nullptr;
}
end_offset += it->second.length();
if (!writer.WriteUInt32(end_offset)) {
QUICHE_DCHECK(false) << "Failed to write end offset.";
return nullptr;
}
}
if (need_pad_tag) {
if (!WritePadTag(&writer, pad_length, &end_offset)) {
return nullptr;
}
}
for (auto it = message.tag_value_map().begin();
it != message.tag_value_map().end(); ++it) {
if (it->first > kPAD && need_pad_value) {
need_pad_value = false;
if (!writer.WriteRepeatedByte('-', pad_length)) {
QUICHE_DCHECK(false) << "Failed to write padding.";
return nullptr;
}
}
if (!writer.WriteBytes(it->second.data(), it->second.length())) {
QUICHE_DCHECK(false) << "Failed to write value.";
return nullptr;
}
}
if (need_pad_value) {
if (!writer.WriteRepeatedByte('-', pad_length)) {
QUICHE_DCHECK(false) << "Failed to write padding.";
return nullptr;
}
}
return std::make_unique<QuicData>(buffer.release(), len, true);
}
void CryptoFramer::Clear() {
message_.Clear();
tags_and_lengths_.clear();
error_ = QUIC_NO_ERROR;
error_detail_ = "";
state_ = STATE_READING_TAG;
}
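// Incremental parser state machine: message tag -> entry count ->
// (tag, end offset) index -> values; unconsumed bytes stay in buffer_ until
// the next call.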
QuicErrorCode CryptoFramer::Process(absl::string_view input) {
buffer_.append(input.data(), input.length());
QuicDataReader reader(buffer_.data(), buffer_.length(),
quiche::HOST_BYTE_ORDER);
switch (state_) {
case STATE_READING_TAG:
if (reader.BytesRemaining() < kQuicTagSize) {
break;
}
QuicTag message_tag;
reader.ReadTag(&message_tag);
message_.set_tag(message_tag);
state_ = STATE_READING_NUM_ENTRIES;
ABSL_FALLTHROUGH_INTENDED;
case STATE_READING_NUM_ENTRIES:
if (reader.BytesRemaining() < kNumEntriesSize + sizeof(uint16_t)) {
break;
}
reader.ReadUInt16(&num_entries_);
if (num_entries_ > kMaxEntries) {
error_detail_ = absl::StrCat(num_entries_, " entries");
return QUIC_CRYPTO_TOO_MANY_ENTRIES;
}
uint16_t padding;
reader.ReadUInt16(&padding);
tags_and_lengths_.reserve(num_entries_);
state_ = STATE_READING_TAGS_AND_LENGTHS;
values_len_ = 0;
ABSL_FALLTHROUGH_INTENDED;
case STATE_READING_TAGS_AND_LENGTHS: {
if (reader.BytesRemaining() <
num_entries_ * (kQuicTagSize + kCryptoEndOffsetSize)) {
break;
}
uint32_t last_end_offset = 0;
for (unsigned i = 0; i < num_entries_; ++i) {
QuicTag tag;
reader.ReadTag(&tag);
if (i > 0 && tag <= tags_and_lengths_[i - 1].first) {
if (tag == tags_and_lengths_[i - 1].first) {
error_detail_ = absl::StrCat("Duplicate tag:", tag);
return QUIC_CRYPTO_DUPLICATE_TAG;
}
error_detail_ = absl::StrCat("Tag ", tag, " out of order");
return QUIC_CRYPTO_TAGS_OUT_OF_ORDER;
}
uint32_t end_offset;
reader.ReadUInt32(&end_offset);
if (end_offset < last_end_offset) {
error_detail_ =
absl::StrCat("End offset: ", end_offset, " vs ", last_end_offset);
return QUIC_CRYPTO_TAGS_OUT_OF_ORDER;
}
tags_and_lengths_.push_back(std::make_pair(
tag, static_cast<size_t>(end_offset - last_end_offset)));
last_end_offset = end_offset;
}
values_len_ = last_end_offset;
state_ = STATE_READING_VALUES;
ABSL_FALLTHROUGH_INTENDED;
}
case STATE_READING_VALUES:
if (reader.BytesRemaining() < values_len_) {
if (!process_truncated_messages_) {
break;
}
QUIC_LOG(ERROR) << "Trunacted message. Missing "
<< values_len_ - reader.BytesRemaining() << " bytes.";
}
for (const std::pair<QuicTag, size_t>& item : tags_and_lengths_) {
absl::string_view value;
if (!reader.ReadStringPiece(&value, item.second)) {
QUICHE_DCHECK(process_truncated_messages_);
message_.SetStringPiece(item.first, "");
continue;
}
message_.SetStringPiece(item.first, value);
}
visitor_->OnHandshakeMessage(message_);
Clear();
state_ = STATE_READING_TAG;
break;
}
buffer_ = std::string(reader.PeekRemainingPayload());
return QUIC_NO_ERROR;
}
bool CryptoFramer::WritePadTag(QuicDataWriter* writer, size_t pad_length,
uint32_t* end_offset) {
if (!writer->WriteTag(kPAD)) {
QUICHE_DCHECK(false) << "Failed to write tag.";
return false;
}
*end_offset += pad_length;
if (!writer->WriteUInt32(*end_offset)) {
QUICHE_DCHECK(false) << "Failed to write end offset.";
return false;
}
return true;
}
} | #include "quiche/quic/core/crypto/crypto_framer.h"
#include <map>
#include <memory>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
char* AsChars(unsigned char* data) { return reinterpret_cast<char*>(data); }
class TestCryptoVisitor : public CryptoFramerVisitorInterface {
public:
TestCryptoVisitor() : error_count_(0) {}
void OnError(CryptoFramer* framer) override {
QUIC_DLOG(ERROR) << "CryptoFramer Error: " << framer->error();
++error_count_;
}
void OnHandshakeMessage(const CryptoHandshakeMessage& message) override {
messages_.push_back(message);
}
int error_count_;
std::vector<CryptoHandshakeMessage> messages_;
};
TEST(CryptoFramerTest, ConstructHandshakeMessage) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
message.SetStringPiece(0x12345678, "abcdef");
message.SetStringPiece(0x12345679, "ghijk");
message.SetStringPiece(0x1234567A, "lmnopqr");
unsigned char packet[] = {
0x33, 0x77, 0xAA, 0xFF,
0x03, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x06, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x0b, 0x00, 0x00, 0x00,
0x7A, 0x56, 0x34, 0x12,
0x12, 0x00, 0x00, 0x00,
'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r'};
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
ASSERT_TRUE(data != nullptr);
quiche::test::CompareCharArraysWithHexError(
"constructed packet", data->data(), data->length(), AsChars(packet),
ABSL_ARRAYSIZE(packet));
}
TEST(CryptoFramerTest, ConstructHandshakeMessageWithTwoKeys) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
message.SetStringPiece(0x12345678, "abcdef");
message.SetStringPiece(0x12345679, "ghijk");
unsigned char packet[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x06, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x0b, 0x00, 0x00, 0x00,
'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k'};
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
ASSERT_TRUE(data != nullptr);
quiche::test::CompareCharArraysWithHexError(
"constructed packet", data->data(), data->length(), AsChars(packet),
ABSL_ARRAYSIZE(packet));
}
TEST(CryptoFramerTest, ConstructHandshakeMessageZeroLength) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
message.SetStringPiece(0x12345678, "");
unsigned char packet[] = {
0x33, 0x77, 0xAA, 0xFF,
0x01, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x00, 0x00, 0x00, 0x00};
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
ASSERT_TRUE(data != nullptr);
quiche::test::CompareCharArraysWithHexError(
"constructed packet", data->data(), data->length(), AsChars(packet),
ABSL_ARRAYSIZE(packet));
}
TEST(CryptoFramerTest, ConstructHandshakeMessageTooManyEntries) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
for (uint32_t key = 1; key <= kMaxEntries + 1; ++key) {
message.SetStringPiece(key, "abcdef");
}
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
EXPECT_TRUE(data == nullptr);
}
TEST(CryptoFramerTest, ConstructHandshakeMessageMinimumSize) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
message.SetStringPiece(0x01020304, "test");
message.set_minimum_size(64);
unsigned char packet[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
'P', 'A', 'D', 0,
0x24, 0x00, 0x00, 0x00,
0x04, 0x03, 0x02, 0x01,
0x28, 0x00, 0x00, 0x00,
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-',
't', 'e', 's', 't'};
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
ASSERT_TRUE(data != nullptr);
quiche::test::CompareCharArraysWithHexError(
"constructed packet", data->data(), data->length(), AsChars(packet),
ABSL_ARRAYSIZE(packet));
}
TEST(CryptoFramerTest, ConstructHandshakeMessageMinimumSizePadLast) {
CryptoHandshakeMessage message;
message.set_tag(0xFFAA7733);
message.SetStringPiece(1, "");
message.set_minimum_size(64);
unsigned char packet[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x01, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
'P', 'A', 'D', 0,
0x28, 0x00, 0x00, 0x00,
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-',
'-', '-', '-', '-', '-', '-', '-', '-', '-', '-'};
CryptoFramer framer;
std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
ASSERT_TRUE(data != nullptr);
quiche::test::CompareCharArraysWithHexError(
"constructed packet", data->data(), data->length(), AsChars(packet),
ABSL_ARRAYSIZE(packet));
}
TEST(CryptoFramerTest, ProcessInput) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x06, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x0b, 0x00, 0x00, 0x00,
'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k'};
EXPECT_TRUE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_EQ(0u, framer.InputBytesRemaining());
EXPECT_EQ(0, visitor.error_count_);
ASSERT_EQ(1u, visitor.messages_.size());
const CryptoHandshakeMessage& message = visitor.messages_[0];
EXPECT_EQ(0xFFAA7733, message.tag());
EXPECT_EQ(2u, message.tag_value_map().size());
EXPECT_EQ("abcdef", crypto_test_utils::GetValueForTag(message, 0x12345678));
EXPECT_EQ("ghijk", crypto_test_utils::GetValueForTag(message, 0x12345679));
}
TEST(CryptoFramerTest, ProcessInputWithThreeKeys) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x03, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x06, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x0b, 0x00, 0x00, 0x00,
0x7A, 0x56, 0x34, 0x12,
0x12, 0x00, 0x00, 0x00,
'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r'};
EXPECT_TRUE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_EQ(0u, framer.InputBytesRemaining());
EXPECT_EQ(0, visitor.error_count_);
ASSERT_EQ(1u, visitor.messages_.size());
const CryptoHandshakeMessage& message = visitor.messages_[0];
EXPECT_EQ(0xFFAA7733, message.tag());
EXPECT_EQ(3u, message.tag_value_map().size());
EXPECT_EQ("abcdef", crypto_test_utils::GetValueForTag(message, 0x12345678));
EXPECT_EQ("ghijk", crypto_test_utils::GetValueForTag(message, 0x12345679));
EXPECT_EQ("lmnopqr", crypto_test_utils::GetValueForTag(message, 0x1234567A));
}
TEST(CryptoFramerTest, ProcessInputIncrementally) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x06, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x0b, 0x00, 0x00, 0x00,
'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k'};
for (size_t i = 0; i < ABSL_ARRAYSIZE(input); i++) {
EXPECT_TRUE(framer.ProcessInput(absl::string_view(AsChars(input) + i, 1)));
}
EXPECT_EQ(0u, framer.InputBytesRemaining());
ASSERT_EQ(1u, visitor.messages_.size());
const CryptoHandshakeMessage& message = visitor.messages_[0];
EXPECT_EQ(0xFFAA7733, message.tag());
EXPECT_EQ(2u, message.tag_value_map().size());
EXPECT_EQ("abcdef", crypto_test_utils::GetValueForTag(message, 0x12345678));
EXPECT_EQ("ghijk", crypto_test_utils::GetValueForTag(message, 0x12345679));
}
TEST(CryptoFramerTest, ProcessInputTagsOutOfOrder) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x13,
0x01, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x02, 0x00, 0x00, 0x00};
EXPECT_FALSE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_THAT(framer.error(), IsError(QUIC_CRYPTO_TAGS_OUT_OF_ORDER));
EXPECT_EQ(1, visitor.error_count_);
}
TEST(CryptoFramerTest, ProcessEndOffsetsOutOfOrder) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x01, 0x00, 0x00, 0x00,
0x78, 0x56, 0x34, 0x13,
0x00, 0x00, 0x00, 0x00};
EXPECT_FALSE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_THAT(framer.error(), IsError(QUIC_CRYPTO_TAGS_OUT_OF_ORDER));
EXPECT_EQ(1, visitor.error_count_);
}
TEST(CryptoFramerTest, ProcessInputTooManyEntries) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0xA0, 0x00,
0x00, 0x00};
EXPECT_FALSE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_THAT(framer.error(), IsError(QUIC_CRYPTO_TOO_MANY_ENTRIES));
EXPECT_EQ(1, visitor.error_count_);
}
TEST(CryptoFramerTest, ProcessInputZeroLength) {
test::TestCryptoVisitor visitor;
CryptoFramer framer;
framer.set_visitor(&visitor);
unsigned char input[] = {
0x33, 0x77, 0xAA, 0xFF,
0x02, 0x00,
0x00, 0x00,
0x78, 0x56, 0x34, 0x12,
0x00, 0x00, 0x00, 0x00,
0x79, 0x56, 0x34, 0x12,
0x05, 0x00, 0x00, 0x00};
EXPECT_TRUE(framer.ProcessInput(
absl::string_view(AsChars(input), ABSL_ARRAYSIZE(input))));
EXPECT_EQ(0, visitor.error_count_);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/crypto_framer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/crypto_framer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f0dde765-55b6-493c-95a6-d09131a7d8c2 | cpp | tensorflow/tensorflow | while_thunk | third_party/xla/xla/service/gpu/runtime/while_thunk.cc | third_party/xla/xla/backends/cpu/runtime/while_thunk_test.cc | #include "xla/service/gpu/runtime/while_thunk.h"
#include <cstdint>
#include <iterator>
#include <list>
#include <memory>
#include <optional>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
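// Thread-local stack of iteration counters, one entry per WhileThunk currently
// executing on this thread; lets nested loops report their current iteration.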
static std::list<int64_t>& LoopCounters() {
static thread_local std::list<int64_t> loop_counters;
return loop_counters;
}
absl::StatusOr<int64_t> WhileThunk::CurrentLoopIteration(int64_t depth) {
if (depth >= LoopCounters().size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"Loop depth %d is greater than the number of tracked loops %d", depth,
LoopCounters().size()));
}
auto counter = LoopCounters().begin();
std::advance(counter, depth);
return *counter;
}
WhileThunk::WhileThunk(
ThunkInfo thunk_info,
const BufferAllocation::Slice& condition_result_buffer_index,
std::unique_ptr<SequentialThunk> condition_thunk_sequence,
std::unique_ptr<SequentialThunk> body_thunk_sequence,
std::optional<int64_t> trip_count)
: Thunk(Kind::kWhile, thunk_info),
condition_result_buffer_index_(condition_result_buffer_index),
condition_thunk_sequence_(std::move(condition_thunk_sequence)),
body_thunk_sequence_(std::move(body_thunk_sequence)),
trip_count_(trip_count) {}
absl::Status WhileThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
TF_RETURN_IF_ERROR(
condition_thunk_sequence_->Prepare(params, resource_requests));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Prepare(params, resource_requests));
return absl::OkStatus();
}
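// Initialize also allocates a host-side bool per executor so the loop
// predicate can be copied back from the device on every iteration.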
absl::Status WhileThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(condition_thunk_sequence_->Initialize(params));
TF_RETURN_IF_ERROR(body_thunk_sequence_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(sizeof(bool)));
predicates_.emplace(params.executor, std::move(allocation));
}
return absl::OkStatus();
}
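// With a known trip count the body simply runs N times; otherwise the
// condition thunk runs first, its result is copied to the host buffer, the
// stream is drained, and the body executes while the predicate stays true.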
absl::Status WhileThunk::ExecuteOnStream(const ExecuteParams& params) {
auto& stream = *params.stream;
int64_t& iter = LoopCounters().emplace_front();
absl::Cleanup cleanup = [&] { LoopCounters().pop_front(); };
se::DeviceMemoryBase condition_result_data =
params.buffer_allocations->GetDeviceAddress(
condition_result_buffer_index_);
if (trip_count_.has_value()) {
VLOG(2) << "Executing WhileThunk for " << *trip_count_ << " iterations";
for (iter = 0; iter < trip_count_; ++iter) {
VLOG(3) << "Executing iteration # " << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
}
return absl::OkStatus();
}
bool* condition_result = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<bool*>(predicates_.at(stream.parent())->opaque());
}();
while (true) {
VLOG(3) << "Executing WhileThunk condition computation; iter=" << iter;
TF_RETURN_IF_ERROR(condition_thunk_sequence_->ExecuteOnStream(params));
TF_RETURN_IF_ERROR(
stream.Memcpy(condition_result, condition_result_data, sizeof(bool)));
if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
return absl::InternalError(absl::StrFormat(
"Failed to complete all kernels launched on stream %p: %s", &stream,
blocked.message()));
}
VLOG(3) << "condition_result = " << *condition_result;
if (!*condition_result) {
VLOG(3) << "Break WhileThunk loop; iter=" << iter;
break;
}
VLOG(3) << "Executing WhileThunk body computation; iter=" << iter;
TF_RETURN_IF_ERROR(body_thunk_sequence_->ExecuteOnStream(params));
++iter;
}
return absl::OkStatus();
}
}
} |
#include "xla/backends/cpu/runtime/while_thunk.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/backends/cpu/runtime/buffer_allocations.h"
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_testlib.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/tsl/concurrency/async_value_ref.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "Eigen/ThreadPool"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla::cpu {
namespace {
TEST(WhileThunkTest, BufferUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice pred_slice(&alloc, 0, sizeof(char));
BufferAllocation::Slice cond_read_slice(&alloc, 10, 10);
BufferAllocation::Slice body_read_slice(&alloc, 20, 10);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(cond_read_slice)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<BufferUseThunk>(BufferUse::Read(body_read_slice)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->buffer_uses().size(), 3);
EXPECT_EQ(thunk->buffer_uses()[0], BufferUse::Write(pred_slice));
EXPECT_EQ(thunk->buffer_uses()[1], BufferUse::Read(cond_read_slice));
EXPECT_EQ(thunk->buffer_uses()[2], BufferUse::Read(body_read_slice));
}
TEST(WhileThunkTest, ResourceUses) {
BufferAllocation alloc(0, 1024, 0);
BufferAllocation::Slice pred_slice(&alloc, 0, sizeof(char));
auto token0 = Resource::Create(Resource::kToken);
auto token1 = Resource::Create(Resource::kToken);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<ResourceUseThunk>(ResourceUse::Read(token0)));
ThunkSequence body_sequence;
body_sequence.push_back(
std::make_unique<ResourceUseThunk>(ResourceUse::Read(token1)));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
EXPECT_EQ(thunk->resource_uses().size(), 2);
EXPECT_EQ(thunk->resource_uses()[0], ResourceUse::Read(token0));
EXPECT_EQ(thunk->resource_uses()[1], ResourceUse::Read(token1));
}
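// Test-only condition thunk: decrements an internal counter, writes
// (counter > 1) into the predicate buffer, and completes its event
// asynchronously on the intra-op thread pool.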
class CondThunk : public Thunk {
public:
CondThunk(size_t counter, BufferAllocation::Slice pred_slice)
: Thunk(Kind::kKernel, {"cond"}),
counter_(counter + 1),
pred_slice_(pred_slice) {}
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase predicate_mem,
params.buffer_allocations->GetDeviceAddress(pred_slice_));
bool* predicate = reinterpret_cast<bool*>(predicate_mem.opaque());
*predicate = counter_.fetch_sub(1) > 1;
params.intra_op_threadpool->getPool()->Schedule(
[event] { event.SetStateConcrete(); });
return event;
}
BufferUses buffer_uses() const final {
return {BufferUse::Write(pred_slice_)};
}
private:
std::atomic<size_t> counter_;
BufferAllocation::Slice pred_slice_;
};
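// Test-only body thunk: increments an int32 counter stored in the given
// buffer slice and completes its event asynchronously.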
class BodyThunk : public Thunk {
public:
explicit BodyThunk(BufferAllocation::Slice counter_slice)
: Thunk(Kind::kKernel, {"body"}), counter_slice_(counter_slice) {}
tsl::AsyncValueRef<ExecuteEvent> Execute(const ExecuteParams& params) final {
auto event = tsl::MakeConstructedAsyncValueRef<ExecuteEvent>();
TF_ASSIGN_OR_RETURN(
se::DeviceMemoryBase counter_mem,
params.buffer_allocations->GetDeviceAddress(counter_slice_));
int32_t* counter = reinterpret_cast<int32_t*>(counter_mem.opaque());
++*counter;
params.intra_op_threadpool->getPool()->Schedule(
[event] { event.SetStateConcrete(); });
return event;
}
BufferUses buffer_uses() const final { return {}; }
private:
BufferAllocation::Slice counter_slice_;
};
TEST(WhileThunkTest, NonBlockingExecute) {
static constexpr size_t kNumIterations = 100;
BufferAllocation pred_alloc(0, sizeof(char), 0);
BufferAllocation cnt_alloc(1, sizeof(int32_t), 0);
BufferAllocation::Slice pred_slice(&pred_alloc, 0, sizeof(char));
BufferAllocation::Slice cnt_slice(&cnt_alloc, 0, sizeof(int32_t));
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<char> predicate = {false};
std::vector<int32_t> counter = {0};
buffers.emplace_back(se::DeviceMemoryBase(predicate.data(), sizeof(char)));
buffers.emplace_back(se::DeviceMemoryBase(counter.data(), sizeof(int32_t)));
BufferAllocations allocations(buffers);
ThunkSequence cond_sequence;
cond_sequence.push_back(
std::make_unique<CondThunk>(kNumIterations, pred_slice));
ThunkSequence body_sequence;
body_sequence.push_back(std::make_unique<BodyThunk>(cnt_slice));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk,
WhileThunk::Create({"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence)));
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "while-test", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.intra_op_threadpool = &device;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(counter[0], kNumIterations);
}
TEST(WhileThunkTest, NonBlockingExecuteWithTripCount) {
static constexpr size_t kNumIterations = 100;
BufferAllocation pred_alloc(0, sizeof(char), 0);
BufferAllocation cnt_alloc(1, sizeof(int32_t), 0);
BufferAllocation::Slice pred_slice(&pred_alloc, 0, sizeof(char));
BufferAllocation::Slice cnt_slice(&cnt_alloc, 0, sizeof(int32_t));
std::vector<MaybeOwningDeviceMemory> buffers;
std::vector<char> predicate = {false};
std::vector<int32_t> counter = {0};
buffers.emplace_back(se::DeviceMemoryBase(predicate.data(), sizeof(char)));
buffers.emplace_back(se::DeviceMemoryBase(counter.data(), sizeof(int32_t)));
BufferAllocations allocations(buffers);
ThunkSequence cond_sequence;
ThunkSequence body_sequence;
body_sequence.push_back(std::make_unique<BodyThunk>(cnt_slice));
TF_ASSERT_OK_AND_ASSIGN(
auto thunk, WhileThunk::Create(
{"while"}, pred_slice, std::move(cond_sequence),
std::move(body_sequence), kNumIterations));
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "while-test", 8);
Eigen::ThreadPoolDevice device(thread_pool.AsEigenThreadPool(),
thread_pool.NumThreads());
Thunk::ExecuteParams params;
params.buffer_allocations = &allocations;
params.intra_op_threadpool = &device;
auto execute_event = thunk->Execute(params);
tsl::BlockUntilReady(execute_event);
ASSERT_FALSE(execute_event.IsError());
EXPECT_EQ(counter[0], kNumIterations);
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/while_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/while_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9fac70a8-155a-4007-a2a4-694a429dfdaa | cpp | google/arolla | expr_visitor | arolla/expr/expr_visitor.cc | arolla/expr/expr_visitor_test.cc |
#include "arolla/expr/expr_visitor.h"
#include <cstddef>
#include <limits>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
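// Iterative DFS over the expression DAG: previsit_fn is called when a node is
// first reached and postvisit_fn once all of its dependencies have been
// processed; every node is visited exactly once.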
template <class PrevisitFn, class PostVisitFn>
void VisitorOrderImpl(const ExprNodePtr& root, PrevisitFn previsit_fn,
PostVisitFn postvisit_fn) {
struct Frame {
const ExprNodePtr& node;
size_t processed_deps_count = 0;
};
absl::flat_hash_set<Fingerprint> visited = {root->fingerprint()};
std::vector<Frame> stack = {Frame{root}};
while (!stack.empty()) {
auto& frame = stack.back();
if (frame.processed_deps_count == 0) {
previsit_fn(frame.node);
}
const auto& node_deps = frame.node->node_deps();
if (frame.processed_deps_count == node_deps.size()) {
postvisit_fn(frame.node);
stack.pop_back();
continue;
}
const auto& dep = node_deps[frame.processed_deps_count++];
if (visited.insert(dep->fingerprint()).second) {
stack.push_back(Frame{dep});
}
}
}
}
std::vector<ExprNodePtr> VisitorOrder(ExprNodePtr root) {
std::vector<ExprNodePtr> res_visits;
VisitorOrderImpl(
root, [](auto) {},
[&res_visits](const auto& node) { res_visits.push_back(node); });
return res_visits;
}
std::vector<std::pair<bool, ExprNodePtr>> PreAndPostVisitorOrder(
ExprNodePtr root) {
std::vector<std::pair<bool, ExprNodePtr>> res_visits;
VisitorOrderImpl(
root,
[&res_visits](const auto& node) { res_visits.emplace_back(true, node); },
[&res_visits](const auto& node) {
res_visits.emplace_back(false, node);
});
return res_visits;
}
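// Builds the post-order list of unique nodes reachable from `root`, then packs
// each node's dependency indices into a single adjacency array whose first
// nodes_.size() + 1 entries are offsets into the tail.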
PostOrder::PostOrder(const ExprNodePtr& root) {
struct Frame {
const ExprNodePtr& node;
size_t dep_idx = 0;
};
absl::flat_hash_map<Fingerprint, size_t> node_indices;
{
std::vector<Frame> stack;
stack.push_back(Frame{root});
while (!stack.empty()) {
auto& frame = stack.back();
const auto& deps = frame.node->node_deps();
while (frame.dep_idx < deps.size() &&
node_indices.contains(deps[frame.dep_idx]->fingerprint())) {
++frame.dep_idx;
}
if (frame.dep_idx < deps.size()) {
stack.push_back(Frame{deps[frame.dep_idx++]});
} else {
node_indices.emplace(frame.node->fingerprint(), nodes_.size());
nodes_.push_back(frame.node);
stack.pop_back();
}
}
}
{
size_t total_arc_count = 0;
for (const auto& node : nodes_) {
total_arc_count += node->node_deps().size();
}
adjacency_array_.resize(nodes_.size() + 1 + total_arc_count);
size_t i = 0;
size_t j = nodes_.size() + 1;
while (i < nodes_.size()) {
adjacency_array_[i] = j;
for (const auto& dep : nodes_[i++]->node_deps()) {
adjacency_array_[j++] = node_indices.at(dep->fingerprint());
}
}
adjacency_array_[nodes_.size()] = j;
}
}
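// Bottom-up transformation with memoization: every node is rebuilt with its
// transformed dependencies, passed through transform_fn, and the result is
// processed recursively. A nullptr cache entry marks a node that is still
// being processed, so reaching one again indicates an infinite loop of
// transformations; processed_node_limit bounds the total amount of work.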
absl::StatusOr<ExprNodePtr> DeepTransform(
const ExprNodePtr& root,
absl::FunctionRef<absl::StatusOr<ExprNodePtr>(ExprNodePtr)> transform_fn,
std::optional<LogTransformationFn> log_transformation_fn,
size_t processed_node_limit) {
constexpr size_t kSkipFirstStage = std::numeric_limits<size_t>::max();
constexpr auto infinite_loop_error = [](const ExprNodePtr& node) {
return absl::FailedPreconditionError(absl::StrFormat(
"infinite loop of node transformations containing node %s",
GetDebugSnippet(node)));
};
struct Frame {
ExprNodePtr node;
size_t dep_idx = 0;
Fingerprint new_node_fingerprint;
Fingerprint transformed_new_node_fingerprint;
std::optional<ExprNodePtr> original_node = std::nullopt;
};
absl::flat_hash_map<Fingerprint, ExprNodePtr> cache;
std::stack<Frame> stack;
cache.emplace(root->fingerprint(), nullptr);
stack.emplace(Frame{.node = root});
while (!stack.empty()) {
auto& frame = stack.top();
if (cache.size() > processed_node_limit) {
return absl::FailedPreconditionError(absl::StrFormat(
"too many processed nodes (%i), this probably means an infinite "
"transformation. Possibly caused by node %s",
cache.size(), GetDebugSnippet(frame.node)));
}
if (frame.dep_idx != kSkipFirstStage) {
const auto& deps = frame.node->node_deps();
while (
frame.dep_idx < deps.size() &&
!cache.emplace(deps[frame.dep_idx]->fingerprint(), nullptr).second) {
++frame.dep_idx;
}
if (frame.dep_idx < deps.size()) {
if (log_transformation_fn.has_value() &&
frame.original_node != std::nullopt) {
(*log_transformation_fn)(
deps[frame.dep_idx], frame.node,
DeepTransformStage::kNewChildAfterTransformation);
}
stack.emplace(Frame{.node = deps[frame.dep_idx++],
.original_node = frame.original_node});
continue;
}
std::vector<ExprNodePtr> new_deps(deps.size());
for (size_t i = 0; i < deps.size(); ++i) {
new_deps[i] = cache[deps[i]->fingerprint()];
if (new_deps[i] == nullptr) {
return infinite_loop_error(frame.node);
}
}
ASSIGN_OR_RETURN(auto new_node,
WithNewDependencies(frame.node, std::move(new_deps)));
if (log_transformation_fn.has_value()) {
(*log_transformation_fn)(new_node, frame.node,
DeepTransformStage::kWithNewDeps);
}
if (new_node->fingerprint() != frame.node->fingerprint()) {
if (auto [it, miss] = cache.emplace(new_node->fingerprint(), nullptr);
!miss) {
if (it->second == nullptr) {
return infinite_loop_error(frame.node);
}
cache[frame.node->fingerprint()] = it->second;
stack.pop();
continue;
}
}
ASSIGN_OR_RETURN(
auto transformed_new_node, transform_fn(new_node),
_ << "while transforming " << GetDebugSnippet(frame.node));
DCHECK_NE(transformed_new_node, nullptr);
if (transformed_new_node->fingerprint() == new_node->fingerprint()) {
cache[frame.node->fingerprint()] = std::move(transformed_new_node);
if (new_node->fingerprint() != frame.node->fingerprint()) {
cache[new_node->fingerprint()] = std::move(new_node);
}
stack.pop();
continue;
}
if (auto [it, miss] =
cache.emplace(transformed_new_node->fingerprint(), nullptr);
!miss) {
if (it->second == nullptr) {
return infinite_loop_error(frame.node);
}
cache[frame.node->fingerprint()] = it->second;
if (new_node->fingerprint() != frame.node->fingerprint()) {
cache[new_node->fingerprint()] = it->second;
}
stack.pop();
continue;
}
frame.dep_idx = kSkipFirstStage;
frame.new_node_fingerprint = new_node->fingerprint();
frame.transformed_new_node_fingerprint =
transformed_new_node->fingerprint();
stack.emplace(Frame{.node = transformed_new_node,
.original_node = transformed_new_node});
continue;
}
const auto& node_result = cache.at(frame.transformed_new_node_fingerprint);
DCHECK_NE(node_result, nullptr);
cache[frame.node->fingerprint()] = node_result;
if (frame.new_node_fingerprint != frame.node->fingerprint()) {
cache[frame.new_node_fingerprint] = node_result;
}
stack.pop();
}
auto& root_result = cache.at(root->fingerprint());
DCHECK_NE(root_result, nullptr);
return std::move(root_result);
}
} |
#include "arolla/expr/expr_visitor.h"
#include <cstddef>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::testing::DummyOp;
using ::arolla::testing::EqualsExpr;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Pair;
using ::testing::Pointer;
size_t CountNodes(const ExprNodePtr& expr) {
size_t result = 0;
return PostOrderTraverse(
expr,
[&](const ExprNodePtr& ,
absl::Span<const size_t* const> ) { return ++result; });
}
class ExprVisitorTest : public ::testing::Test {
public:
template <typename... Args>
ExprNodePtr Bar(Args&&... args) {
return CallOp(bar_, {std::forward<Args>(args)...}).value();
}
template <typename... Args>
ExprNodePtr Baz(Args&&... args) {
return CallOp(baz_, {std::forward<Args>(args)...}).value();
}
template <typename... Args>
ExprNodePtr Qux(Args&&... args) {
return CallOp(qux_, {std::forward<Args>(args)...}).value();
}
protected:
ExprOperatorPtr bar_ = std::make_shared<DummyOp>(
"bar", ExprOperatorSignature::MakeVariadicArgs());
ExprOperatorPtr baz_ = std::make_shared<DummyOp>(
"baz", ExprOperatorSignature::MakeVariadicArgs());
ExprOperatorPtr qux_ = std::make_shared<DummyOp>(
"qux", ExprOperatorSignature::MakeVariadicArgs());
};
TEST_F(ExprVisitorTest, PostOrder_Trivial) {
auto x0 = Leaf("x0");
PostOrder post_order(x0);
ASSERT_THAT(post_order.nodes(), ElementsAre(Pointer(x0.get())));
ASSERT_THAT(post_order.dep_indices(0), ElementsAre());
}
TEST_F(ExprVisitorTest, PostOrder) {
auto x0 = Leaf("x0");
auto x1 = Leaf("x1");
auto x2 = Leaf("x2");
auto add01 = Bar(x0, x1);
auto add012 = Bar(add01, x0, x1, x2);
PostOrder post_order(add012);
ASSERT_THAT(
post_order.nodes(),
ElementsAre(Pointer(x0.get()), Pointer(x1.get()), Pointer(add01.get()),
Pointer(x2.get()), Pointer(add012.get())));
ASSERT_THAT(post_order.dep_indices(0), ElementsAre());
ASSERT_THAT(post_order.dep_indices(1), ElementsAre());
ASSERT_THAT(post_order.dep_indices(2), ElementsAre(0, 1));
ASSERT_THAT(post_order.dep_indices(3), ElementsAre());
ASSERT_THAT(post_order.dep_indices(4), ElementsAre(2, 0, 1, 3));
}
TEST_F(ExprVisitorTest, VisitOrder) {
auto x0 = Leaf("x0");
auto x1 = Leaf("x1");
auto x2 = Leaf("x2");
auto add01 = Bar(x0, x1);
auto add012 = Bar(add01, x2);
std::vector<ExprNodePtr> actual_order = VisitorOrder(add012);
ASSERT_THAT(actual_order, ElementsAre(Pointer(x0.get()), Pointer(x1.get()),
Pointer(add01.get()), Pointer(x2.get()),
Pointer(add012.get())));
}
TEST_F(ExprVisitorTest, PreAndPostVisitorOrder) {
auto x0 = Leaf("x0");
auto x1 = Leaf("x1");
auto x2 = Leaf("x2");
auto add01 = Bar(x0, x1);
auto add012 = Bar(add01, x2);
std::vector<std::pair<bool, ExprNodePtr>> actual_order =
PreAndPostVisitorOrder(add012);
ASSERT_THAT(
actual_order,
ElementsAre(
Pair(true, Pointer(add012.get())), Pair(true, Pointer(add01.get())),
Pair(true, Pointer(x0.get())), Pair(false, Pointer(x0.get())),
Pair(true, Pointer(x1.get())), Pair(false, Pointer(x1.get())),
Pair(false, Pointer(add01.get())), Pair(true, Pointer(x2.get())),
Pair(false, Pointer(x2.get())), Pair(false, Pointer(add012.get()))));
}
TEST_F(ExprVisitorTest, PostOrderTraverseBool) {
ASSERT_TRUE(PostOrderTraverse(
Leaf("x"),
[](ExprNodePtr, absl::Span<bool const* const>) -> bool { return true; }));
}
TEST_F(ExprVisitorTest, PostOrderTraverseStatusOrBool) {
ASSERT_THAT(PostOrderTraverse(Leaf("x"),
[](ExprNodePtr, absl::Span<bool const* const>) {
return absl::StatusOr<bool>(true);
}),
IsOkAndHolds(true));
}
TEST_F(ExprVisitorTest, VisitLeaf) { ASSERT_EQ(CountNodes(Leaf("x")), 1); }
TEST_F(ExprVisitorTest, VisitOperator) {
ASSERT_EQ(CountNodes(Bar(Leaf("x"), Leaf("y"))), 3);
}
TEST_F(ExprVisitorTest, LargeAst) {
ASSERT_EQ(CountNodes(Bar(Bar(Leaf("x"), Leaf("y")), Leaf("x"))), 4);
}
TEST_F(ExprVisitorTest, Transform_WithStatusOrFn) {
auto expr = Bar(Bar(Baz(Leaf("a"), Leaf("b")), Leaf("c")), Leaf("d"));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr expr_with_qux,
Transform(expr, [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
if (node->op() == bar_) {
return WithNewOperator(node, qux_);
}
return node;
}));
ASSERT_THAT(
expr_with_qux,
EqualsExpr(Qux(Qux(Baz(Leaf("a"), Leaf("b")), Leaf("c")), Leaf("d"))));
EXPECT_THAT(expr_with_qux->node_deps()[0]->node_deps()[0].get(),
Eq(expr->node_deps()[0]->node_deps()[0].get()));
}
TEST_F(ExprVisitorTest, Transform_WithNoStatusFn) {
auto expr = Bar(Bar(Baz(Leaf("a"), Leaf("b")), Leaf("c")), Leaf("d"));
EXPECT_THAT(Transform(expr,
[&](ExprNodePtr node) -> ExprNodePtr {
if (node->op() == bar_) {
return node->node_deps()[0];
} else {
return node;
}
}),
IsOkAndHolds(EqualsExpr(expr->node_deps()[0]->node_deps()[0])));
}
TEST_F(ExprVisitorTest, Transform_NoChangeRequired) {
auto expr = Baz(Bar(Baz(Leaf("a"), Leaf("b")), Leaf("c")), Leaf("d"));
EXPECT_THAT(Transform(expr, [](ExprNodePtr node) { return node; }),
IsOkAndHolds(EqualsExpr(expr)));
}
class DeepTransformTest : public ::testing::Test {
public:
template <typename... Args>
ExprNodePtr A(Args&&... args) {
return CallOp(a_, {std::forward<Args>(args)...}).value();
}
template <typename... Args>
ExprNodePtr B(Args&&... args) {
return CallOp(b_, {std::forward<Args>(args)...}).value();
}
template <typename... Args>
ExprNodePtr S(Args&&... args) {
return CallOp(s_, {std::forward<Args>(args)...}).value();
}
template <typename... Args>
ExprNodePtr C(Args&&... args) {
return CallOp(c_, {std::forward<Args>(args)...}).value();
}
auto SabTransform()
-> std::function<absl::StatusOr<ExprNodePtr>(ExprNodePtr)> {
return [this, visited = absl::flat_hash_set<Fingerprint>()](
ExprNodePtr node) mutable -> absl::StatusOr<ExprNodePtr> {
EXPECT_TRUE(visited.emplace(node->fingerprint()).second)
<< "duplicate call to transform_fn";
if (node->op() == s_) {
std::vector<absl::StatusOr<ExprNodePtr>> new_deps;
for (auto& dep : node->node_deps()) {
new_deps.push_back(WithNewOperator(dep, s_));
}
return CallOp(a_, new_deps);
}
if (node->op() == a_) {
std::vector<absl::StatusOr<ExprNodePtr>> new_deps;
for (auto& dep : node->node_deps()) {
new_deps.push_back(WithNewOperator(dep, s_));
}
return CallOp(b_, new_deps);
}
if (node->op() == c_) {
std::vector<absl::StatusOr<ExprNodePtr>> new_deps;
for (auto& dep : node->node_deps()) {
new_deps.push_back(CallOp(b_, {dep}));
}
return CallOp(b_, new_deps);
}
return node;
};
}
private:
ExprOperatorPtr a_ =
std::make_shared<DummyOp>("a", ExprOperatorSignature::MakeVariadicArgs());
ExprOperatorPtr b_ =
std::make_shared<DummyOp>("b", ExprOperatorSignature::MakeVariadicArgs());
ExprOperatorPtr c_ =
std::make_shared<DummyOp>("c", ExprOperatorSignature::MakeVariadicArgs());
ExprOperatorPtr s_ =
std::make_shared<DummyOp>("s", ExprOperatorSignature::MakeVariadicArgs());
};
TEST_F(DeepTransformTest, Trivial) {
ASSERT_THAT(DeepTransform(A(), SabTransform()),
IsOkAndHolds(EqualsExpr(B())));
ASSERT_THAT(DeepTransform(B(), SabTransform()),
IsOkAndHolds(EqualsExpr(B())));
ASSERT_THAT(DeepTransform(S(), SabTransform()),
IsOkAndHolds(EqualsExpr(B())));
}
TEST_F(DeepTransformTest, CacheHitCoverage) {
{
auto expr = B(A(A()), A(S()));
auto expected = B(B(B()), B(B()));
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
{
auto expr = B(B(S()), A(S()));
auto expected = B(B(B()), B(B()));
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
}
TEST_F(DeepTransformTest, TooManyProcessedNodes) {
ASSERT_THAT(DeepTransform(
Literal<int>(0),
[](ExprNodePtr node) {
return Literal<int>(node->qvalue()->UnsafeAs<int>() + 1);
},
std::nullopt,
1000),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("too many processed nodes")));
}
TEST_F(DeepTransformTest, LogTransformationFn) {
std::string trace;
auto transformations_logger = [&trace](ExprNodePtr a, ExprNodePtr b,
DeepTransformStage stage) {
if (stage == DeepTransformStage::kWithNewDeps) {
if (a->fingerprint() != b->fingerprint()) {
trace += GetDebugSnippet(b) +
" got new dependencies: " + GetDebugSnippet(a) + "\n";
}
} else if (stage == DeepTransformStage::kNewChildAfterTransformation) {
trace += GetDebugSnippet(b) + " contains " + GetDebugSnippet(a) + "\n";
}
};
ASSERT_OK(DeepTransform(C(A()), SabTransform(),
transformations_logger));
EXPECT_EQ(
"c(a():INT32):INT32 got new dependencies: c(b():INT32):INT32\n"
"b(b(...):INT32):INT32 contains b(b():INT32):INT32\n",
trace);
}
TEST_F(DeepTransformTest, InfiniteLoop) {
ASSERT_THAT(DeepTransform(S(), [&](ExprNodePtr) { return S(S()); }),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("infinite loop of node transformations "
"containing node s(s():INT32):INT32")));
}
TEST_F(DeepTransformTest, UnaryRecursion) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 10; ++i) {
expr = S(expr);
expected = B(expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
TEST_F(DeepTransformTest, UnaryRecursionStress) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 1000; ++i) {
expr = S(expr);
expected = B(expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
TEST_F(DeepTransformTest, BinaryRecursion) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 10; ++i) {
expr = S(expr, expr);
expected = B(expected, expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
TEST_F(DeepTransformTest, BinaryRecursionStress) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 1000; ++i) {
expr = S(expr, expr);
expected = B(expected, expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
TEST_F(DeepTransformTest, TernaryRecursionStress) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 1000; ++i) {
expr = S(expr, expr, expr);
expected = B(expected, expected, expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
TEST_F(DeepTransformTest, ComplexRecursionStress) {
auto expr = S();
auto expected = B();
for (int i = 0; i < 1000; ++i) {
expr = S(A(expr), B(expr, expected), expr);
expected = B(B(expected), B(expected, expected), expected);
}
ASSERT_THAT(DeepTransform(expr, SabTransform()),
IsOkAndHolds(EqualsExpr(expected)));
}
}
} |
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_visitor.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_visitor_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c
d805fca5-8a9a-4455-a3fa-300e6ce5fe81 | cpp | google/arolla | expr_compiler | arolla/serving/expr_compiler.cc | arolla/serving/expr_compiler_test.cc |
#include "arolla/serving/expr_compiler.h"
#include <optional>
#include "absl/base/no_destructor.h"
#include "arolla/expr/optimization/optimizer.h"
namespace arolla::serving_impl {
absl::NoDestructor<std::optional<expr::Optimizer>>
ExprCompilerDefaultOptimizer::optimizer_;
} |
#include "arolla/serving/expr_compiler.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/thread_safe_model_executor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/io/accessors_input_loader.h"
#include "arolla/io/accessors_slot_listener.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/slot_listener.h"
#include "arolla/io/tuple_input_loader.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/simple_executable.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/status_macros_backport.h"
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::CompiledExpr;
using ::arolla::GetQType;
using ::arolla::InputLoaderPtr;
using ::arolla::SlotListener;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::Leaf;
using ::arolla::testing::WithExportValueAnnotation;
using ::testing::_;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
struct TestInput {
float x;
float y;
};
struct TestSideOutput {
std::optional<float> subtract;
};
absl::StatusOr<std::unique_ptr<arolla::InputLoader<TestInput>>>
CreateInputLoader() {
return ::arolla::CreateAccessorsInputLoader<TestInput>(
"x", [](const auto& x) { return x.x; },
"y", [](const auto& x) { return x.y; });
}
absl::StatusOr<std::unique_ptr<SlotListener<TestSideOutput>>>
CreateSlotListener() {
return ::arolla::CreateAccessorsSlotListener<TestSideOutput>(
"subtract", [](float x, TestSideOutput* out) { out->subtract = x; });
}
absl::StatusOr<ExprNodePtr> CreateExpr() {
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return WithExportValueAnnotation(add_expr, "subtract", subtract_expr);
}
absl::StatusOr<std::unique_ptr<CompiledExpr>> CreateCompiledExpr() {
ASSIGN_OR_RETURN(auto add_expr, CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSIGN_OR_RETURN(auto subtract_expr,
CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
return ::arolla::expr::CompileForDynamicEvaluation(
::arolla::expr::DynamicEvaluationEngineOptions(), add_expr,
{{"x", GetQType<float>()}, {"y", GetQType<float>()}},
{{"subtract", subtract_expr}});
}
}
namespace arolla {
namespace {
class TestInplaceCompiledExpr : public InplaceCompiledExpr {
public:
TestInplaceCompiledExpr()
: InplaceCompiledExpr(
{}, GetQType<float>(),
{}) {}
absl::StatusOr<std::unique_ptr<BoundExpr>> InplaceBind(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
TypedSlot output_slot,
const absl::flat_hash_map<std::string, TypedSlot>& named_output_slots)
const final {
return std::make_unique<SimpleBoundExpr>(
input_slots, output_slot,
std::vector<std::unique_ptr<BoundOperator>>{},
std::vector<std::unique_ptr<BoundOperator>>{},
named_output_slots);
}
};
class ExprCompilerTest : public ::testing::Test {
public:
void SetUp() override {
ASSERT_OK_AND_ASSIGN(auto add_expr,
expr::CallOp("math.add", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(auto subtract_expr,
expr::CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
ASSERT_OK_AND_ASSIGN(expr_, CreateExpr());
ASSERT_OK_AND_ASSIGN(compiled_expr_, CreateCompiledExpr());
}
expr::ExprNodePtr expr_;
std::unique_ptr<const CompiledExpr> compiled_expr_;
};
TEST_F(ExprCompilerTest, CompileExprNodePtr) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile(expr_));
ASSERT_OK_AND_ASSIGN(
auto model_with_options,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile<ExprCompilerFlags::kEvalWithOptions>(expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
static_assert(
std::is_same_v<decltype(model_with_options),
std::function<absl::StatusOr<std::optional<float>>(
const ModelFunctionOptions&, const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
EXPECT_THAT(model_with_options({}, input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprNodePtrWithSideOutput) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&, TestSideOutput*)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, CompileCompiledExpr) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileCompiledExprForceNonOptionalOutput) {
ASSERT_OK_AND_ASSIGN(auto model, (ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.ForceNonOptionalOutput()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<float>(const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileCompiledExprWithSideOutput) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(*compiled_expr_));
static_assert(
std::is_same_v<decltype(model),
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&, TestSideOutput*)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, CompileExprOperatorWithTuple) {
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<std::tuple<float, float>, float>())
.CompileOperator(expr::LookupOperator("math.add")));
static_assert(
std::is_same_v<decltype(model), std::function<absl::StatusOr<float>(
const std::tuple<float, float>&)>>);
EXPECT_THAT(model({28, 29}), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprOperatorWithTypedRefs) {
ASSERT_OK_AND_ASSIGN(
auto model, (ExprCompiler<absl::Span<const TypedRef>, TypedValue>())
.CompileOperator(expr::LookupOperator("math.add"),
{GetQType<float>(), GetQType<float>()}));
static_assert(
std::is_same_v<decltype(model), std::function<absl::StatusOr<TypedValue>(
const absl::Span<const TypedRef>&)>>);
auto a = TypedValue::FromValue<float>(28);
auto b = TypedValue::FromValue<float>(29);
std::vector<TypedRef> args{a.AsRef(), b.AsRef()};
ASSERT_OK_AND_ASSIGN(TypedValue res, model(args));
EXPECT_THAT(res.As<float>(), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, Ownership) {
ExprCompiler<TestInput, std::optional<float>, TestSideOutput> mc;
mc.SetInputLoader(CreateInputLoader());
ExprCompiler<TestInput, std::optional<float>, TestSideOutput> other_mc =
std::move(mc);
other_mc.SetSlotListener(CreateSlotListener());
mc = std::move(other_mc);
mc.AllowOutputCasting();
ASSERT_OK(mc.Compile(expr_));
}
TEST_F(ExprCompilerTest, Move) {
auto set_input_loader = [](auto mc) {
return std::move(mc).SetInputLoader(CreateInputLoader());
};
ASSERT_OK_AND_ASSIGN(
auto model,
set_input_loader(
ExprCompiler<TestInput, std::optional<float>, TestSideOutput>()
.SetSlotListener(CreateSlotListener()))
.SetExperimentalArenaAllocator()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, Optimizer) {
auto replace_add_with_subtract =
[](expr::ExprNodePtr x) -> absl::StatusOr<expr::ExprNodePtr> {
if (expr::IsBackendOperator(*expr::DecayRegisteredOperator(x->op()),
"math.add")) {
return expr::WithNewOperator(x, *expr::LookupOperator("math.subtract"));
}
return x;
};
ASSERT_OK_AND_ASSIGN(auto model,
(ExprCompiler<TestInput, std::optional<float>>())
.SetInputLoader(CreateInputLoader())
.SetExprOptimizer(replace_add_with_subtract)
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input), IsOkAndHolds(-1));
}
TEST_F(ExprCompilerTest, OtherOptionsSmokeTest) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetExperimentalArenaAllocator()
.SetAlwaysCloneThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(model(input, nullptr), IsOkAndHolds(57));
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, DefaultThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
EXPECT_THAT((model.target<expr::ThreadSafePoolModelExecutor<
TestInput, std::optional<float>, TestSideOutput>>()),
NotNull());
}
TEST_F(ExprCompilerTest, DefaultThreadSafetyPolicy_Codegen) {
ASSERT_OK_AND_ASSIGN(auto eval_model, (ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.Compile(*compiled_expr_));
ASSERT_OK_AND_ASSIGN(auto codegen_model,
(ExprCompiler<TestInput, float>())
.SetInputLoader(CreateInputLoader())
.Compile(TestInplaceCompiledExpr()));
EXPECT_THAT(
(eval_model
.target<expr::ThreadSafePoolModelExecutor<TestInput, float>>()),
NotNull());
EXPECT_THAT(
(codegen_model
.target<expr::ThreadSafePoolModelExecutor<TestInput, float>>()),
IsNull());
}
TEST_F(ExprCompilerTest, PoolThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetPoolThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
EXPECT_THAT((model.target<expr::ThreadSafePoolModelExecutor<
TestInput, std::optional<float>, TestSideOutput>>()),
NotNull());
}
TEST_F(ExprCompilerTest, AlwaysCloneThreadSafetyPolicy) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetAlwaysCloneThreadSafetyPolicy()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, ThreadUnsafe) {
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(CreateSlotListener())
.SetThreadUnsafe_I_SWEAR_TO_COPY_MODEL_FUNCTION_BEFORE_CALL()
.AllowOutputCasting()
.Compile(expr_));
TestInput input{.x = 28, .y = 29};
TestSideOutput side_output;
EXPECT_THAT(model(input, &side_output), IsOkAndHolds(57));
EXPECT_THAT(side_output.subtract, Eq(-1));
}
TEST_F(ExprCompilerTest, ForceNonOptionalOutput) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("math.neg", {Leaf("x")}));
ASSERT_OK_AND_ASSIGN(
auto input_loader,
::arolla::CreateAccessorsInputLoader<std::optional<float>>(
"x", [](const auto& x) { return OptionalValue<float>(x); }));
ASSERT_OK_AND_ASSIGN(
auto model,
(ExprCompiler<std::optional<float>, std::optional<float>>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.Compile(expr));
EXPECT_THAT(model(std::nullopt), IsOkAndHolds(std::nullopt));
EXPECT_THAT((ExprCompiler<std::optional<float>, float>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.AllowOutputCasting()
.Compile(expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("model output is deduced to optional, while "
"non-optional is requested")));
ASSERT_OK_AND_ASSIGN(
auto full_model,
(ExprCompiler<std::optional<float>, float>())
.SetInputLoader(MakeNotOwningInputLoader(input_loader.get()))
.ForceNonOptionalOutput()
.Compile(expr));
EXPECT_THAT(full_model(-57), IsOkAndHolds(57));
EXPECT_THAT(full_model(std::nullopt),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("expects a present value, got missing")));
}
class VoidSlotListener : public StaticSlotListener<void> {
public:
VoidSlotListener() : StaticSlotListener<void>({}) {}
absl::StatusOr<BoundSlotListener<Output>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots)
const final {
return absl::UnimplementedError("unimplemented");
}
private:
};
TEST_F(ExprCompilerTest, Errors) {
EXPECT_THAT((ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetSlotListener(CreateSlotListener())
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("InputLoader is not specified, use "
"ExprCompiler::SetInputLoader()")));
EXPECT_THAT(
(ExprCompiler<TestInput, std::optional<float>, TestSideOutput>())
.SetInputLoader(CreateInputLoader())
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("SlotListener is not specified, use "
"ExprCompiler::SetSlotListener() or ExprCompiler<...> "
"without SideOutput template parameter")));
EXPECT_THAT((ExprCompiler<TestInput, std::optional<float>, void>())
.SetInputLoader(CreateInputLoader())
.SetSlotListener(std::unique_ptr<SlotListener<void>>(
new VoidSlotListener()))
.Compile(expr_),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("SlotListener with SideOutput==void is not "
"supported by ExprCompiler")));
EXPECT_THAT(
(ExprCompiler<std::tuple<float, float>, std::optional<float>>())
.SetInputLoader(
TupleInputLoader<std::tuple<float, float>>::Create({"x", "y"}))
.CompileOperator(nullptr),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("InputLoader is specified, but not needed for "
"ExprCompiler::CompilerOperator")));
}
TEST_F(ExprCompilerTest, CompileExprSet) {
ASSERT_OK_AND_ASSIGN(
auto models,
CompileExprSet(
ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
absl::flat_hash_map<std::string, absl::StatusOr<expr::ExprNodePtr>>{
{"first", expr_}, {"second", expr_}}));
ASSERT_THAT(models,
UnorderedElementsAre(Pair("first", _), Pair("second", _)));
static_assert(
std::is_same_v<std::decay_t<decltype(models[""])>,
std::function<absl::StatusOr<std::optional<float>>(
const TestInput&)>>);
TestInput input{.x = 28, .y = 29};
EXPECT_THAT(models["first"](input), IsOkAndHolds(57));
}
TEST_F(ExprCompilerTest, CompileExprSet_Errors) {
EXPECT_THAT(
CompileExprSet(
ExprCompiler<TestInput, std::optional<float>>()
.SetInputLoader(CreateInputLoader())
.AllowOutputCasting(),
absl::flat_hash_map<std::string, absl::StatusOr<expr::ExprNodePtr>>{
{"first", expr_},
{"bad_model", absl::FailedPreconditionError("very bad model")}}),
StatusIs(absl::StatusCode::kFailedPrecondition,
"very bad model; while initializing model \"bad_model\""));
}
}
} |
https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/expr_compiler.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/serving/expr_compiler_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c
bd5b418b-318e-4f6d-837b-518a563c09b1 | cpp | tensorflow/tensorflow | xla_rewrite_util | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc |
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
namespace tensorflow {
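// Erases each cluster_func together with its parent tf_device.parallel_execute
// and any TPUPartitionedInputV2/OutputV2 ops that become unused; fails if the
// parent is not a ParallelExecuteOp or still has uses.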
mlir::LogicalResult EraseClusterFuncs(
llvm::MutableArrayRef<mlir::tf_device::ClusterFuncOp> to_be_erased) {
for (auto cluster : to_be_erased) {
auto old_parallel_execute =
cluster->getParentOfType<mlir::tf_device::ParallelExecuteOp>();
if (!old_parallel_execute) {
LOG(ERROR) << "Parent op of cluster " << cluster.getOperationName().str()
<< " is not ParallelExecuteOp.";
return mlir::failure();
}
for (auto result : old_parallel_execute.getExecuteOutputs()) {
for (mlir::Operation* user :
llvm::make_early_inc_range(result.getUsers())) {
if (llvm::isa<mlir::TF::TPUPartitionedOutputV2Op>(user)) {
assert(user->use_empty());
user->erase();
}
}
}
for (auto operand : cluster.getOperands()) {
mlir::Operation* def = operand.getDefiningOp();
if (operand.hasOneUse() &&
llvm::isa_and_nonnull<mlir::TF::TPUPartitionedInputV2Op>(def)) {
operand.dropAllUses();
def->erase();
}
}
if (!old_parallel_execute->use_empty()) {
LOG(ERROR) << "Use of parallel execute op "
<< old_parallel_execute.getOperationName().str()
<< " is not empty.";
return mlir::failure();
}
old_parallel_execute->erase();
}
return mlir::success();
}
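// Creates a new parallel_execute op that reserves num_cores_per_replica
// regions in place of the region containing cluster_func, moves the remaining
// regions over unchanged, and returns the index of the cluster_func region in
// the old op.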
int MovePreservedParallelExecuteChildren(
int num_cores_per_replica,
llvm::SmallVector<mlir::Type, 8>& concatenated_output_types,
mlir::OpBuilder* builder, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::tf_device::ParallelExecuteOp old_parallel_execute,
mlir::tf_device::ParallelExecuteOp* new_parallel_execute) {
const size_t num_moved_children =
old_parallel_execute.getRegions().size() - 1;
*new_parallel_execute = builder->create<mlir::tf_device::ParallelExecuteOp>(
old_parallel_execute->getLoc(),
num_moved_children + num_cores_per_replica, concatenated_output_types);
int cluster_idx = -1;
for (size_t child_idx = 0;
child_idx < old_parallel_execute.getRegions().size(); ++child_idx) {
auto& block = old_parallel_execute.GetRegionBlockWithIndex(child_idx);
if (cluster_func->getBlock() == &block) {
assert(cluster_idx == -1);
cluster_idx = child_idx;
}
}
assert(cluster_idx != -1);
for (int child_idx = 0; child_idx < num_moved_children; ++child_idx) {
int old_idx = child_idx >= cluster_idx ? child_idx + 1 : child_idx;
int new_idx = child_idx >= cluster_idx ? child_idx + num_cores_per_replica
: child_idx;
new_parallel_execute->getRegions()[new_idx].takeBody(
old_parallel_execute.getRegions()[old_idx]);
}
return cluster_idx;
}
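// Wraps `op` in a tf_device.launch op pinned to `device`, moving `op` in front
// of the launch's terminator and restoring the builder's insertion point
// afterwards.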
mlir::tf_device::LaunchOp WrapOpInLaunch(mlir::OpBuilder* builder,
mlir::Location loc,
mlir::Operation* op,
llvm::StringRef device) {
mlir::OpBuilder::InsertPoint insert_point = builder->saveInsertionPoint();
auto launch = builder->create<mlir::tf_device::LaunchOp>(
loc, builder->getStringAttr(device), op->getResultTypes());
launch.getBody().push_back(new mlir::Block);
builder->setInsertionPointToEnd(&launch.GetBody());
builder->create<mlir::tf_device::ReturnOp>(loc, op->getResults());
op->moveBefore(launch.GetBody().getTerminator());
builder->restoreInsertionPoint(insert_point);
return launch;
}
} |
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
#include <string>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
TEST(XlaRewriteUtilTest, TestEraseClusterFuncs) {
static const char* const module_str =
R"(
module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
func.func @convert_cluster_func(%arg0: tensor<i32>) -> () {
%2 = "tf_device.parallel_execute"() ({
%3 = "tf_device.cluster_func"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0", func = @func} : (tensor<i32>) -> tensor<i32>
tf_device.return %3 : tensor<i32>
}) : () -> tensor<i32>
return
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
return %arg0 : tensor<i32>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
EXPECT_TRUE(mlir::succeeded(tensorflow::EraseClusterFuncs(cluster_func_ops)));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> new_cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
new_cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(new_cluster_func_ops.size(), 0);
}
TEST(XlaRewriteUtilTest, TestWrapOpInLaunch) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
std::string device = "/job:localhost/replica:0/task:0/device:CPU:0";
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
mlir::OpBuilder builder(&context);
auto loc = cluster->getLoc();
auto launch_op = tensorflow::WrapOpInLaunch(&builder, loc, cluster, device);
EXPECT_TRUE(llvm::isa<mlir::tf_device::LaunchOp>(launch_op));
launch_op->erase();
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7c4f1970-d8d3-4a57-b484-7690362ef1d8 | cpp | google/langsvr | comparators | include/langsvr/lsp/comparators.h | src/lsp/comparators_test.cc |
#ifndef LANGSVR_LSP_COMPARATORS_H_
#define LANGSVR_LSP_COMPARATORS_H_
#include "langsvr/lsp/lsp.h"
namespace langsvr::lsp {
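// Three-way comparison of positions, ordered by line and then by character;
// returns -1, 0 or 1.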
inline int Compare(Position a, Position b) {
if (a.line < b.line) {
return -1;
}
if (a.line > b.line) {
return 1;
}
if (a.character < b.character) {
return -1;
}
if (a.character > b.character) {
return 1;
}
return 0;
}
inline bool operator<(Position a, Position b) {
return Compare(a, b) < 0;
}
inline bool operator<=(Position a, Position b) {
return Compare(a, b) <= 0;
}
inline bool operator>(Position a, Position b) {
return Compare(a, b) > 0;
}
inline bool operator>=(Position a, Position b) {
return Compare(a, b) >= 0;
}
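// Whether position `p` lies inside range `r`, treating r.end as exclusive
// (ContainsExclusive) or inclusive (ContainsInclusive).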
inline bool ContainsExclusive(Range r, Position p) {
return p >= r.start && p < r.end;
}
inline bool ContainsInclusive(Range r, Position p) {
return p >= r.start && p <= r.end;
}
}
#endif |
#include "include/langsvr/lsp/comparators.h"
#include "langsvr/lsp/lsp.h"
#include "langsvr/lsp/printer.h"
#include "gmock/gmock.h"
namespace langsvr::lsp {
namespace {
TEST(ComparatorsTest, Position) {
const Position pos_1_1{1, 1};
const Position pos_1_2{1, 2};
const Position pos_2_1{2, 1};
const Position pos_2_2{2, 2};
EXPECT_EQ(Compare(pos_1_1, pos_1_1), 0);
EXPECT_EQ(Compare(pos_1_1, pos_1_2), -1);
EXPECT_EQ(Compare(pos_1_2, pos_1_1), 1);
EXPECT_TRUE(pos_1_1 == pos_1_1);
EXPECT_FALSE(pos_1_1 != pos_1_1);
EXPECT_FALSE(pos_1_1 < pos_1_1);
EXPECT_FALSE(pos_1_1 > pos_1_1);
EXPECT_TRUE(pos_1_1 <= pos_1_1);
EXPECT_TRUE(pos_1_1 >= pos_1_1);
EXPECT_FALSE(pos_1_1 == pos_1_2);
EXPECT_TRUE(pos_1_1 != pos_1_2);
EXPECT_TRUE(pos_1_1 < pos_1_2);
EXPECT_FALSE(pos_1_1 > pos_1_2);
EXPECT_TRUE(pos_1_1 <= pos_1_2);
EXPECT_FALSE(pos_1_1 >= pos_1_2);
EXPECT_FALSE(pos_1_2 == pos_1_1);
EXPECT_TRUE(pos_1_2 != pos_1_1);
EXPECT_FALSE(pos_1_2 < pos_1_1);
EXPECT_TRUE(pos_1_2 > pos_1_1);
EXPECT_FALSE(pos_1_2 <= pos_1_1);
EXPECT_TRUE(pos_1_2 >= pos_1_1);
EXPECT_FALSE(pos_1_1 == pos_2_1);
EXPECT_TRUE(pos_1_1 != pos_2_1);
EXPECT_TRUE(pos_1_1 < pos_2_1);
EXPECT_FALSE(pos_1_1 > pos_2_1);
EXPECT_TRUE(pos_1_1 <= pos_2_1);
EXPECT_FALSE(pos_1_1 >= pos_2_1);
EXPECT_FALSE(pos_2_1 == pos_1_1);
EXPECT_TRUE(pos_2_1 != pos_1_1);
EXPECT_FALSE(pos_2_1 < pos_1_1);
EXPECT_TRUE(pos_2_1 > pos_1_1);
EXPECT_FALSE(pos_2_1 <= pos_1_1);
EXPECT_TRUE(pos_2_1 >= pos_1_1);
std::array positions = {
pos_2_1,
pos_1_2,
pos_1_1,
pos_2_2,
};
std::sort(positions.begin(), positions.end());
std::array positions_sorted = {
pos_1_1,
pos_1_2,
pos_2_1,
pos_2_2,
};
EXPECT_EQ(positions, positions_sorted);
}
TEST(ComparatorsTest, ContainsExclusive) {
const Position pos_1_1{1, 1};
const Position pos_1_2{1, 2};
const Position pos_2_1{2, 1};
const Position pos_2_2{2, 2};
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_1}, pos_1_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_1}, pos_1_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_1}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_1}, pos_2_2));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_1_2}, pos_1_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_2}, pos_1_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_2}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_1_2}, pos_2_2));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_2_1}, pos_1_1));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_2_1}, pos_1_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_2_1}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_2_1}, pos_2_2));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_2_2}, pos_1_1));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_2_2}, pos_1_2));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_1, pos_2_2}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_1, pos_2_2}, pos_2_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_1_2}, pos_1_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_1_2}, pos_1_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_1_2}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_1_2}, pos_2_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_2_1}, pos_1_1));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_2, pos_2_1}, pos_1_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_2_1}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_2_1}, pos_2_2));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_2_2}, pos_1_1));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_2, pos_2_2}, pos_1_2));
EXPECT_TRUE(ContainsExclusive(Range{pos_1_2, pos_2_2}, pos_2_1));
EXPECT_FALSE(ContainsExclusive(Range{pos_1_2, pos_2_2}, pos_2_2));
}
TEST(ComparatorsTest, ContainsInclusive) {
const Position pos_1_1{1, 1};
const Position pos_1_2{1, 2};
const Position pos_2_1{2, 1};
const Position pos_2_2{2, 2};
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_1_1}, pos_1_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_1_1}, pos_1_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_1_1}, pos_2_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_1_1}, pos_2_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_1_2}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_1_2}, pos_1_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_1_2}, pos_2_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_1_2}, pos_2_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_1}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_1}, pos_1_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_1}, pos_2_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_1, pos_2_1}, pos_2_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_2}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_2}, pos_1_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_2}, pos_2_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_1, pos_2_2}, pos_2_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_1_2}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_1_2}, pos_1_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_1_2}, pos_2_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_1_2}, pos_2_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_2_1}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_2_1}, pos_1_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_2_1}, pos_2_1));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_2_1}, pos_2_2));
EXPECT_FALSE(ContainsInclusive(Range{pos_1_2, pos_2_2}, pos_1_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_2_2}, pos_1_2));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_2_2}, pos_2_1));
EXPECT_TRUE(ContainsInclusive(Range{pos_1_2, pos_2_2}, pos_2_2));
}
}
} |
https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/include/langsvr/lsp/comparators.h | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/lsp/comparators_test.cc | 303c526231a90049a3e384549720f3fbd453cf66
84795ad2-e755-44a1-8402-5295dc75e04e | cpp | tensorflow/tensorflow | function_optimizer | tensorflow/core/grappler/optimizers/function_optimizer.cc | tensorflow/core/grappler/optimizers/function_optimizer_test.cc |
#include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/lower_case_op.h"
#include "tensorflow/core/common_runtime/lower_functional_ops.h"
#include "tensorflow/core/common_runtime/lower_if_op.h"
#include "tensorflow/core/common_runtime/lower_while_op.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/functions.h"
#include "tensorflow/core/lib/gtl/map_util.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr const char* const kFuncAttr = FunctionLibraryDefinition::kFuncAttr;
constexpr const char* const kNoSpecializeAttr = "_nospecialize";
constexpr const char* const kGrapplerSpecializedFuncAttr =
"_GrapplerSpecializedFunc";
bool IsDirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
return func_node.op() == func.signature().name();
}
bool IsIndirectFunctionCall(const FunctionDef& func, const NodeDef& func_node) {
if (!IsPartitionedCall(func_node) && !IsStatefulPartitionedCall(func_node)) {
return false;
}
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return func_attr != nullptr && func_attr->has_func() &&
func_attr->func().name() == func.signature().name();
}
AttrSlice FunctionInstantiationAttributes(const FunctionDef& func,
const NodeDef& func_node) {
if (IsDirectFunctionCall(func, func_node)) {
return AttrSlice(func_node);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* func_attr = AttrSlice(func_node).Find(kFuncAttr);
return AttrSlice(&func_attr->func().attr());
} else {
LOG(WARNING) << "Can't resolve function instantiation attributes: "
<< SummarizeNodeDef(func_node);
return AttrSlice();
}
}
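// Minimal Device implementation used only so that the Placer can assign
// devices to nodes after inlining; it never executes kernels.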
class FakeDevice : public Device {
public:
FakeDevice(Env* env, const string& device) : Device(env, attr(device)) {}
explicit FakeDevice(const string& device) : FakeDevice(nullptr, device) {}
Status Sync() override { return absl::OkStatus(); }
private:
static DeviceAttributes attr(const string& device) {
DeviceNameUtils::ParsedName parsed_name;
bool parsed = DeviceNameUtils::ParseFullName(device, &parsed_name);
DCHECK(parsed) << "Failed to parse full device name: " << device;
DeviceAttributes attr;
attr.set_name(device);
attr.set_device_type(parsed_name.type);
return attr;
}
};
bool MarkedNoSpecialize(const FunctionDef& fdef) {
const auto attr = AttrSlice(&fdef.attr());
bool nospecialize = false;
return TryGetNodeAttr(attr, kNoSpecializeAttr, &nospecialize) && nospecialize;
}
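// Identifies the context in which a function call is specialized: resolved
// type and body attributes, inputs fed by truly constant nodes, and the set
// of outputs that are actually consumed (or fetched) from the call site.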
struct FunctionSpecializationSignature {
using InputPort = int;
using OutputPort = int;
string func_name;
bool is_in_fetch_set;
absl::flat_hash_set<OutputPort> active_outputs;
absl::flat_hash_map<string, DataType> type_parameters;
absl::flat_hash_map<string, AttrValue> body_parameters;
absl::flat_hash_map<InputPort, string> const_inputs;
bool operator==(const FunctionSpecializationSignature& other) const {
bool equals = func_name == other.func_name &&
is_in_fetch_set == other.is_in_fetch_set &&
active_outputs == other.active_outputs &&
type_parameters == other.type_parameters &&
const_inputs == other.const_inputs;
if (!equals) return false;
if (body_parameters.size() != other.body_parameters.size()) return false;
for (const auto& lhs : body_parameters) {
auto it = other.body_parameters.find(lhs.first);
if (it == other.body_parameters.end()) return false;
if (!AreAttrValuesEqual(lhs.second, (*it).second,
true)) {
return false;
}
}
return true;
}
template <typename H>
friend H AbslHashValue(H h, const FunctionSpecializationSignature& s) {
H base = H::combine(std::move(h), s.func_name, s.is_in_fetch_set);
std::vector<uint64> hashes;
hashes.reserve(s.active_outputs.size()
+ s.type_parameters.size() * 2
+ s.body_parameters.size() * 2
+ s.const_inputs.size() * 2);
absl::c_transform(s.active_outputs, std::back_inserter(hashes),
hash<OutputPort>());
using TypeParam = std::pair<const string, DataType>;
absl::c_for_each(s.type_parameters, [&hashes](const TypeParam& type_param) {
AttrValue attr_value;
attr_value.set_type(type_param.second);
hashes.push_back(Hash64(type_param.first));
hashes.push_back(AttrValueHash(attr_value));
});
using BodyParam = std::pair<const string, AttrValue>;
absl::c_for_each(s.body_parameters, [&hashes](const BodyParam& body_param) {
hashes.push_back(Hash64(body_param.first));
hashes.push_back(FastAttrValueHash(body_param.second));
});
using ConstInput = std::pair<const InputPort, string>;
absl::c_for_each(s.const_inputs, [&hashes](const ConstInput& const_input) {
hashes.push_back(hash<InputPort>()(const_input.first));
hashes.push_back(Hash64(const_input.second));
});
absl::c_sort(hashes);
return H::combine_contiguous(std::move(base), hashes.data(), hashes.size());
}
};
struct FunctionSpecialization {
string specialized_func_name;
bool is_in_fetch_set;
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
absl::flat_hash_set<int> active_outputs;
std::vector<std::pair<int, int>> output_mapping;
};
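// Per-pass state shared by all specializations: the function library, nodes
// known to be truly constant (constants that are not fed), previously created
// specializations, and tensor remappings for call site outputs.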
class FunctionOptimizerContext {
public:
explicit FunctionOptimizerContext(const GrapplerItem& item,
RewriterConfig::Toggle opt_level,
const GraphDef& graph)
: item_(&item),
opt_level_(opt_level),
function_library_(OpRegistry::Global(), graph.library()),
truly_const_nodes_(InferTrulyConstNodes(item, graph)),
graph_view_(&graph) {}
const GrapplerItem& item() const { return *item_; }
int graph_version() const { return item_->graph.versions().producer(); }
RewriterConfig::Toggle opt_level() const { return opt_level_; }
const FunctionLibraryDefinition& function_library() const {
return function_library_;
}
FunctionLibraryDefinition& function_library() { return function_library_; }
const absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>&
tensor_mapping() const {
return tensor_mapping_;
}
const GraphView& graph_view() const { return graph_view_; }
bool IsFeedNode(const string& node_name) const {
return absl::c_any_of(
item_->feed, [&](const std::pair<std::string, Tensor>& feed) {
return ParseTensorName(feed.first).node() == node_name;
});
}
bool IsFetchNode(const string& node_name) const {
return absl::c_any_of(item_->fetch, [&](const string& fetch) {
return ParseTensorName(fetch).node() == node_name;
});
}
bool IsTrulyConst(const string& name) const {
return TrulyConstNode(name) != nullptr;
}
const NodeDef* TrulyConstNode(const string& name) const {
return gtl::FindWithDefault(truly_const_nodes_, name, nullptr);
}
const FunctionSpecialization* FindFunctionSpecialization(
const FunctionSpecializationSignature& sig) const {
return gtl::FindOrNull(specialized_functions_, sig);
}
void AddSpecializedFunction(const FunctionSpecializationSignature& sig,
const FunctionSpecialization& specialized_func) {
specialized_functions_.emplace(sig, specialized_func);
}
void AddTensorMapping(const SafeTensorId& from, const SafeTensorId& to) {
DCHECK(from.index() != Graph::kControlSlot)
<< "Tensor mapping must be from regular tensor";
DCHECK(to.index() != Graph::kControlSlot)
<< "Tensor mapping must be to regular tensor";
auto inserted = tensor_mapping_.insert({from, to});
DCHECK(inserted.second)
<< "Failed to insert duplicated tensor mapping: "
<< "from=" << from.ToString() << " to=" << to.ToString();
}
void AddTensorMapping(const string& func_node,
const FunctionSpecialization& specialized_func) {
for (const auto& pair : specialized_func.output_mapping) {
int from_idx = pair.first;
int to_idx = pair.second;
if (from_idx != to_idx) {
SafeTensorId from_tensor(func_node, from_idx);
SafeTensorId to_tensor(func_node, to_idx);
AddTensorMapping(from_tensor, to_tensor);
}
}
}
private:
static absl::flat_hash_map<string, const NodeDef*> InferTrulyConstNodes(
const GrapplerItem& item, const GraphDef& graph) {
absl::flat_hash_set<absl::string_view> feed_nodes;
for (const auto& feed : item.feed) {
feed_nodes.insert(feed.first);
}
absl::flat_hash_map<string, const NodeDef*> const_nodes;
for (const NodeDef& node : graph.node()) {
if (IsConstant(node) && !feed_nodes.contains(node.name())) {
const_nodes[node.name()] = &node;
}
}
return const_nodes;
}
const GrapplerItem* item_;
RewriterConfig::Toggle opt_level_;
FunctionLibraryDefinition function_library_;
absl::flat_hash_map<string, const NodeDef*> truly_const_nodes_;
absl::flat_hash_map<FunctionSpecializationSignature,
const FunctionSpecialization>
specialized_functions_;
absl::flat_hash_map<SafeTensorId, SafeTensorId, SafeTensorId::Hasher>
tensor_mapping_;
GraphView graph_view_;
FunctionOptimizerContext(const FunctionOptimizerContext&) = delete;
void operator=(const FunctionOptimizerContext&) = delete;
};
const FunctionDef* FindFunctionCall(const FunctionOptimizerContext& ctx,
const NodeDef& node) {
if (IsPartitionedCall(node) || IsStatefulPartitionedCall(node)) {
const AttrValue* func_attr = AttrSlice(node).Find("f");
return (func_attr != nullptr && func_attr->has_func())
? ctx.function_library().Find(func_attr->func().name())
: nullptr;
}
return ctx.function_library().Find(node.op());
}
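// Returns the output ports of `node` that either have a consumer in the graph
// or appear in the fetch set.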
absl::flat_hash_set<int> GetActiveOutputs(const NodeDef& node,
const FunctionOptimizerContext& ctx,
int size_hint = 0) {
absl::flat_hash_set<int> active_outputs;
active_outputs.reserve(static_cast<size_t>(size_hint));
const auto node_fanout_edges =
ctx.graph_view().GetFanoutEdges(node, false);
for (const GraphView::Edge& edge : node_fanout_edges) {
active_outputs.insert(edge.src.port_id);
}
for (const string& fetch : ctx.item().fetch) {
TensorId fetch_tensor = ParseTensorName(fetch);
if (fetch_tensor.node() == node.name()) {
active_outputs.insert(fetch_tensor.index());
}
}
return active_outputs;
}
bool HasTrulyConstInputs(const NodeDef& node,
const FunctionOptimizerContext& ctx) {
const auto is_truly_const = [&ctx](const string& input) {
return ctx.IsTrulyConst(NodeName(input));
};
return absl::c_any_of(node.input(), is_truly_const);
}
bool HasUnusedOutputs(const NodeDef& func_node, const FunctionDef& func,
const FunctionOptimizerContext& ctx) {
int num_outputs = func.signature().output_arg_size();
const absl::flat_hash_set<int> active_outputs =
GetActiveOutputs(func_node, ctx, num_outputs);
int active_outputs_size = active_outputs.size();
return active_outputs_size != num_outputs;
}
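// Returns a function library restricted to functions reachable from the
// optimized graph.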
FunctionDefLibrary PruneFunctionLibrary(const FunctionLibraryDefinition& flib,
const GraphDef& optimized_graph) {
FunctionLibraryDefinition pruned_flib =
flib.ReachableDefinitions(optimized_graph);
int pruned_functions = static_cast<int>(pruned_flib.num_functions()) -
static_cast<int>(flib.num_functions());
VLOG(3) << "Pruned function library: " << pruned_flib.num_functions()
<< " functions (" << pruned_functions << ")";
return pruned_flib.ToProto();
}
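// Replaces function inputs fed by truly constant nodes with Const nodes inside
// the function body, recording which inputs were removed and which control
// dependencies of those constants must be preserved at the call site.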
Status PushDownConstInputs(const NodeDef& func_node,
const FunctionOptimizerContext& ctx,
GrapplerFunctionItem* item,
absl::flat_hash_set<string>* const_inputs,
absl::flat_hash_set<string>* control_deps) {
const auto record_control_deps = [&](const NodeDef* const_input) {
for (int i = const_input->input_size() - 1; i >= 0; --i) {
const string& input = const_input->input(i);
if (IsControlInput(input))
control_deps->insert(input);
else
break;
}
};
for (int i = func_node.input_size() - 1; i >= 0; --i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) continue;
const string node_name = NodeName(input);
if (ctx.IsTrulyConst(node_name)) {
VLOG(3) << "Push const into function body: input=" << input;
const auto* const_input = CHECK_NOTNULL(ctx.TrulyConstNode(node_name));
const_inputs->insert(input);
record_control_deps(const_input);
TF_RETURN_IF_ERROR(ReplaceInputWithConst(*const_input, i, item));
}
}
return absl::OkStatus();
}
void RemovePushedDownConstInputs(const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
std::vector<string> keep_inputs;
const auto& inputs = specialized_func_node->input();
absl::c_copy_if(inputs, std::back_inserter(keep_inputs),
[&](const string& input) {
return !specialization.const_inputs.contains(input);
});
specialized_func_node->clear_input();
for (const auto& keep : keep_inputs) specialized_func_node->add_input(keep);
if (!specialization.control_deps.empty()) {
absl::flat_hash_set<string> existing_control_deps;
for (const string& input : keep_inputs) {
existing_control_deps.insert(AsControlDependency(NodeName(input)));
}
for (const string& ctrl : specialization.control_deps) {
if (!existing_control_deps.contains(ctrl)) {
VLOG(3) << "Forward control dependency: input=" << ctrl;
specialized_func_node->add_input(ctrl);
}
}
}
}
void RemovePushedDownConstInputTypes(
const FunctionSpecialization& specialization, const NodeDef& func_node,
NodeDef* specialized_func_node) {
if (specialization.const_inputs.empty()) return;
const AttrValue* tin = AttrSlice(func_node).Find("Tin");
if (tin == nullptr || !tin->has_list()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tin"].mutable_list()->clear_type();
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (!specialization.const_inputs.contains(input)) {
DataType dt = tin->list().type(i);
(*attr)["Tin"].mutable_list()->add_type(dt);
}
}
}
void RemoveUnusedOutputsTypes(const FunctionSpecialization& specialization,
const NodeDef& func_node,
NodeDef* specialized_func_node) {
const AttrValue* tout = AttrSlice(func_node).Find("Tout");
if (tout == nullptr || !tout->has_list()) return;
int specialization_active_outputs_size = specialization.active_outputs.size();
if (specialization_active_outputs_size == tout->list().type_size()) return;
auto* attr = specialized_func_node->mutable_attr();
(*attr)["Tout"].mutable_list()->clear_type();
for (int i = 0; i < tout->list().type_size(); ++i) {
if (specialization.active_outputs.contains(i)) {
DataType dt = tout->list().type(i);
(*attr)["Tout"].mutable_list()->add_type(dt);
}
}
}
Status UpdateSpecializedFunctionCallSite(const FunctionDef& func,
const NodeDef& func_node,
const string& specialized_func_name,
NodeDef* specialized_func_node) {
if (IsDirectFunctionCall(func, func_node)) {
specialized_func_node->set_op(specialized_func_name);
} else if (IsIndirectFunctionCall(func, func_node)) {
auto* attr = specialized_func_node->mutable_attr();
(*attr)[kFuncAttr].mutable_func()->set_name(specialized_func_name);
} else {
return absl::InvalidArgumentError("Unknown function call site");
}
return absl::OkStatus();
}
Status UpdateSpecializedFunctionNode(
const FunctionDef& func, const NodeDef& func_node,
const FunctionSpecialization& specialization,
NodeDef* specialized_func_node) {
bool is_indirect_call = IsIndirectFunctionCall(func, func_node);
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionCallSite(
func, func_node, specialization.specialized_func_name,
specialized_func_node));
RemovePushedDownConstInputs(specialization, specialized_func_node);
if (is_indirect_call) {
RemovePushedDownConstInputTypes(specialization, func_node,
specialized_func_node);
}
if (is_indirect_call && !specialization.is_in_fetch_set) {
RemoveUnusedOutputsTypes(specialization, func_node, specialized_func_node);
}
specialized_func_node->mutable_attr()->erase("_gradient_op_type");
return absl::OkStatus();
}
Status InitializeFunctionSpecializationSignature(
const NodeDef& func_node, const FunctionDef& func,
const AttrSlice& func_instantiation_attr,
const FunctionOptimizerContext& ctx, FunctionSpecializationSignature* sig) {
DCHECK(sig->const_inputs.empty());
DCHECK(sig->active_outputs.empty());
sig->func_name = func.signature().name();
sig->is_in_fetch_set = ctx.IsFetchNode(func_node.name());
sig->active_outputs = GetActiveOutputs(func_node, ctx);
TF_RETURN_IF_ERROR(InstantiationTypeParameters(func, func_instantiation_attr,
&sig->type_parameters));
TF_RETURN_IF_ERROR(InstantiationBodyParameters(func, func_instantiation_attr,
&sig->body_parameters));
for (int i = 0; i < func_node.input_size(); ++i) {
const string& input = func_node.input(i);
if (IsControlInput(input)) break;
if (ctx.IsTrulyConst(input)) {
sig->const_inputs.emplace(i, input);
}
}
return absl::OkStatus();
}
string SpecializedFunctionName(const FunctionOptimizerContext& ctx,
const FunctionDef& func,
const NodeDef& func_node) {
return absl::Substitute(
"$0_specialized_for_$1_at_$2", func.signature().name(),
absl::StrReplaceAll(func_node.name(), {{"/", "_"}}), ctx.item().id);
}
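// Creates (or reuses) a specialized version of `func` for this call site:
// constant inputs are pushed into the body, unused outputs are pruned (unless
// the node is fetched), and the call site is rewritten to the new function.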
Status SpecializeFunction(const NodeDef& func_node, const FunctionDef& func,
FunctionOptimizerContext* ctx,
GraphDef* optimized_graph) {
VLOG(2) << "Specialize function call: " << SummarizeNodeDef(func_node);
const AttrSlice func_instantiation_attr =
FunctionInstantiationAttributes(func, func_node);
FunctionSpecializationSignature signature;
TF_RETURN_IF_ERROR(InitializeFunctionSpecializationSignature(
func_node, func, func_instantiation_attr, *ctx, &signature));
const FunctionSpecialization* already_specialized =
ctx->FindFunctionSpecialization(signature);
if (already_specialized) {
VLOG(2) << "Function was already specialized in identical context: "
"specialized_name="
<< already_specialized->specialized_func_name;
NodeDef* specialized_func_node = optimized_graph->add_node();
*specialized_func_node = func_node;
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionNode(
func, func_node, *already_specialized, specialized_func_node));
ctx->AddTensorMapping(specialized_func_node->name(), *already_specialized);
return absl::OkStatus();
}
const auto& flib = ctx->function_library();
GrapplerFunctionItem item;
TF_RETURN_IF_ERROR(MakeGrapplerFunctionItem(
func, func_instantiation_attr, flib, ctx->graph_version(), &item));
absl::flat_hash_set<string> const_inputs;
absl::flat_hash_set<string> control_deps;
TF_RETURN_IF_ERROR(PushDownConstInputs(func_node, *ctx, &item, &const_inputs,
&control_deps));
std::vector<std::pair<int, int>> output_mapping;
if (!signature.is_in_fetch_set) {
int num_func_outputs = item.output_size();
absl::flat_hash_set<int> remove;
for (int i = 0; i < num_func_outputs; ++i) {
if (!signature.active_outputs.count(i)) remove.insert(i);
}
TF_RETURN_IF_ERROR(RemoveFunctionOutputs(remove, &item, &output_mapping));
}
FunctionDef specialized_func;
TF_RETURN_IF_ERROR(MakeFunctionDef(item, flib, &specialized_func));
const string specialized_func_name =
SpecializedFunctionName(*ctx, func, func_node);
if (flib.Contains(specialized_func_name)) {
return absl::InternalError("Created duplicate function specialization");
}
specialized_func.mutable_signature()->set_name(specialized_func_name);
auto* specialized_attr = specialized_func.mutable_attr();
(*specialized_attr)[kGrapplerSpecializedFuncAttr].set_b(true);
TF_RETURN_IF_ERROR(ctx->function_library().AddFunctionDef(specialized_func));
NodeDef* specialized_func_node = optimized_graph->add_node();
*specialized_func_node = func_node;
FunctionSpecialization func_specialization = {
specialized_func_name, signature.is_in_fetch_set, const_inputs,
control_deps, signature.active_outputs, output_mapping};
TF_RETURN_IF_ERROR(UpdateSpecializedFunctionNode(
func, func_node, func_specialization, specialized_func_node));
ctx->AddSpecializedFunction(signature, func_specialization);
ctx->AddTensorMapping(specialized_func_node->name(), func_specialization);
return absl::OkStatus();
}
constexpr const char* const kLowerUsingSwitchMergeAttr =
LowerFunctionalOpsPass::kLowerUsingSwitchMergeAttr;
constexpr const char* const kLowerAsMultiDeviceFunctionAttr =
LowerFunctionalOpsPass::kLowerAsMultiDeviceFunctionAttr;
using KeepCallerNode = InlineFunctionBodyOptions::KeepCallerNode;
using OutputControlSource = InlineFunctionBodyOptions::OutputControlSource;
bool CheckBoolAttr(const Node* n, absl::string_view attr_name) {
bool match;
bool found = TryGetNodeAttr(n->attrs(), attr_name, &match);
return found && match;
}
bool CheckStringAttr(const Node* n, absl::string_view attr_name) {
const string& value = GetNodeAttrString(n->attrs(), attr_name);
return !value.empty();
}
bool LowerUsingSwitchMergeIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerUsingSwitchMergeAttr);
}
bool LowerAsMultiDeviceFunctionIsOn(const Node* n) {
return CheckBoolAttr(n, kLowerAsMultiDeviceFunctionAttr);
}
bool MarkedForXlaCompilation(const NodeDef& n) {
auto is_enabled = [&](std::string attr_name) -> bool {
auto it = n.attr().find(attr_name);
return it != n.attr().end() && (!it->second.s().empty() || it->second.b());
};
return is_enabled("_xla_compile_id") || is_enabled("_tpu_replicate") ||
is_enabled(kXlaMustCompileAttr);
}
bool IsExemptFromSideEffectsExecutionValidation(const string& op) {
static const auto* exemption = new absl::flat_hash_set<string>(
{
"CollectiveGather", "CollectiveReduce", "CollectiveBcastSend",
"CollectiveBcastRecv", "CollectiveBcastSendV2", "CollectiveBcastRecvV2",
"NcclAllReduce", "Send", "Recv", "CollectiveAssignGroupsV2",
"CollectiveInitializeCommunicator",
"RandomUniform", "RandomUniformInt", "RandomStandardNormal",
"ParameterizedTruncatedNormal", "TruncatedNormal", "RandomShuffle",
"Multinomial", "RandomGamma", "RandomGammaGrad", "RandomPoisson",
"RandomPoissonV2",
"ReadVariableOp",
"CudnnRNN", "CudnnRNNBackprop", "CudnnRNNV2", "CudnnRNNV3",
"CudnnRNNBackpropV2", "CudnnRNNBackpropV3",
"EnqueueTPUEmbeddingSparseBatch", "EnqueueTPUEmbeddingIntegerBatch",
"EnqueueTPUEmbeddingSparseTensorBatch",
"EnqueueTPUEmbeddingRaggedTensorBatch",
"EnqueueTPUEmbeddingArbitraryTensorBatch",
"DynamicEnqueueTPUEmbeddingArbitraryTensorBatch",
"SaveV2", "RestoreV2",
"InfeedEnqueue", "InfeedEnqueueTuple"});
return exemption->contains(op);
}
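// Checks that every stateful node in the function body (other than exempted
// ops) has a path to one of the function's control sources (data or control
// outputs), so its side effects are still guaranteed to run after inlining.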
Status ValidateSideEffectsExecution(
const FunctionBody& fbody, OutputControlSource output_control_source,
bool has_outgoing_control_edges,
bool validate_outgoing_control_edge = true) {
std::vector<const Node*> fbody_side_effects;
absl::c_copy_if(
fbody.graph->nodes(), std::back_inserter(fbody_side_effects),
[](const Node* n) {
return n->op_def().is_stateful() && !n->IsArg() && !n->IsRetval() &&
!IsExemptFromSideEffectsExecutionValidation(n->type_string());
});
if (!fbody_side_effects.empty() && !has_outgoing_control_edges) {
const string error_message =
"Can't guarantee execution of function side-effects after inlining. "
"Function call node has no outgoing control edges.";
if (validate_outgoing_control_edge) {
return absl::InternalError(error_message);
} else {
VLOG(3) << error_message;
}
}
absl::flat_hash_set<const Node*> control_sources;
if (output_control_source == OutputControlSource::kDataOutputs) {
control_sources = {fbody.ret_nodes.begin(), fbody.ret_nodes.end()};
} else if (output_control_source == OutputControlSource::kControlOutputs) {
control_sources = {fbody.control_ret_nodes.begin(),
fbody.control_ret_nodes.end()};
}
for (const Node* side_effect : fbody_side_effects) {
VLOG(4) << "Check that node " << side_effect->name()
<< " will execute after inlining.";
bool will_execute = false;
const auto is_control_source = [&](const Node* n) -> void {
const auto it = control_sources.find(n);
if (it != control_sources.end()) {
VLOG(4) << "Found a path to control source: " << side_effect->name()
<< " ---> " << (*it)->name();
will_execute = true;
}
};
DFSFrom(*fbody.graph, {side_effect}, is_control_source,
{}, NodeComparatorName{});
if (!will_execute) {
return absl::InternalError(absl::StrCat(
"Can't guarantee execution of a side-effectful node, that is not "
"reachable from function control source. Function body node: ",
SummarizeNode(*side_effect)));
}
}
return absl::OkStatus();
}
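// Rejects inlining if a function output might receive a dead tensor: dead
// tensors originate at Switch nodes (or nested calls that may produce them)
// and are only neutralized by a Merge, so a path from such a source to a
// return node that bypasses Merge makes inlining unsafe.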
Status ValidateNoDeadOutputs(const FunctionLibraryDefinition& flib_def,
const FunctionBody& fbody) {
absl::flat_hash_set<const Node*> output_nodes = {fbody.ret_nodes.begin(),
fbody.ret_nodes.end()};
std::vector<const Node*> dead_tensor_sources;
for (const Node* n : fbody.graph->nodes()) {
if (n->IsSwitch()) {
VLOG(4) << "Add dead tensors source. Switch node: " << n->name();
dead_tensor_sources.push_back(n);
continue;
}
const FunctionDef* fdef = flib_def.Find(n->type_string());
if (fdef != nullptr) {
std::unique_ptr<FunctionBody> nested_fbody;
NameAttrList func;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(n->def(), &func));
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, &nested_fbody));
if (!ValidateNoDeadOutputs(flib_def, *nested_fbody).ok()) {
VLOG(4) << "Add dead tensors source. Function call: " << func.name()
<< " node=" << n->name();
dead_tensor_sources.push_back(n);
}
}
}
for (const Node* dead_tensor_source : dead_tensor_sources) {
bool has_dead_output = false;
const auto is_output_node = [&](const Node* n) -> void {
const auto it = output_nodes.find(n);
if (it != output_nodes.end()) {
VLOG(4) << "Found a path to output node from dead tensor source: "
<< dead_tensor_source->name() << " ---> " << (*it)->name();
has_dead_output = true;
}
};
const auto stop_traversal = [&has_dead_output](const Edge& edge) -> bool {
return !edge.src()->IsMerge() || has_dead_output;
};
DFSFrom(*fbody.graph, {dead_tensor_source}, is_output_node,
{}, NodeComparatorName{},
stop_traversal);
if (has_dead_output) {
return absl::InternalError(absl::StrCat(
"Can't inline a function with dead outputs. Dead tensor source: ",
SummarizeNode(*dead_tensor_source)));
}
}
return absl::OkStatus();
}
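// Instantiates the FunctionBody that will be inlined for `node`. Handles
// regular function calls as well as SymbolicGradient nodes (custom gradients,
// gradients of primitive ops, and symbolic gradients of functions).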
Status MakeFunctionBodyForInlining(const Node& node,
const FunctionLibraryDefinition& flib_def,
std::unique_ptr<FunctionBody>* fbody) {
VLOG(3) << "Make function body for inlining: " << SummarizeNode(node);
const auto find_fdef = [&flib_def, &node](
const string& name,
const FunctionDef** fdef) -> Status {
if ((*fdef = flib_def.Find(name)) == nullptr) {
return absl::InternalError(absl::StrCat(
"Was not able to find a function definition (name=", name,
") for a function call: ", SummarizeNode(node)));
}
return absl::OkStatus();
};
if (node.type_string() == FunctionLibraryDefinition::kGradientOp) {
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), kFuncAttr, &func));
const string grad = flib_def.FindGradient(func.name());
if (!grad.empty()) {
const FunctionDef* grad_fdef;
TF_RETURN_IF_ERROR(find_fdef(grad, &grad_fdef));
VLOG(4) << "Instantiate a custom SymbolicGradient: gradient=" << grad
<< " (function=" << func.name() << ")";
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*grad_fdef, AttrSlice(&func.attr()), &flib_def, fbody));
} else if (flib_def.Find(func.name()) == nullptr) {
gradient::Creator creator;
TF_RETURN_IF_ERROR(gradient::GetOpGradientCreator(func.name(), &creator));
if (creator == nullptr) {
return absl::InvalidArgumentError(
absl::StrCat("No gradient is defined for ", func.name()));
}
FunctionDef grad_fdef;
TF_RETURN_IF_ERROR(creator(AttrSlice(&func.attr()), &grad_fdef));
VLOG(4) << "Instantiate a SymbolicGradient for a primitive op: "
<< func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
grad_fdef, AttrSlice(&func.attr()), &flib_def, fbody));
} else {
const FunctionDef* fdef;
TF_RETURN_IF_ERROR(find_fdef(func.name(), &fdef));
VLOG(4) << "Instantiate a SymbolicGradient for a function: "
<< func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, fbody));
*fbody = SymbolicGradient(**fbody);
}
} else {
NameAttrList func;
TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node.def(), &func));
const FunctionDef* fdef;
TF_RETURN_IF_ERROR(find_fdef(func.name(), &fdef));
VLOG(4) << "Instantiate a function call: function=" << func.name();
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, AttrSlice(&func.attr()),
&flib_def, fbody));
}
return absl::OkStatus();
}
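// Adds control edges from the caller's data inputs when strict input
// semantics is required: a resource input with no incoming control edges, or
// an input produced by a constant Enter node.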
void AddStrictInputSemantics(Node* caller, Graph* g) {
absl::flat_hash_set<const Node*> existing_control_sources;
for (const Edge* edge : caller->in_edges()) {
if (edge->IsControlEdge()) {
existing_control_sources.insert(edge->src());
}
}
const bool has_incoming_control_edges = !existing_control_sources.empty();
const bool has_resource_input =
absl::c_any_of(caller->input_types(),
[](const DataType dtype) { return dtype == DT_RESOURCE; });
const bool has_constant_enter_input =
absl::c_any_of(caller->in_edges(), [](const Edge* edge) {
Node* src = edge->src();
return src->IsEnter() && CheckBoolAttr(src, "is_constant");
});
const bool requires_strict_semantics =
(!has_incoming_control_edges && has_resource_input) ||
(has_constant_enter_input);
if (!requires_strict_semantics) return;
std::set<const Node*> data_inputs;
for (const Edge* edge : caller->in_edges()) {
if (!edge->IsControlEdge() &&
!existing_control_sources.contains(edge->src())) {
data_inputs.insert(edge->src());
}
}
VLOG(3) << "Add control edges from all data inputs to enforce strict "
"semantics with regard to function inputs";
const auto is_placeholder = [](const Node* node) -> bool {
return node->type_string() == "Placeholder";
};
for (const Node* node : data_inputs) {
if (is_placeholder(node)) continue;
g->AddControlEdge(g->FindNodeId(node->id()), caller,
true);
}
}
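// If the caller sits inside a while loop and has no incoming control edges,
// adds a control edge from the loop frame (the Enter node, or a Merge fed by
// a non-constant Enter) so inlined nodes execute in the correct frame.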
void AddFrameForwardingControlEdge(const std::vector<ControlFlowInfo>& info,
Node* caller, Graph* g) {
int info_size = info.size();
if (caller->id() >= info_size) return;
const Node* frame = info[caller->id()].frame;
const bool is_in_while_loop = frame->id() != Graph::kSourceId;
if (!is_in_while_loop) return;
const bool has_incoming_control_edges =
absl::c_any_of(caller->in_edges(),
[](const Edge* edge) { return edge->IsControlEdge(); });
if (has_incoming_control_edges) return;
VLOG(3) << "Add a frame forwarding control edge: from=" << frame->name()
<< " to=" << caller->name();
Node* enter = g->FindNodeId(frame->id());
bool is_constant_enter = enter->attrs().Find("is_constant")->b();
if (is_constant_enter) {
g->AddControlEdge(enter, caller);
} else {
auto it = absl::c_find_if(enter->out_edges(), [](const Edge* e) {
return !e->IsControlEdge() && e->dst()->IsMerge();
});
if (it != enter->out_edges().end()) {
g->AddControlEdge((*it)->dst(), caller);
} else {
LOG(WARNING) << "Enter[is_constant=false] node: " << enter->name()
<< " does not have an outgoing edge to a Merge.";
}
}
}
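// Inlines every eligible function call in the item's graph (optionally
// lowering If/Case/While ops marked for lowering first) and re-runs the
// Placer when something was inlined and the device set is known.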
Status InlineFunctionCalls(const GrapplerItem& item,
const RewriterConfig::Toggle opt_level,
const bool lower_control_flow,
GraphDef* output_graph) {
bool is_aggressive = opt_level == RewriterConfig::AGGRESSIVE;
VLOG(2) << "Inline function calls: grappler_item_id=" << item.id
<< " (aggressive_mode=" << is_aggressive << ")";
FunctionLibraryDefinition flib_def =
FunctionLibraryDefinition(OpRegistry::Global(), item.graph.library());
std::unique_ptr<Graph> graph = std::make_unique<Graph>(flib_def);
GraphConstructorOptions graph_constructor_options;
graph_constructor_options.allow_internal_ops = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(graph_constructor_options,
item.graph, graph.get()));
using NodeNames = absl::flat_hash_set<absl::string_view>;
NodeNames fetch_nodes;
fetch_nodes.reserve(item.fetch.size());
for (const string& fetch : item.fetch) {
fetch_nodes.insert(ParseTensorName(fetch).node());
}
NodeNames keep_nodes(item.keep_ops.begin(), item.keep_ops.end());
if (item.save_op.size() > 0) {
keep_nodes.insert(item.save_op);
}
if (item.restore_op.size() > 0) {
keep_nodes.insert(item.restore_op);
}
std::vector<string> inlined_function_names;
NodeNames feed_nodes;
feed_nodes.reserve(item.feed.size());
for (const std::pair<std::string, Tensor>& feed : item.feed) {
feed_nodes.insert(ParseTensorName(feed.first).node());
}
std::vector<ControlFlowInfo> control_flow_info;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph.get(), &control_flow_info));
for (int i = 2; i < graph->num_node_ids(); ++i) {
Node* n = graph->FindNodeId(i);
if (n == nullptr) continue;
if (lower_control_flow && LowerUsingSwitchMergeIsOn(n)) {
VLOG(2) << "Lower functional control flow op: " << SummarizeNode(*n);
AddStrictInputSemantics(n, graph.get());
AddFrameForwardingControlEdge(control_flow_info, n, graph.get());
if (n->IsIfNode()) {
TF_RETURN_IF_ERROR(RewriteIfNode(n, graph.get(), false));
} else if (n->IsCaseNode()) {
TF_RETURN_IF_ERROR(RewriteCaseNode(n, graph.get(), false));
} else if (n->IsWhileNode()) {
TF_RETURN_IF_ERROR(RewriteWhileNode(n, graph.get(), &flib_def, false));
}
continue;
}
if (!IsFunctionCall(flib_def, *n)) continue;
if (MarkedForXlaCompilation(n->def())) continue;
if (feed_nodes.contains(n->name())) continue;
if (n->name() == item.restore_op || n->name() == item.save_op) continue;
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(MakeFunctionBodyForInlining(*n, flib_def, &fbody));
InlineFunctionBodyOptions inline_options;
inline_options.ignore_noinline = is_aggressive;
bool force_inline_as_multi_device = LowerAsMultiDeviceFunctionIsOn(n);
if (n->IsPartitionedCall() || force_inline_as_multi_device) {
inline_options.output_control_src = OutputControlSource::kControlOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::MultiDevice();
} else {
inline_options.output_control_src = OutputControlSource::kDataOutputs;
inline_options.inlined_function_body_placer =
InlinedFunctionBodyPlacer::SingleDevice();
}
if (fetch_nodes.contains(n->name())) {
inline_options.keep_caller_node = KeepCallerNode::kFetchable;
} else if (keep_nodes.contains(n->name())) {
inline_options.keep_caller_node = KeepCallerNode::kTargetable;
} else {
inline_options.keep_caller_node = KeepCallerNode::kDoNotKeep;
}
Status can_inline_function_call =
ValidateInlining(n, fbody.get(), inline_options);
if (can_inline_function_call.ok()) {
bool has_outgoing_control_edges = absl::c_any_of(
n->out_edges(),
[](const Edge* edge) { return edge->IsControlEdge(); });
can_inline_function_call = ValidateSideEffectsExecution(
*fbody, inline_options.output_control_src,
has_outgoing_control_edges);
if (!can_inline_function_call.ok() &&
(is_aggressive || force_inline_as_multi_device)) {
VLOG(2) << "Ignore error: " << can_inline_function_call.message();
can_inline_function_call = absl::OkStatus();
}
}
if (can_inline_function_call.ok()) {
can_inline_function_call = ValidateNoDeadOutputs(flib_def, *fbody);
}
if (can_inline_function_call.ok()) {
VLOG(2) << "Inline function call node: " << n->name();
AddStrictInputSemantics(n, graph.get());
AddFrameForwardingControlEdge(control_flow_info, n, graph.get());
TF_RETURN_IF_ERROR(InlineFunctionBody(flib_def, graph.get(), n,
fbody.get(), inline_options));
inlined_function_names.push_back(
fbody->record->fdef().signature().name());
} else {
VLOG(2) << "Failed to inline function call node: "
<< can_inline_function_call.message();
}
}
VLOG(4) << "Inlined " << inlined_function_names.size()
<< " function calls: " << absl::StrJoin(inlined_function_names, ", ");
if (inlined_function_names.empty()) {
VLOG(3) << "Not placing graph after function inlining"
<< " (did not inline any of the function calls).";
} else if (item.devices().empty()) {
VLOG(3) << "Not placing graph after function inlining"
<< " (device set is empty)";
} else {
VLOG(3) << "Run placer for the graph after function inlining. "
<< "Devices: [" << absl::StrJoin(item.devices(), ", ") << "]";
DeviceSet device_set;
std::vector<std::unique_ptr<Device>> fake_devices;
for (const string& name : item.devices()) {
auto device = std::make_unique<FakeDevice>(name);
device_set.AddDevice(device.get());
fake_devices.push_back(std::move(device));
}
Placer placer(graph.get(), item.id, &flib_def, &device_set);
TF_RETURN_IF_ERROR(placer.Run());
}
graph->ToGraphDef(output_graph);
return absl::OkStatus();
}
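// Rewrites consumer inputs that referred to pruned function outputs so that
// they point at the remapped output positions recorded during specialization.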
void RestoreTensorMapping(const FunctionOptimizerContext& ctx,
GraphDef* optimized_graph) {
if (ctx.tensor_mapping().empty()) return;
for (NodeDef& node : *optimized_graph->mutable_node()) {
for (int idx = 0; idx < node.input_size(); ++idx) {
TensorId input_tensor = ParseTensorName(node.input(idx));
if (input_tensor.index() == Graph::kControlSlot) break;
auto mapping = ctx.tensor_mapping().find(input_tensor);
if (mapping != ctx.tensor_mapping().end()) {
node.set_input(idx, TensorIdToString(mapping->second));
}
}
}
}
}
Status FunctionOptimizer::RunFunctionOptimizerPass(
const GrapplerItem& item, GraphDef* optimized_graph) const {
VLOG(3) << "Run function optimizer pass: grappler_item_id=" << item.id;
GraphDef graph_after_inlining;
TF_RETURN_IF_ERROR(InlineFunctionCalls(item, opt_level_, lower_control_flow_,
&graph_after_inlining));
FunctionOptimizerContext ctx(item, opt_level_, graph_after_inlining);
for (const NodeDef& node : graph_after_inlining.node()) {
const int num_nodes_before = optimized_graph->node_size();
const auto is_graph_modified = [&]() {
int num_nodes = optimized_graph->node_size();
DCHECK_GE(num_nodes, num_nodes_before) << "Nodes should not be removed";
return num_nodes > num_nodes_before;
};
const auto copy_node = [&]() { *optimized_graph->add_node() = node; };
const FunctionDef* func = FindFunctionCall(ctx, node);
if (func == nullptr) {
copy_node();
continue;
}
const string& func_name = func->signature().name();
const bool specialization_worthy = IsParametrized(*func) ||
HasTrulyConstInputs(node, ctx) ||
HasUnusedOutputs(node, *func, ctx);
const string grad_func = ctx.function_library().FindGradient(func_name);
const bool no_specialize =
!grad_func.empty() || ctx.IsFeedNode(node.name()) ||
MarkedNoSpecialize(*func) || MarkedForXlaCompilation(node);
if (specialization_worthy && !no_specialize) {
Status status = SpecializeFunction(node, *func, &ctx, optimized_graph);
if (!status.ok() && is_graph_modified()) {
return status;
} else if (!status.ok() && !is_graph_modified()) {
VLOG(3) << "Skip specialization error: " << status.message();
copy_node();
}
continue;
} else {
VLOG(2) << "Skip function specialization: " << func->signature().name();
copy_node();
}
}
RestoreTensorMapping(ctx, optimized_graph);
*optimized_graph->mutable_versions() = item.graph.versions();
*optimized_graph->mutable_library() =
PruneFunctionLibrary(ctx.function_library(), *optimized_graph);
return absl::OkStatus();
}
Status FunctionOptimizer::Optimize(Cluster*, const GrapplerItem& item,
GraphDef* optimized_graph) {
if (item.graph.library().function_size() == 0) {
return absl::AbortedError("Nothing to do.");
}
TF_RETURN_IF_ERROR(RunFunctionOptimizerPass(item, optimized_graph));
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/function_optimizer.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/gtl/flatset.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kDevice[] = "/job:localhost/replica:0/task:0/device:CPU:0";
}
class FunctionOptimizerTest : public GrapplerTest {};
TEST_F(FunctionOptimizerTest, InlineFunction_SimpleFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
const string arg0 = "Func/y/input/_0";
const string ret0 = "Func/y/output/_1";
const Tensor kTwo = test::AsScalar<int64_t>(2);
GraphDef expected = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
NDef(arg0, "Identity", {"x"}, {{"T", DT_FLOAT}}),
NDef("y/two", "Const", {}, {{"dtype", DT_INT64}, {"value", kTwo}}),
NDef("y/scale", "Cast", {"y/two"},
{{"DstT", DT_FLOAT}, {"SrcT", DT_INT64}}),
NDef("y/y", "Mul", {arg0, "y/scale"}, {{"T", DT_FLOAT}}),
NDef(ret0, "Identity", {"y/y"}, {{"T", DT_FLOAT}}),
NDef("z", "Identity", {ret0}, {{"T", DT_FLOAT}})},
{});
for (NodeDef& node : *expected.mutable_node()) node.set_device(kDevice);
CompareGraphs(expected, output);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FixedTypeFunction) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<float>(2.0f);
FunctionDef x_times_two = FunctionDefHelper::Define(
"XTimesTwo",
{"x: float"},
{"y: float"},
{},
{
{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_FLOAT}}},
{{"enter"},
"Enter",
{"x"},
{{"T", DT_FLOAT}, {"frame_name", "frame"}}},
{{"y"}, "Mul", {"x", "two"}, {{"T", DT_FLOAT}}},
});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
x_times_two,
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "XTimesTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithOutputMapping) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"Exp_func",
{"in: float"},
{"out: float"},
{},
{{{"Linear_func"}, "Identity", {"in"}, {{"T", DT_FLOAT}}},
{{"Exp"}, "Exp", {"Linear_func:output:0"}, {{"T", DT_FLOAT}}}},
{{"out", "Exp:y:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "Exp_func", {"x"}, {}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "Exp_func");
}
EXPECT_EQ(output.library().function_size(), 0);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithInputForwarding) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef func = FunctionDefHelper::Create(
"ForwardInputs",
{"in0: float", "in1: float", "arg2: float", "arg3: int32", "arg4: float"},
{"out0: float", "arg2: float", "arg3: int32"},
{},
{},
{{"out0", "in0"}, {"arg2", "arg2"}, {"arg3", "arg3"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("x0", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x1", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x2", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("x3", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("x4", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "ForwardInputs", {"x0", "x1", "x2", "x3", "x4"}, {}, kDevice),
NDef("z0", "Identity", {"y:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z1", "Identity", {"y:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z2", "Identity", {"y:2"}, {{"T", DT_INT32}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "ForwardInputs");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z0", "z1", "z2"};
item.feed.emplace_back("x0", test::AsScalar<float>(3.14f));
item.feed.emplace_back("x1", test::AsScalar<float>(2.7f));
item.feed.emplace_back("x2", test::AsScalar<float>(1.0f));
item.feed.emplace_back("x4", test::AsScalar<float>(-1.0f));
item.feed.emplace_back("x3", test::AsScalar<int>(1234));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
test::ExpectTensorEqual<int>(tensors_expected[2], tensors[2]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithoutInput) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
const Tensor kTwo = test::AsScalar<int64_t>(2);
FunctionDef func = FunctionDefHelper::Define(
"GenerateTwo",
{},
{"o: T"},
{"T: {float, double}"},
{{{"two"}, "Const", {}, {{"value", kTwo}, {"dtype", DT_INT64}}},
{{"o"}, "Cast", {"two"}, {{"SrcT", DT_INT64}, {"DstT", "$T"}}}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("y", "GenerateTwo", {}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
{
func,
});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "GenerateTwo");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"z"};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineFunction_FunctionWithNestedFunctionCall) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "MyMul", {"x", "x"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("square", "MySquare", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("outputs", "Identity", {"square:0"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func, square_func});
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "MySquare");
EXPECT_NE(node.op(), "MyMul");
}
EXPECT_EQ(output.library().function_size(), 0);
item.fetch = {"outputs"};
item.feed.emplace_back("a", test::AsScalar<float>(2.0f));
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_TestFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected =
EvaluateNodes(item.graph, {"out1", "out2"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out1", "out2"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
test::ExpectTensorEqual<float>(expected[1], optimized[1]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradient_IdentityFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
FunctionDef func = FunctionDefHelper::Create(
"Identity_func",
{"in: float"},
{"out: float"},
{},
{{{"Identity"}, "Identity", {"in"}, {{"T", DT_FLOAT}}}},
{{"out", "Identity:output:0"}});
auto x = ops::Const(scope, 1.0f, {3, 5, 7});
auto z = ops::Const(scope, 3.0f, {3, 5, 7});
NameAttrList fn;
fn.set_name("Identity_func");
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, z},
{DT_FLOAT}, fn);
auto out = ops::Identity(scope.WithOpName("out"), g0.output[0]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
for (const NodeDef& node : output.node()) {
EXPECT_NE(node.op(), "SymbolicGradient");
}
EXPECT_EQ(output.library().function_size(), 0);
std::vector<Tensor> expected = EvaluateNodes(item.graph, {"out"}, {});
std::vector<Tensor> optimized = EvaluateNodes(output, {"out"}, {});
test::ExpectTensorEqual<float>(expected[0], optimized[0]);
}
TEST_F(FunctionOptimizerTest, InlineSymbolicGradientNoInlineFunc) {
FunctionOptimizer optimizer(RewriterConfig::ON, true);
FunctionDef func = FunctionDefHelper::Define(
"TestFunc", {"x:float", "y:float"}, {"l:float"}, {},
{
{{"z"}, "Add", {"x", "y"}, {{"T", DT_FLOAT}}},
FunctionDefHelper::Const("zero", 0),
FunctionDefHelper::Const("one", 1),
{{"r"}, "Rank", {"z"}, {{"T", DT_FLOAT}}},
{{"indices"}, "Range", {"zero", "r", "one"}},
{{"l"}, "Sum", {"z", "indices"}, {{"T", DT_FLOAT}}},
});
(*func.mutable_attr())["_noinline"].set_b(true);
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
auto x = ops::Const(scope, 1.0f);
auto y = ops::Const(scope, 2.0f);
auto dl = ops::Const(scope, 3.0f);
NameAttrList fn;
fn.set_name("TestFunc");
(*fn.mutable_attr())["T"].set_type(DT_FLOAT);
auto g0 = ops::SymbolicGradient(scope, std::initializer_list<Input>{x, y, dl},
{DT_FLOAT, DT_FLOAT}, fn);
auto out1 = ops::Identity(scope.WithOpName("out1"), g0.output[0]);
auto out2 = ops::Identity(scope.WithOpName("out2"), g0.output[1]);
GrapplerItem item;
TF_EXPECT_OK(scope.ToGraphDef(&item.graph));
*item.graph.mutable_library()->add_function() = func;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
CompareGraphs(item.graph, output);
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionSimpleFunction) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func} );
Tensor pi = test::AsScalar<float>(3.14f);
item.feed.emplace_back("a", pi);
item.feed.emplace_back("b", pi);
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
{
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
GraphDef optimized_graph;
TF_EXPECT_OK(item.AddDevice(kDevice));
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, kDevice),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, kDevice),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithControlDependencies) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::ON, true);
const Tensor kOne = test::AsScalar<float>(1.0);
const Tensor kTwo = test::AsScalar<float>(2.0);
const TensorShape scalar = TensorShape({});
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T", "v: resource"}, {"z:T"}, {"T: {float, double}"},
{{{"one"}, "Const", {}, {{"value", kOne}, {"dtype", DT_FLOAT}}},
{{"add"},
"AssignAddVariableOp",
{"v", "one:output:0"},
{{"dtype", DT_FLOAT}}},
{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}},
{{"size_effects", "add"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out_1", "out_2"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}}),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("f1", "PartitionedCall", {"a", "b", "v", "^init_v"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("f2", "PartitionedCall", {"f1", "f1", "v", "^f1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out_1", "Identity", {"f2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out_2", "ReadVariableOp", {"v", "^f1", "^f2"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("v", "VarHandleOp", {}, {{"dtype", DT_FLOAT}, {"shape", scalar}},
kDevice),
NDef("init_v", "AssignVariableOp", {"v", "a"}, {{"dtype", DT_FLOAT}},
kDevice),
NDef("Func/f1/input_control_node/_0", "NoOp", {"^init_v"}, {}, kDevice),
NDef("Func/f1/input/_1", "Identity",
{"a", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_2", "Identity",
{"b", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_3", "Identity",
{"v", "^Func/f1/input_control_node/_0"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f1/one", "Const", {"^Func/f1/input_control_node/_0"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f1/mul", "Mul", {"Func/f1/input/_1", "Func/f1/input/_2"},
{{"T", DT_FLOAT}}, kDevice),
NDef("f1/add", "AssignAddVariableOp", {"Func/f1/input/_3", "f1/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("Func/f1/output/_4", "Identity", {"f1/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/output_control_node/_5", "NoOp", {"^f1/add"}, {}, kDevice),
NDef("Func/f2/input_control_node/_6", "NoOp",
{"^Func/f1/output_control_node/_5"}, {}, kDevice),
NDef("Func/f2/input/_7", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_8", "Identity",
{"Func/f1/output/_4", "^Func/f2/input_control_node/_6"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_9", "Identity",
{"v", "^Func/f2/input_control_node/_6"}, {{"T", DT_RESOURCE}},
kDevice),
NDef("f2/one", "Const", {"^Func/f2/input_control_node/_6"},
{{"dtype", DT_FLOAT}, {"value", kOne}}, kDevice),
NDef("f2/add", "AssignAddVariableOp", {"Func/f2/input/_9", "f2/one"},
{{"dtype", DT_FLOAT}}, kDevice),
NDef("f2/mul", "Mul", {"Func/f2/input/_7", "Func/f2/input/_8"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/output/_10", "Identity", {"f2/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f2/output_control_node/_11", "NoOp", {"^f2/add"}, {},
kDevice),
NDef("out_1", "Identity", {"Func/f2/output/_10"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out_2", "ReadVariableOp",
{"v", "^Func/f1/output_control_node/_5",
"^Func/f2/output_control_node/_11"},
{{"dtype", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
item.feed.emplace_back("a", kOne);
item.feed.emplace_back("b", kTwo);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 2);
EXPECT_EQ(tensors_expected[0].flat<float>()(0), 4.0);
EXPECT_EQ(tensors_expected[1].flat<float>()(0), 3.0);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
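// Device placement set on a node inside the function body ("/device:CPU:1" on
// the Mul) must be respected when the call is inlined.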
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "Identity", {"c"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_x = "Func/c/input/_0";
const string input_y = "Func/c/input/_1";
const string output_z = "Func/c/output/_2";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef(input_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, cpu1),
NDef("c/mul", "Mul", {input_x, input_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef("d", "Identity", {output_z}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
CompareGraphs(expected, optimized_graph);
}
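// Same as above, but with two chained indirect calls inlined into one graph.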
TEST_F(FunctionOptimizerTest,
InlineMultipleIndirectFunctionWithDevicePlacement) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
(*mul_func.mutable_node_def())[0].set_device("/device:CPU:1");
const string cpu0 = "/job:work/replica:1/task:1/device:CPU:0";
const string cpu1 = "/job:work/replica:1/task:1/device:CPU:1";
GrapplerItem item;
item.fetch = {"e"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef("c", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("d", "PartitionedCall", {"a", "c"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
cpu0),
NDef("e", "Identity", {"d"}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
ASSERT_TRUE(item.InferDevicesFromGraph().ok());
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const string input_c_x = "Func/c/input/_0";
const string input_c_y = "Func/c/input/_1";
const string output_c_z = "Func/c/output/_2";
const string input_d_x = "Func/d/input/_3";
const string input_d_y = "Func/d/input/_4";
const string output_d_z = "Func/d/output/_5";
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu0),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, cpu1),
NDef(input_c_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_c_y, "Identity", {"b"}, {{"T", DT_FLOAT}}, cpu1),
NDef("c/mul", "Mul", {input_c_x, input_c_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_c_z, "Identity", {"c/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef(input_d_x, "Identity", {"a"}, {{"T", DT_FLOAT}}, cpu0),
NDef(input_d_y, "Identity", {output_c_z}, {{"T", DT_FLOAT}}, cpu1),
NDef("d/mul", "Mul", {input_d_x, input_d_y}, {{"T", DT_FLOAT}}, cpu1),
NDef(output_d_z, "Identity", {"d/mul"}, {{"T", DT_FLOAT}}, cpu1),
NDef("e", "Identity", {output_d_z}, {{"T", DT_FLOAT}}, cpu0)},
{mul_func});
CompareGraphs(expected, optimized_graph);
}
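// A control dependency on a stateless function call must still be carried
// through the inlined body via input/output control NoOp nodes.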
TEST_F(FunctionOptimizerTest,
InlineIndirectFunctionWithControlDependencyAndNoSideEffects) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
const Tensor kOne = test::AsScalar<float>(1.0);
const Tensor kTwo = test::AsScalar<float>(2.0);
const TensorShape scalar = TensorShape({});
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "NoOp", {}, {}, kDevice),
NDef("f1", "PartitionedCall", {"a", "b", "^c"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("f2", "PartitionedCall", {"f1", "f1", "^f1"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out", "Identity", {"f2"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "NoOp", {}, {}, kDevice),
NDef("Func/f1/input_control_node/_0", "NoOp", {"^c"}, {}, kDevice),
NDef("Func/f1/input/_1", "Identity",
{"a", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/input/_2", "Identity",
{"b", "^Func/f1/input_control_node/_0"}, {{"T", DT_FLOAT}},
kDevice),
NDef("f1/mul", "Mul", {"Func/f1/input/_1", "Func/f1/input/_2"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f1/output/_3", "Identity", {"f1/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("Func/f1/output_control_node/_4", "NoOp",
{"^Func/f1/input_control_node/_0"}, {}, kDevice),
NDef("Func/f2/input_control_node/_5", "NoOp",
{"^Func/f1/output_control_node/_4"}, {}, kDevice),
NDef("Func/f2/input/_6", "Identity",
{"Func/f1/output/_3", "^Func/f2/input_control_node/_5"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/input/_7", "Identity",
{"Func/f1/output/_3", "^Func/f2/input_control_node/_5"},
{{"T", DT_FLOAT}}, kDevice),
NDef("f2/mul", "Mul", {"Func/f2/input/_6", "Func/f2/input/_7"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/f2/output/_8", "Identity", {"f2/mul"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out", "Identity", {"Func/f2/output/_8"}, {{"T", DT_FLOAT}},
kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
item.feed.emplace_back("a", kOne);
item.feed.emplace_back("b", kTwo);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
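// Functions that may return dead tensors (a Switch whose branches are not
// merged) cannot be safely inlined, so the graph must stay unchanged.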
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionDoNotInlineDeadOutputs) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef dead_outputs = FunctionDefHelper::Create(
"DeadOutputs", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{
{{"switch"}, "Switch", {"x", "cond"}, {{"T", "$T"}}},
{{"if_false"}, "Identity", {"switch:output_false:0"}, {{"T", "$T"}}},
{{"if_true"}, "Identity", {"switch:output_true:0"}, {{"T", "$T"}}},
},
{{"z", "if_false:output:0"}});
FunctionDef proxy_func = FunctionDefHelper::Create(
"Proxy", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{{{"dead"}, "DeadOutputs", {"x", "cond"}, {{"T", "$T"}}}},
{{"z", "dead:z:0"}});
GrapplerItem item;
item.fetch = {"out0", "out1"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("fn0", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("DeadOutputs", {{"T", DT_FLOAT}})}},
kDevice),
NDef("fn1", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("Proxy", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out0", "Identity", {"fn0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("out1", "Identity", {"fn1"}, {{"T", DT_FLOAT}}, kDevice)},
{dead_outputs, proxy_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = item.graph;
CompareGraphs(expected, optimized_graph);
const Tensor one = test::AsScalar<float>(1.0);
item.feed.emplace_back("a", one);
item.feed.emplace_back("b", test::AsScalar<bool>(false));
auto tensors = EvaluateFetchNodes(item);
ASSERT_EQ(tensors.size(), 2);
test::ExpectTensorEqual<float>(tensors[0], one);
test::ExpectTensorEqual<float>(tensors[1], one);
}
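// A Switch inside the function body is safe to inline when its dead branch is
// resolved by a Merge before the function output.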
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithMergedDeadTensors) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef no_dead_outputs = FunctionDefHelper::Create(
"NoDeadOutputs", {"x:T", "cond:bool"}, {"z:T"}, {"T: {float, double}"},
{
{{"switch"}, "Switch", {"x", "cond"}, {{"T", "$T"}}},
{{"if_false"}, "Identity", {"switch:output_false:0"}, {{"T", "$T"}}},
{{"if_true"}, "Identity", {"switch:output_true:0"}, {{"T", "$T"}}},
{{"merge"},
"Merge",
{"if_false:output:0", "if_true:output:0"},
{{"T", "$T"}, {"N", 2}}},
},
{{"z", "merge:output:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"out"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("fn", "PartitionedCall", {"a", "b"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_BOOL}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("NoDeadOutputs", {{"T", DT_FLOAT}})}},
kDevice),
NDef("out", "Identity", {"fn"}, {{"T", DT_FLOAT}}, kDevice)},
{no_dead_outputs});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("Func/fn/input/_0", "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("Func/fn/input/_1", "Identity", {"b"}, {{"T", DT_BOOL}}, kDevice),
NDef("fn/switch", "Switch", {"Func/fn/input/_0", "Func/fn/input/_1"},
{{"T", DT_FLOAT}}, kDevice),
NDef("fn/if_false", "Identity", {"fn/switch"}, {{"T", DT_FLOAT}},
kDevice),
NDef("fn/if_true", "Identity", {"fn/switch:1"}, {{"T", DT_FLOAT}},
kDevice),
NDef("fn/merge", "Merge", {"fn/if_false", "fn/if_true"},
{{"T", DT_FLOAT}, {"N", 2}}, kDevice),
NDef("Func/fn/output/_2", "Identity", {"fn/merge"}, {{"T", DT_FLOAT}},
kDevice),
NDef("out", "Identity", {"Func/fn/output/_2"}, {{"T", DT_FLOAT}},
kDevice)},
{no_dead_outputs});
CompareGraphs(expected, optimized_graph);
const Tensor one = test::AsScalar<float>(1.0);
item.feed.emplace_back("a", one);
item.feed.emplace_back("b", test::AsScalar<bool>(false));
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<float>(tensors[0], tensors_expected[0]);
}
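// Nested indirect calls (MySquare calling MyMul via PartitionedCall) must be
// inlined recursively, leaving only primitive ops.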
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithNestedFunctionCall) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef square_func = FunctionDefHelper::Create(
"MySquare", {"x:T"}, {"output:T"}, {"T: {float, double}"},
{{{"square"},
"PartitionedCall",
{"x", "x"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}}}},
{{"output", "square:output:0"}});
GrapplerItem item;
TF_EXPECT_OK(item.AddDevice(kDevice));
item.fetch = {"c"};
item.graph = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "PartitionedCall", {"a"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MySquare", {{"T", DT_FLOAT}})}},
kDevice),
NDef("c", "Identity", {"b"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func, square_func});
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected = test::function::GDef(
{NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("Func/b/input/_0", "Identity", {"a"}, {{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/input/_2", "Identity", {"Func/b/input/_0"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/input/_3", "Identity", {"Func/b/input/_0"},
{{"T", DT_FLOAT}}, kDevice),
NDef("b/square/mul", "Mul",
{"Func/b/square/input/_2", "Func/b/square/input/_3"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/square/output/_4", "Identity", {"b/square/mul"},
{{"T", DT_FLOAT}}, kDevice),
NDef("Func/b/output/_1", "Identity", {"Func/b/square/output/_4"},
{{"T", DT_FLOAT}}, kDevice),
NDef("c", "Identity", {"Func/b/output/_1"}, {{"T", DT_FLOAT}}, kDevice)},
{mul_func});
CompareGraphs(expected, optimized_graph);
Tensor three = test::AsScalar<float>(3.0f);
item.feed.emplace_back("a", three);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
auto tensors_expected = EvaluateFetchNodes(item);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), 1);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
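// Builds a graph calling AddOrMul, whose body is a functional If (marked with
// "_lower_using_switch_merge") selecting between MyAdd and MyMul.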
GrapplerItem ConditionalAdd() {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionDef add_func = FDH::Create(
"MyAdd", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"add"}, "Add", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "add:z:0"}});
FunctionDef mul_func = FDH::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"mul"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "mul:z:0"}});
FunctionDef add_or_mul_func = FDH::Create(
"AddOrMul", {"cond:bool", "x:float", "y:float"}, {"z:float"}, {},
{
{{"if_node"},
"If",
{"cond", "x", "y"},
{
{"Tcond", DT_BOOL},
{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"then_branch", FDH::FunctionRef("MyAdd", {{"T", DT_FLOAT}})},
{"else_branch", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})},
{"_lower_using_switch_merge", true},
}},
},
{{"z", "if_node:output:0"}}, {{"side_effect", "if_node"}});
GrapplerItem item;
item.fetch = {"d"};
item.graph = test::function::GDef(
{NDef("is_add", "Placeholder", {}, {{"dtype", DT_BOOL}}, kDevice),
NDef("a", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("b", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("c", "PartitionedCall", {"is_add", "a", "b"},
{{"Tin", DataTypeSlice{DT_BOOL, DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("AddOrMul")}},
kDevice),
NDef("d", "Identity", {"c", "^c"}, {{"T", DT_FLOAT}}, kDevice)},
{add_or_mul_func, add_func, mul_func});
return item;
}
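// With control flow lowering enabled, the inlined If is rewritten into
// Switch/Merge nodes.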
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionWithFunctionalControlFlow) {
FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE, true);
GrapplerItem item = ConditionalAdd();
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const auto count_nodes_with_op = [&](const string& op) {
return absl::c_count_if(optimized_graph.node(), [&](const NodeDef& node) {
return node.op() == op;
});
};
EXPECT_EQ(count_nodes_with_op("PartitionedCall"), 0);
EXPECT_EQ(count_nodes_with_op("If"), 0);
EXPECT_EQ(count_nodes_with_op("Switch"), 3);
EXPECT_EQ(count_nodes_with_op("Merge"), 2);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
Tensor one = test::AsScalar<float>(1.0);
Tensor two = test::AsScalar<float>(2.0);
Tensor three = test::AsScalar<float>(3.0);
const auto feed_args = [&](bool is_add) {
std::vector<std::pair<string, Tensor>> feed;
feed.emplace_back("a", one);
feed.emplace_back("b", two);
feed.emplace_back("is_add", test::AsScalar<bool>(is_add));
return feed;
};
{
item.feed = feed_args(true);
optimized.feed = feed_args(true);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], three);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
item.feed = feed_args(false);
optimized.feed = feed_args(false);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], two);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
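// With control flow lowering disabled, the call is still inlined but the
// functional If node is kept intact.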
TEST_F(FunctionOptimizerTest, InlineIndirectFunctionDontLowerControlFlow) {
  FunctionOptimizer optimizer(RewriterConfig::AGGRESSIVE,
                              /*lower_control_flow=*/false);
GrapplerItem item = ConditionalAdd();
GraphDef optimized_graph;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
const auto count_nodes_with_op = [&](const string& op) {
return absl::c_count_if(optimized_graph.node(), [&](const NodeDef& node) {
return node.op() == op;
});
};
EXPECT_EQ(count_nodes_with_op("PartitionedCall"), 0);
EXPECT_EQ(count_nodes_with_op("If"), 1);
EXPECT_EQ(count_nodes_with_op("Switch"), 0);
EXPECT_EQ(count_nodes_with_op("Merge"), 0);
GrapplerItem optimized = item.WithGraph(std::move(optimized_graph));
Tensor one = test::AsScalar<float>(1.0);
Tensor two = test::AsScalar<float>(2.0);
Tensor three = test::AsScalar<float>(3.0);
const auto feed_args = [&](bool is_add) {
std::vector<std::pair<string, Tensor>> feed;
feed.emplace_back("a", one);
feed.emplace_back("b", two);
feed.emplace_back("is_add", test::AsScalar<bool>(is_add));
return feed;
};
{
item.feed = feed_args(true);
optimized.feed = feed_args(true);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], three);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
{
item.feed = feed_args(false);
optimized.feed = feed_args(false);
auto tensors_expected = EvaluateFetchNodes(item);
ASSERT_EQ(tensors_expected.size(), 1);
test::ExpectTensorEqual<float>(tensors_expected[0], two);
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors.size(), tensors_expected.size());
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
}
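// A _noinline function is specialized for its call site: the library keeps a
// single specialized copy and the call node is retargeted to it.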
TEST_F(FunctionOptimizerTest, SpecializeFunctionXTimesTwo) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef x_times_two = test::function::XTimesTwo();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {x_times_two};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(1, output.library().function_size());
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph",
output.library().function(0).signature().name());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph", node.op());
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionXTimesTwo) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef x_times_two = test::function::XTimesTwo();
(*x_times_two.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {x_times_two};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("y", "PartitionedCall", {"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}},
kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(1, output.library().function_size());
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph",
output.library().function(0).signature().name());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("PartitionedCall", node.op());
auto& func = AttrSlice(node).Find("f")->func();
EXPECT_EQ("XTimesTwo_specialized_for_y_at_tf_graph", func.name());
auto& tin = AttrSlice(node).Find("Tin")->list();
auto& tout = AttrSlice(node).Find("Tout")->list();
ASSERT_EQ(1, tin.type_size());
ASSERT_EQ(1, tout.type_size());
EXPECT_EQ(DT_FLOAT, tin.type(0));
EXPECT_EQ(DT_FLOAT, tout.type(0));
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
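// Const inputs are pushed down into the specialized function body; the call
// keeps only the non-const input plus a control dependency carried over from
// the removed Const node.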
TEST_F(FunctionOptimizerTest, SpecializeFunctionPushDownConstInput) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("init", "NoOp", {}, {}, kDevice),
NDef("two", "Const", {"^init", "^x"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("y", "MyMul", {"x", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(1, output.library().function_size());
const FunctionDef& specialized = output.library().function(0);
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph",
specialized.signature().name());
EXPECT_EQ(1, specialized.signature().input_arg_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^init", node.input(1));
}
}
EXPECT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionPushDownConstInput) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, double}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("init", "NoOp", {}, {}, kDevice),
NDef("two", "Const", {"^init", "^x"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("y", "PartitionedCall", {"x", "two"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FDH::FunctionRef("MyMul", {{"T", DT_FLOAT}})}},
kDevice),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_EQ(1, output.library().function_size());
const FunctionDef& specialized = output.library().function(0);
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph",
specialized.signature().name());
EXPECT_EQ(1, specialized.signature().input_arg_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "y" && ++count) {
EXPECT_EQ("PartitionedCall", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("x", node.input(0));
EXPECT_EQ("^init", node.input(1));
auto& func = AttrSlice(node).Find("f")->func();
EXPECT_EQ("MyMul_specialized_for_y_at_tf_graph", func.name());
auto& tin = AttrSlice(node).Find("Tin")->list();
auto& tout = AttrSlice(node).Find("Tout")->list();
ASSERT_EQ(1, tin.type_size());
ASSERT_EQ(1, tout.type_size());
EXPECT_EQ(DT_FLOAT, tin.type(0));
EXPECT_EQ(DT_FLOAT, tout.type(0));
}
}
ASSERT_EQ(1, count);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"z"};
item.feed.emplace_back("x", pi);
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
}
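// Specializations are shared across call sites with the same context (type
// parameters and const inputs), so six calls produce only four library
// functions.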
TEST_F(FunctionOptimizerTest, SpecializeFunction_OncePerUniqueContext) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef mul_func = FunctionDefHelper::Create(
"MyMul", {"x:T", "y:T"}, {"z:T"}, {"T: {float, int32}"},
{{{"output"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z", "output:z:0"}});
(*mul_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {mul_func};
const Tensor kTwo = test::AsScalar<float>(2.0);
const Tensor kThree = test::AsScalar<float>(3.0);
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("xi", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("yi", "Placeholder", {}, {{"dtype", DT_INT32}}, kDevice),
NDef("two", "Const", {"^init", "^xf"},
{{"dtype", DT_FLOAT}, {"value", kTwo}}, kDevice),
NDef("three", "Const", {"^init", "^xf"},
{{"dtype", DT_FLOAT}, {"value", kThree}}, kDevice),
NDef("mul_1", "MyMul", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_2", "MyMul", {"yf", "xf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_3", "MyMul", {"xi", "yi"}, {{"T", DT_INT32}}, kDevice),
NDef("mul_4", "MyMul", {"xf", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_5", "MyMul", {"yf", "two"}, {{"T", DT_FLOAT}}, kDevice),
NDef("mul_6", "MyMul", {"three", "xf"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
item.fetch = {"mul_1", "mul_2", "mul_3", "mul_4", "mul_5", "mul_6"};
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(4, output.library().function_size());
int count = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "mul_1" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_1_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("yf", node.input(1));
} else if (node.name() == "mul_2" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_1_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("yf", node.input(0));
EXPECT_EQ("xf", node.input(1));
} else if (node.name() == "mul_3" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_3_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xi", node.input(0));
EXPECT_EQ("yi", node.input(1));
} else if (node.name() == "mul_4" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_4_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("^init", node.input(1));
} else if (node.name() == "mul_5" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_4_at_tf_graph", node.op());
ASSERT_EQ(3, node.input_size());
EXPECT_EQ("yf", node.input(0));
gtl::FlatSet<string> expected_ctrl = {"^init", "^xf"};
gtl::FlatSet<string> actual_ctrl = {node.input(1), node.input(2)};
EXPECT_EQ(expected_ctrl, actual_ctrl);
} else if (node.name() == "mul_6" && ++count) {
EXPECT_EQ("MyMul_specialized_for_mul_6_at_tf_graph", node.op());
ASSERT_EQ(2, node.input_size());
EXPECT_EQ("xf", node.input(0));
EXPECT_EQ("^init", node.input(1));
}
}
EXPECT_EQ(6, count);
Tensor pi = test::AsScalar<float>(3.14f);
Tensor four = test::AsScalar<int32>(4);
item.feed = {{"xf", pi}, {"yf", pi}, {"xi", four}, {"yi", four}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
test::ExpectTensorEqual<int32>(tensors_expected[2], tensors[2]);
test::ExpectTensorEqual<float>(tensors_expected[3], tensors[3]);
test::ExpectTensorEqual<float>(tensors_expected[4], tensors[4]);
test::ExpectTensorEqual<float>(tensors_expected[5], tensors[5]);
}
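// Outputs that are never consumed are pruned from the specialized function,
// and consumers are remapped to the remaining output slots.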
TEST_F(FunctionOptimizerTest, SpecializeFunctionForUsedOutputTensors) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T", "z3:T"}, {"T: {float, int32}"},
{{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output3"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "output1:z:0"}, {"z2", "output2:z:0"}, {"z3", "output3:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_func};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_0", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_1", "Identity", {"fn1:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_2", "Identity", {"fn1:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn2_0", "Identity", {"fn2:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn3", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn3_1", "Identity", {"fn3:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn4", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn4_2", "Identity", {"fn4:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn5", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_0", "Identity", {"fn5:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_2", "Identity", {"fn5:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn6", "MyFunc", {"xf", "yf"}, {{"T", DT_FLOAT}}, kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(6, output.library().function_size());
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn1_at_tf_graph", node.op());
} else if (node.name() == "fn2" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn2_at_tf_graph", node.op());
} else if (node.name() == "fn3" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn3_at_tf_graph", node.op());
} else if (node.name() == "fn4" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn4_at_tf_graph", node.op());
} else if (node.name() == "fn5" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn5_at_tf_graph", node.op());
} else if (node.name() == "fn6" && ++found) {
EXPECT_EQ("MyFunc_specialized_for_fn6_at_tf_graph", node.op());
}
if (node.name() == "use_fn3_1" && ++found) {
EXPECT_EQ("fn3", node.input(0));
} else if (node.name() == "use_fn4_2" && ++found) {
EXPECT_EQ("fn4", node.input(0));
} else if (node.name() == "use_fn5_0" && ++found) {
EXPECT_EQ("fn5", node.input(0));
} else if (node.name() == "use_fn5_2" && ++found) {
EXPECT_EQ("fn5:1", node.input(0));
}
}
EXPECT_EQ(10, found);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"use_fn1_0", "use_fn1_1", "use_fn1_2", "use_fn2_0",
"use_fn3_1", "use_fn4_2", "use_fn5_0", "use_fn5_2"};
item.feed = {{"xf", pi}, {"yf", pi}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
TEST_F(FunctionOptimizerTest, SpecializeIndirectFunctionForUsedOutputTensors) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
FunctionDef my_func = FunctionDefHelper::Create(
"MyFunc", {"x:T", "y:T"}, {"z1:T", "z2:T", "z3:T"}, {"T: {float, int32}"},
{{{"output1"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output2"}, "Mul", {"x", "y"}, {{"T", "$T"}}},
{{"output3"}, "Mul", {"x", "y"}, {{"T", "$T"}}}},
{{"z1", "output1:z:0"}, {"z2", "output2:z:0"}, {"z3", "output3:z:0"}});
(*my_func.mutable_attr())["_noinline"].set_b(true);
std::vector<FunctionDef> function_library = {my_func};
GrapplerItem item;
item.id = "tf_graph";
item.graph = test::function::GDef(
{NDef("init", "NoOp", {}, {}, kDevice),
NDef("xf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("yf", "Placeholder", {}, {{"dtype", DT_FLOAT}}, kDevice),
NDef("fn1", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn1_0", "Identity", {"fn1:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_1", "Identity", {"fn1:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn1_2", "Identity", {"fn1:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn2", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn2_0", "Identity", {"fn2:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn3", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn3_1", "Identity", {"fn3:1"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn4", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn4_2", "Identity", {"fn4:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn5", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice),
NDef("use_fn5_0", "Identity", {"fn5:0"}, {{"T", DT_FLOAT}}, kDevice),
NDef("use_fn5_2", "Identity", {"fn5:2"}, {{"T", DT_FLOAT}}, kDevice),
NDef("fn6", "PartitionedCall", {"xf", "yf"},
{{"Tin", DataTypeSlice{DT_FLOAT, DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT, DT_FLOAT, DT_FLOAT}},
{"f", FDH::FunctionRef("MyFunc", {{"T", DT_FLOAT}})}},
kDevice)},
function_library);
GraphDef output;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(6, output.library().function_size());
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "fn1" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn1_at_tf_graph", func.name());
ASSERT_EQ(3, tout.type_size());
} else if (node.name() == "fn2" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn2_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn3" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn3_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn4" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn4_at_tf_graph", func.name());
ASSERT_EQ(1, tout.type_size());
} else if (node.name() == "fn5" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn5_at_tf_graph", func.name());
ASSERT_EQ(2, tout.type_size());
} else if (node.name() == "fn6" && ++found) {
auto& func = AttrSlice(node).Find("f")->func();
auto& tout = AttrSlice(node).Find("Tout")->list();
EXPECT_EQ("PartitionedCall", node.op());
EXPECT_EQ("MyFunc_specialized_for_fn6_at_tf_graph", func.name());
ASSERT_EQ(0, tout.type_size());
}
if (node.name() == "use_fn3_1" && ++found) {
EXPECT_EQ("fn3", node.input(0));
} else if (node.name() == "use_fn4_2" && ++found) {
EXPECT_EQ("fn4", node.input(0));
} else if (node.name() == "use_fn5_0" && ++found) {
EXPECT_EQ("fn5", node.input(0));
} else if (node.name() == "use_fn5_2" && ++found) {
EXPECT_EQ("fn5:1", node.input(0));
}
}
EXPECT_EQ(10, found);
Tensor pi = test::AsScalar<float>(3.14f);
item.fetch = {"use_fn1_0", "use_fn1_1", "use_fn1_2", "use_fn2_0",
"use_fn3_1", "use_fn4_2", "use_fn5_0", "use_fn5_2"};
item.feed = {{"xf", pi}, {"yf", pi}};
auto tensors_expected = EvaluateFetchNodes(item);
GrapplerItem optimized = item.WithGraph(std::move(output));
auto tensors = EvaluateFetchNodes(optimized);
ASSERT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < item.fetch.size(); ++i) {
test::ExpectTensorEqual<float>(tensors_expected[i], tensors[i]);
}
}
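// Library functions that are unreachable from the optimized graph are pruned.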
TEST_F(FunctionOptimizerTest, PruningUselessLibraryFunctions) {
using test::function::NDef;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
auto func = test::function::XTimesTwo();
(*func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "test_graph";
item.graph = test::function::GDef(
{NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, "/device:CPU:0"),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, "/device:CPU:0")},
{
func,
test::function::XTimesTwoInt32(),
test::function::XTimes16(),
});
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
ASSERT_EQ(output.library().function().size(), 1);
EXPECT_EQ(output.library().function(0).signature().name(),
"XTimesTwo_specialized_for_y_at_test_graph");
}
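// Functions referenced by the item's save/restore ops must survive pruning
// even though the main graph never calls them.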
TEST_F(FunctionOptimizerTest, PreserveSaverDefFunctions) {
using test::function::NDef;
using FDH = FunctionDefHelper;
FunctionOptimizer optimizer(RewriterConfig::DEFAULT, true);
auto func = test::function::XTimesTwo();
(*func.mutable_attr())["_noinline"].set_b(true);
GrapplerItem item;
item.id = "test_graph";
item.graph = test::function::GDef(
{
NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}, "/device:CPU:0"),
NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("z", "Identity", {"y"}, {{"T", DT_FLOAT}}, "/device:CPU:0"),
NDef("Restore", "StatefulPartitionedCall", {},
{{"Tin", {}},
{"Tout", {}},
{"f", FDH::FunctionRef("RestoreFn", {})}},
"/device:CPU:0"),
NDef("Save", "StatefulPartitionedCall", {},
{{"Tin", {}},
{"Tout", {}},
{"f", FDH::FunctionRef("SaveFn", {})}},
"/device:CPU:0"),
},
{
func,
test::function::XTimesTwoInt32(),
test::function::XTimes16(),
FDH::Create("RestoreFn", {}, {}, {}, {}, {}),
FDH::Create("SaveFn", {}, {}, {}, {}, {}),
});
item.restore_op = "Restore";
item.save_op = "Save";
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
ASSERT_EQ(output.library().function().size(), 3);
std::vector<std::string> signature_names;
for (const auto& function : output.library().function()) {
signature_names.push_back(function.signature().name());
}
EXPECT_THAT(signature_names, ::testing::UnorderedElementsAre(
"XTimesTwo_specialized_for_y_at_test_graph",
"RestoreFn", "SaveFn"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0f3bf028-d254-4a1a-870f-1bc70640aead | cpp | tensorflow/tensorflow | batch_kernels | tensorflow/core/kernels/batch_kernels.cc | tensorflow/core/kernels/batch_kernels_test.cc | #include "tensorflow/core/kernels/batch_kernels.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/batching_util/adaptive_shared_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_resource_base.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler_utils.h"
#include "tensorflow/core/kernels/batching_util/bounded_executor.h"
#include "tensorflow/core/kernels/batching_util/concat_split_util.h"
#include "tensorflow/core/kernels/batching_util/periodic_function.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kEnableAdaptiveSchedulerAttr[] = "_enable_adaptive_scheduler";
constexpr char kMinInflightBatchesAttr[] = "_min_inflight_batches";
constexpr char kInitialInflightBatchesAttr[] = "_initial_inflight_batches";
constexpr char kMaxInflightBatchesAttr[] = "_max_inflight_batches";
constexpr char kBatchesToAverageOverAttr[] = "_batches_to_average_over";
constexpr char kFullBatchSchedulingBoostMicros[] =
"_full_batch_scheduling_boost_micros";
constexpr int64_t kBatchThreadPoolSize = 128;
}
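// Default in-flight batch limits for the adaptive shared batch scheduler,
// used when the corresponding attributes above are not set on the op.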
const int64_t kMinInflightBatches = 1;
const int64_t kInitialInflightBatches = 2;
const int64_t kBatchesToAverageOver = 10;
const int64_t kMaxInflightBatches = 64;
void RecordBatchSplitUsage(
std::optional<bool> maybe_enable_large_batch_splitting,
absl::string_view model_name) {
static auto* cell = monitoring::Gauge<std::string, 1>::New(
"/tensorflow/serving/batching/enable_large_batch_splitting",
"Tracks the usage of attribute `enable_large_batch_splitting` for "
"BatchFunction kernel in a saved model.",
"model_name");
if (maybe_enable_large_batch_splitting.has_value()) {
if (maybe_enable_large_batch_splitting.value()) {
cell->GetCell(std::string(model_name))->Set("true");
} else {
cell->GetCell(std::string(model_name))->Set("false");
}
} else {
cell->GetCell(std::string(model_name))->Set("unset");
}
}
void RecordBatchParamNumBatchThreads(int64_t num_batch_threads,
absl::string_view model_name) {
static auto* cell = monitoring::Gauge<int64_t, 1>::New(
"/tensorflow/serving/batching/num_batch_threads",
"Tracks the number of batch threads of a model.", "model_name");
cell->GetCell(std::string(model_name))->Set(num_batch_threads);
}
absl::string_view GetModelName(OpKernelContext* ctx) {
if (ctx->session_metadata() == nullptr ||
ctx->session_metadata()->name().empty()) {
return "model_name_unset";
}
return ctx->session_metadata()->name();
}
using ::tensorflow::concat_split_util::Concat;
using ::tensorflow::concat_split_util::Split;
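// Reads the number of batch threads from the TF_NUM_BATCH_THREADS environment
// variable, falling back to `default_num_batch_threads` if unset or invalid.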
int32 NumBatchThreadsFromEnvironmentWithDefault(int default_num_batch_threads) {
int32_t num;
const char* val = std::getenv("TF_NUM_BATCH_THREADS");
return (val && strings::safe_strto32(val, &num)) ? num
: default_num_batch_threads;
}
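// Lazily creates a process-wide thread pool shared by all adaptive batch
// schedulers; returns nullptr if the bounded executor cannot be created.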
static thread::ThreadPool* GetOrCreateBatchThreadsPool() {
static thread::ThreadPool* shared_thread_pool = [&]() -> thread::ThreadPool* {
serving::BoundedExecutor::Options options;
options.num_threads =
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize);
options.thread_name = std::string("adaptive_batch_threads");
auto status_or_executor = serving::BoundedExecutor::Create(options);
if (!status_or_executor.ok()) {
LOG(WARNING) << "Failed to create a batch threads pool with error "
<< status_or_executor.status();
return nullptr;
}
static serving::BoundedExecutor* executor =
status_or_executor.value().release();
return new thread::ThreadPool(executor);
}();
return shared_thread_pool;
}
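// A BatchResourceBase implementation that runs the batched computation by
// invoking a FunctionLibraryRuntime handle captured in each BatchTask.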
class BatchResource : public serving::BatchResourceBase {
public:
struct BatchTask : serving::BatchResourceBase::BatchTask {
FunctionLibraryRuntime::Handle fhandle;
explicit BatchTask(FunctionLibraryRuntime::Handle fhandle)
: fhandle(fhandle) {}
protected:
std::unique_ptr<serving::BatchResourceBase::BatchTask> CreateDerivedTask()
override {
return std::make_unique<BatchTask>(fhandle);
}
};
static Status Create(bool has_process_batch_function,
int32_t num_batch_threads,
int32_t max_execution_batch_size,
int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
bool enable_large_batch_splitting,
std::unique_ptr<BatchResource>* resource) {
return Create(has_process_batch_function, num_batch_threads,
max_execution_batch_size, batch_timeout_micros,
max_enqueued_batches, allowed_batch_sizes,
                  /*low_priority_max_batch_size=*/0,
                  /*low_priority_batch_timeout_micros=*/0,
                  /*low_priority_max_enqueued_batches=*/0,
                  /*low_priority_allowed_batch_sizes=*/{},
                  serving::MixedPriorityBatchingPolicy::
                      kLowPriorityPaddingWithMaxBatchSize,
                  enable_large_batch_splitting,
                  /*batch_padding_policy=*/"PAD_UP", resource);
}
static Status Create(
bool has_process_batch_function, int32_t num_batch_threads,
int32_t max_execution_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
int32_t low_priority_max_batch_size,
int32_t low_priority_batch_timeout_micros,
int32_t low_priority_max_enqueued_batches,
const std::vector<int32>& low_priority_allowed_batch_sizes,
serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy,
bool enable_large_batch_splitting, absl::string_view batch_padding_policy,
std::unique_ptr<BatchResource>* resource) {
BatcherT::Options batcher_options;
batcher_options.num_batch_threads = num_batch_threads;
std::shared_ptr<BatcherT> batcher;
TF_RETURN_IF_ERROR(BatcherT::Create(batcher_options, &batcher));
resource->reset(new BatchResource(
has_process_batch_function, std::move(batcher),
GetBatcherQueueOptions(
num_batch_threads, max_execution_batch_size, batch_timeout_micros,
max_enqueued_batches, allowed_batch_sizes,
enable_large_batch_splitting,
            /*disable_padding=*/false, batch_padding_policy,
low_priority_max_batch_size, low_priority_batch_timeout_micros,
low_priority_max_enqueued_batches, low_priority_allowed_batch_sizes,
mixed_priority_batching_policy),
allowed_batch_sizes));
return absl::OkStatus();
}
static Status Create(
bool has_process_batch_function,
AdaptiveBatcherT::Options adaptive_shared_batch_scheduler_options,
int32_t max_batch_size, int32_t batch_timeout_micros,
int32_t max_enqueued_batches,
const std::vector<int32>& allowed_batch_sizes,
std::unique_ptr<BatchResource>* resource) {
std::shared_ptr<AdaptiveBatcherT> batcher;
TF_RETURN_IF_ERROR(AdaptiveBatcherT::Create(
adaptive_shared_batch_scheduler_options, &batcher));
resource->reset(new BatchResource(
has_process_batch_function, std::move(batcher),
GetAdaptiveBatcherQueueOptions(
max_batch_size, batch_timeout_micros, max_enqueued_batches,
            /*enable_large_batch_splitting=*/true, allowed_batch_sizes,
            /*disable_padding=*/false),
allowed_batch_sizes));
return absl::OkStatus();
}
string DebugString() const final { return "BatchResource"; }
private:
BatchResource(bool has_process_batch_function,
std::shared_ptr<BatcherT> batcher,
const BatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: BatchResourceBase(has_process_batch_function, std::move(batcher),
batcher_queue_options,
std::move(allowed_batch_sizes)) {}
BatchResource(bool has_process_batch_function,
std::shared_ptr<AdaptiveBatcherT> batcher,
const AdaptiveBatcherT::QueueOptions& batcher_queue_options,
std::vector<int32> allowed_batch_sizes)
: BatchResourceBase(has_process_batch_function, std::move(batcher),
batcher_queue_options,
std::move(allowed_batch_sizes)) {}
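  // Runs the batch function on the concatenated inputs using the handle and
  // runtime options taken from the last task's OpKernelContext, blocking until
  // the function finishes.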
void ProcessFuncBatchImpl(
const serving::BatchResourceBase::BatchTask& last_task,
absl::Span<const Tensor> inputs, std::vector<Tensor>* combined_outputs,
std::function<void(const Status&)> done) const override {
auto* last_task_context = last_task.context;
FunctionLibraryRuntime::Options opts;
opts.step_container = last_task_context->step_container();
opts.cancellation_manager = last_task_context->cancellation_manager();
opts.collective_executor = last_task_context->collective_executor();
opts.stats_collector = last_task_context->stats_collector();
opts.runner = last_task_context->runner();
opts.run_all_kernels_inline = last_task_context->run_all_kernels_inline();
Notification done_notif;
auto* flib = last_task_context->function_library();
FunctionLibraryRuntime::Handle fhandle =
down_cast<const BatchTask&>(last_task).fhandle;
flib->Run(opts, fhandle, inputs, combined_outputs,
[&](const Status& run_status) {
done(run_status);
done_notif.Notify();
});
done_notif.WaitForNotification();
}
};
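// Reads the batching attributes from the op definition, configures the
// optional adaptive scheduler options, and derives the shared name and
// batcher queue used to key the shared BatchResource.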
BatchFunctionKernel::BatchFunctionKernel(OpKernelConstruction* c)
: AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("container", &container_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_));
OP_REQUIRES_OK(c, c->GetAttr("batching_queue", &batcher_queue_));
OP_REQUIRES_OK(c, c->GetAttr("num_batch_threads", &num_batch_threads_));
OP_REQUIRES_OK(c, c->GetAttr("max_batch_size", &max_batch_size_));
OP_REQUIRES_OK(c, c->GetAttr("batch_timeout_micros", &batch_timeout_micros_));
OP_REQUIRES_OK(c, c->GetAttr("max_enqueued_batches", &max_enqueued_batches_));
OP_REQUIRES_OK(c, c->GetAttr("allowed_batch_sizes", &allowed_batch_sizes_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_batch_size",
&low_priority_max_batch_size_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_batch_timeout_micros",
&low_priority_batch_timeout_micros_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_allowed_batch_sizes",
&low_priority_allowed_batch_sizes_));
OP_REQUIRES_OK(c, c->GetAttr("low_priority_max_enqueued_batches",
&low_priority_max_enqueued_batches_));
OP_REQUIRES_OK(c,
c->GetAttr("mixed_priority_policy", &mixed_priority_policy_));
OP_REQUIRES_OK(c, c->GetAttr("batch_padding_policy", &batch_padding_policy_));
OP_REQUIRES_OK(c, c->GetAttr("f", &func_));
if (c->HasAttr("enable_large_batch_splitting")) {
OP_REQUIRES_OK(c, c->GetAttr("enable_large_batch_splitting",
&enable_large_batch_splitting_));
has_attribute_enable_large_batch_splitting_ = true;
}
SetAdaptiveBatchSchedulerOptions(c, num_batch_threads_);
if (!c->status().ok()) {
return;
}
if (enable_adaptive_batch_threads_) {
batcher_queue_ = name() + "/" + shared_name_ + batcher_queue_;
}
if (shared_name_.empty()) {
shared_name_ = name();
}
OP_REQUIRES_OK(c, ValidateAllowedBatchSizes());
}
bool BatchFunctionKernel::IsExpensive() { return false; }
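// Looks up or creates the shared BatchResource for this kernel (adaptive or
// standard scheduler, depending on the attributes) and enqueues this
// invocation's inputs for batched execution.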
void BatchFunctionKernel::ComputeAsync(OpKernelContext* c, DoneCallback done) {
RecordBatchSplitUsage(has_attribute_enable_large_batch_splitting_
? std::make_optional(enable_large_batch_splitting_)
: std::nullopt,
GetModelName(c));
RecordBatchParamNumBatchThreads(num_batch_threads_, GetModelName(c));
std::function<Status(BatchResource**)> creator;
FunctionLibraryRuntime::Handle handle;
OP_REQUIRES_OK_ASYNC(c, GetOrCreateFunctionHandle(c, &handle), done);
if (adaptive_batch_scheduler_options_ != std::nullopt) {
creator = [this,
session_metadata = c->session_metadata()](BatchResource** r) {
serving::AdaptiveSharedBatchScheduler<
serving::BatchResourceBase::BatchTask>::Options
adaptive_shared_batch_scheduler_options;
adaptive_shared_batch_scheduler_options.thread_pool_name =
"adaptive_batch_threads";
adaptive_shared_batch_scheduler_options.thread_pool =
GetOrCreateBatchThreadsPool();
adaptive_shared_batch_scheduler_options.num_batch_threads = std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->max_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options.min_in_flight_batches_limit =
std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->min_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options
.initial_in_flight_batches_limit = std::min(
NumBatchThreadsFromEnvironmentWithDefault(kBatchThreadPoolSize),
adaptive_batch_scheduler_options_->initial_in_flight_batches_limit);
adaptive_shared_batch_scheduler_options.batches_to_average_over =
adaptive_batch_scheduler_options_->batches_to_average_over;
if (adaptive_batch_scheduler_options_
->full_batch_scheduling_boost_micros != -1) {
adaptive_shared_batch_scheduler_options
.full_batch_scheduling_boost_micros =
adaptive_batch_scheduler_options_
->full_batch_scheduling_boost_micros;
adaptive_shared_batch_scheduler_options.fifo_scheduling = false;
} else {
adaptive_shared_batch_scheduler_options.fifo_scheduling = true;
}
std::unique_ptr<BatchResource> new_resource;
TF_RETURN_IF_ERROR(BatchResource::Create(
          /*has_process_batch_function=*/true,
adaptive_shared_batch_scheduler_options, max_batch_size_,
batch_timeout_micros_, max_enqueued_batches_, allowed_batch_sizes_,
&new_resource));
if (session_metadata) {
new_resource->set_session_metadata(*session_metadata);
}
*r = new_resource.release();
return absl::OkStatus();
};
} else {
creator = [this,
session_metadata = c->session_metadata()](BatchResource** r) {
TF_ASSIGN_OR_RETURN(
serving::MixedPriorityBatchingPolicy mixed_priority_batching_policy,
serving::GetMixedPriorityBatchingPolicy(mixed_priority_policy_));
std::unique_ptr<BatchResource> new_resource;
TF_RETURN_IF_ERROR(BatchResource::Create(
          /*has_process_batch_function=*/true, num_batch_threads_,
max_batch_size_, batch_timeout_micros_, max_enqueued_batches_,
allowed_batch_sizes_, low_priority_max_batch_size_,
low_priority_batch_timeout_micros_,
low_priority_max_enqueued_batches_, low_priority_allowed_batch_sizes_,
mixed_priority_batching_policy, enable_large_batch_splitting_,
batch_padding_policy_, &new_resource));
if (session_metadata) {
new_resource->set_session_metadata(*session_metadata);
}
*r = new_resource.release();
return absl::OkStatus();
};
}
BatchResource* br;
OP_REQUIRES_OK_ASYNC(c,
c->resource_manager()->LookupOrCreate(
container_, shared_name_, &br, creator),
done);
const uint64_t guid = random::New64();
auto create_batch_task_fn =
[handle]() -> absl::StatusOr<
std::unique_ptr<serving::BatchResourceBase::BatchTask>> {
return {std::make_unique<BatchResource::BatchTask>(handle)};
};
Status status;
if (serving::ShouldWarmupAllBatchSizes(c)) {
status = br->RegisterWarmupInputs(guid, c, batcher_queue_,
create_batch_task_fn, done);
} else {
status =
br->RegisterInput(guid, c, batcher_queue_, create_batch_task_fn, done);
}
br->Unref();
OP_REQUIRES_OK_ASYNC(c, status, done);
}
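// Instantiates the batched function as a multi-device function: regular
// tensor arguments and all outputs are pinned to the CPU, while captured
// resource inputs are placed on the device that owns the resource handle.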
Status BatchFunctionKernel::InstantiateFunction(
OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) const {
FunctionLibraryRuntime* flib = c->function_library();
if (!flib) {
return errors::Internal("No function library");
}
FunctionLibraryRuntime::InstantiateOptions opts;
opts.target = flib->device() == nullptr ? "" : flib->device()->name();
opts.is_multi_device_function = true;
const ConfigProto* config = flib->config_proto();
if (config) {
opts.config_proto = *config;
}
Device* cpu_device;
TF_RETURN_IF_ERROR(flib->device_mgr()->LookupDevice("CPU:0", &cpu_device));
const FunctionDef* fdef =
flib->GetFunctionLibraryDefinition()->Find(func_.name());
if (!fdef) {
return errors::NotFound("Failed to find definition for function \"",
func_.name(), "\"");
}
OpInputList in_tensors;
TF_RETURN_IF_ERROR(c->input_list("in_tensors", &in_tensors));
for (int i = 0; i < in_tensors.size(); i++) {
if (in_tensors[i].dtype() == DT_RESOURCE) {
return errors::InvalidArgument(
"BatchFunction cannot take resource inputs but input ", i,
" is a resource.");
} else {
opts.input_devices.push_back(cpu_device->name());
}
}
OpInputList captured_tensors;
TF_RETURN_IF_ERROR(c->input_list("captured_tensors", &captured_tensors));
for (const Tensor& t : captured_tensors) {
if (t.dtype() == DT_RESOURCE) {
const ResourceHandle& rhandle = t.flat<ResourceHandle>()(0);
opts.input_devices.push_back(rhandle.device());
} else {
opts.input_devices.push_back(cpu_device->name());
}
}
const OpDef& signature = fdef->signature();
for (int i = 0; i < signature.output_arg_size(); i++) {
opts.output_devices.push_back(cpu_device->name());
}
if (opts.input_devices.size() != signature.input_arg_size()) {
return errors::InvalidArgument(
"Function takes ", signature.input_arg_size(), " argument(s) but ",
opts.input_devices.size(), " argument(s) were passed");
}
return flib->Instantiate(func_.name(), AttrSlice(&func_.attr()), opts,
handle);
}
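// The function handle is instantiated at most once per kernel instance and
// cached under mu_.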
Status BatchFunctionKernel::GetOrCreateFunctionHandle(
OpKernelContext* c, FunctionLibraryRuntime::Handle* handle) {
mutex_lock ml(mu_);
if (!fhandle_) {
TF_RETURN_IF_ERROR(InstantiateFunction(c, handle));
fhandle_ = *handle;
} else {
*handle = fhandle_.value();
}
return absl::OkStatus();
}
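// allowed_batch_sizes must be strictly increasing, and its last entry must
// equal max_batch_size unless large batch splitting is enabled.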
Status BatchFunctionKernel::ValidateAllowedBatchSizes() const {
if (allowed_batch_sizes_.empty()) {
return absl::OkStatus();
}
int32_t last_size = 0;
for (size_t i = 0; i < allowed_batch_sizes_.size(); ++i) {
const int32_t size = allowed_batch_sizes_.at(i);
if (i > 0 && size <= last_size) {
return errors::InvalidArgument(
"allowed_batch_sizes entries must be monotonically increasing");
}
if ((!enable_large_batch_splitting_) &&
(i == allowed_batch_sizes_.size() - 1) && (size != max_batch_size_)) {
return errors::InvalidArgument(
"final entry in allowed_batch_sizes must equal max_batch_size when "
"enable_large_batch_splitting is False");
}
last_size = size;
}
return absl::OkStatus();
}
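// Reads the optional adaptive-scheduler attributes; adaptive batch threads
// are forced on when num_batch_threads <= 0, and the shared batch thread pool
// is created eagerly so a failure surfaces at construction time.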
void BatchFunctionKernel::SetAdaptiveBatchSchedulerOptions(
OpKernelConstruction* c, int32_t num_batch_threads) {
if (c->HasAttr(kEnableAdaptiveSchedulerAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kEnableAdaptiveSchedulerAttr,
&enable_adaptive_batch_threads_));
}
if (num_batch_threads <= 0) {
enable_adaptive_batch_threads_ = true;
}
if (!enable_adaptive_batch_threads_) {
return;
}
AdaptiveBatchSchedulerOptions options;
if (c->HasAttr(kBatchesToAverageOverAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kBatchesToAverageOverAttr,
&options.batches_to_average_over));
}
if (c->HasAttr(kMinInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kMinInflightBatchesAttr,
&options.min_in_flight_batches_limit));
}
if (c->HasAttr(kInitialInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kInitialInflightBatchesAttr,
&options.initial_in_flight_batches_limit));
}
if (c->HasAttr(kMaxInflightBatchesAttr)) {
OP_REQUIRES_OK(c, c->GetAttr(kMaxInflightBatchesAttr,
&options.max_in_flight_batches_limit));
}
if (c->HasAttr(kFullBatchSchedulingBoostMicros)) {
OP_REQUIRES_OK(c, c->GetAttr(kFullBatchSchedulingBoostMicros,
&options.full_batch_scheduling_boost_micros));
}
thread::ThreadPool* thread_pool = GetOrCreateBatchThreadsPool();
OP_REQUIRES(
c, thread_pool != nullptr,
errors::FailedPrecondition("Failed to create batch threads pool"));
adaptive_batch_scheduler_options_ = options;
}
REGISTER_KERNEL_BUILDER(Name("BatchFunction").Device(DEVICE_CPU),
BatchFunctionKernel);
REGISTER_KERNEL_BUILDER(Name("BatchFunction")
.Device(DEVICE_GPU)
.HostMemory("in_tensors")
.HostMemory("captured_tensors")
.HostMemory("out_tensors"),
BatchFunctionKernel);
REGISTER_KERNEL_BUILDER(Name("BatchFunction")
.Device(DEVICE_DEFAULT)
.HostMemory("in_tensors")
.HostMemory("captured_tensors")
.HostMemory("out_tensors"),
BatchFunctionKernel);
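// The plain "Batch" op batches raw tensors without invoking a function; the
// caller runs the batched computation itself and recovers per-request results
// with the "Unbatch" op.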
class BatchKernel : public AsyncOpKernel {
public:
explicit BatchKernel(OpKernelConstruction* c) : AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("container", &container_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_));
if (shared_name_.empty()) {
shared_name_ = name();
}
OP_REQUIRES_OK(c, c->GetAttr("batching_queue", &batcher_queue_));
OP_REQUIRES_OK(c, c->GetAttr("num_batch_threads", &num_batch_threads_));
OP_REQUIRES_OK(c, c->GetAttr("max_batch_size", &max_batch_size_));
OP_REQUIRES_OK(c,
c->GetAttr("batch_timeout_micros", &batch_timeout_micros_));
OP_REQUIRES_OK(c,
c->GetAttr("max_enqueued_batches", &max_enqueued_batches_));
OP_REQUIRES_OK(c, c->GetAttr("allowed_batch_sizes", &allowed_batch_sizes_));
OP_REQUIRES_OK(c, ValidateAllowedBatchSizes());
}
void ComputeAsync(OpKernelContext* c, DoneCallback done) final {
BatchResource* br;
std::function<Status(BatchResource**)> creator = [this](BatchResource** r) {
std::unique_ptr<BatchResource> new_resource;
TF_RETURN_IF_ERROR(BatchResource::Create(
          /*has_process_batch_function=*/false, num_batch_threads_,
          max_batch_size_, batch_timeout_micros_, max_enqueued_batches_,
          allowed_batch_sizes_, /*enable_large_batch_splitting=*/false,
          &new_resource));
*r = new_resource.release();
return absl::OkStatus();
};
OP_REQUIRES_OK_ASYNC(c,
c->resource_manager()->LookupOrCreate(
container_, shared_name_, &br, creator),
done);
const Status status = br->RegisterInput(
random::New64(), c, batcher_queue_,
[]() -> absl::StatusOr<
std::unique_ptr<serving::BatchResourceBase::BatchTask>> {
return {std::make_unique<BatchResource::BatchTask>(kInvalidHandle)};
},
done);
br->Unref();
OP_REQUIRES_OK_ASYNC(c, status, done);
}
Status ValidateAllowedBatchSizes() const {
if (allowed_batch_sizes_.empty()) {
return absl::OkStatus();
}
int32_t last_size = 0;
for (size_t i = 0; i < allowed_batch_sizes_.size(); ++i) {
const int32_t size = allowed_batch_sizes_.at(i);
if (i > 0 && size <= last_size) {
return errors::InvalidArgument(
"allowed_batch_sizes entries must be monotonically increasing");
}
if (i == allowed_batch_sizes_.size() - 1 && size != max_batch_size_) {
return errors::InvalidArgument(
"final entry in allowed_batch_sizes must equal max_batch_size");
}
last_size = size;
}
return absl::OkStatus();
}
private:
string container_;
string shared_name_;
string batcher_queue_;
int32 num_batch_threads_;
int32 max_batch_size_;
int32 batch_timeout_micros_;
int32 max_enqueued_batches_;
std::vector<int32> allowed_batch_sizes_;
};
REGISTER_KERNEL_BUILDER(Name("Batch").Device(DEVICE_CPU), BatchKernel);
class UnbatchResource : public ResourceBase {
public:
explicit UnbatchResource(int32_t timeout_micros)
: timeout_micros_(timeout_micros),
timeout_enforcer_(new serving::PeriodicFunction(
            [this] { EnforceTimeout(); }, 1000 /* check for timeouts every 1 ms */)) {}
~UnbatchResource() override {
timeout_enforcer_ = nullptr;
}
string DebugString() const final { return "UnbatchResource"; }
Status Compute(OpKernelContext* context, AsyncOpKernel::DoneCallback done) {
const Tensor& data_t = context->input(0);
const Tensor& batch_index_t = context->input(1);
if (batch_index_t.shape().dim_size(0) > data_t.shape().dim_size(0)) {
return errors::InvalidArgument(
"Wrong shape for index tensor. Expected 0th dimension size to be no "
"greater than ",
data_t.shape().dim_size(0),
"; Got: ", batch_index_t.shape().dim_size(0), ".");
}
if (batch_index_t.shape().dim_size(1) != 3) {
return errors::InvalidArgument(
"Wrong shape for index tensor. Expected 1st dimension size to be 3 ; "
"Got: ",
batch_index_t.shape().dim_size(1), ".");
}
if (!TensorShapeUtils::IsScalar(context->input(2).shape())) {
return errors::InvalidArgument(
"Input id should be scalar; "
"Got: ",
context->input(2).DebugString(), ".");
}
const int64_t batch_key = context->input(2).scalar<int64_t>()();
const bool nonempty_input = batch_index_t.dim_size(0) > 0;
std::vector<int64_t> sizes;
std::vector<int64_t> batch_keys;
std::vector<Tensor> split_inputs;
if (nonempty_input) {
auto batch_indices =
batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3});
for (int i = 0; i < batch_index_t.dim_size(0); ++i) {
sizes.push_back(batch_indices(i, 2) - batch_indices(i, 1));
batch_keys.push_back(batch_indices(i, 0));
}
TF_RETURN_IF_ERROR(Split(context, data_t, sizes, &split_inputs));
}
std::vector<AsyncOpKernel::DoneCallback> done_callbacks_to_call;
Status status = [&]() -> Status {
mutex_lock ml(mu_);
auto tensor_it = waiting_tensors_.find(batch_key);
if (tensor_it != waiting_tensors_.end()) {
context->set_output(0, tensor_it->second.tensor);
waiting_tensors_.erase(tensor_it);
done_callbacks_to_call.push_back(done);
return absl::OkStatus();
}
const uint64 deadline_micros =
Env::Default()->NowMicros() + timeout_micros_;
if (!waiting_callbacks_
.emplace(batch_key,
WaitingCallback{deadline_micros, context, done})
.second) {
return errors::AlreadyExists(
"Multiple session runs with the same batch key.");
}
if (nonempty_input) {
for (size_t i = 0; i < batch_keys.size(); ++i) {
auto runs_it = waiting_callbacks_.find(batch_keys[i]);
if (runs_it != waiting_callbacks_.end()) {
runs_it->second.context->set_output(0, split_inputs[i]);
done_callbacks_to_call.push_back(runs_it->second.done);
waiting_callbacks_.erase(runs_it);
} else {
if (!waiting_tensors_
.emplace(batch_keys[i],
WaitingTensor{deadline_micros, split_inputs[i]})
.second) {
return errors::AlreadyExists(
"Multiple tensors returned for same batch key.");
}
}
}
}
return absl::OkStatus();
}();
for (const AsyncOpKernel::DoneCallback& done_callback :
done_callbacks_to_call) {
done_callback();
}
return status;
}
private:
void EnforceTimeout() {
const uint64 now = Env::Default()->NowMicros();
std::vector<WaitingCallback> evicted_callbacks;
{
mutex_lock ml(mu_);
for (auto it = waiting_tensors_.begin(); it != waiting_tensors_.end();) {
const WaitingTensor& waiting_tensor = it->second;
if (waiting_tensor.deadline_micros < now) {
it = waiting_tensors_.erase(it);
} else {
++it;
}
}
for (auto it = waiting_callbacks_.begin();
it != waiting_callbacks_.end();) {
const WaitingCallback& waiting_callback = it->second;
if (waiting_callback.deadline_micros < now) {
evicted_callbacks.push_back(waiting_callback);
it = waiting_callbacks_.erase(it);
} else {
++it;
}
}
}
for (const WaitingCallback& evicted_callback : evicted_callbacks) {
evicted_callback.context->CtxFailureWithWarning(errors::DeadlineExceeded(
"Batched data did not arrive within timeout window."));
evicted_callback.done();
}
}
struct WaitingTensor {
uint64 deadline_micros;
Tensor tensor;
};
struct WaitingCallback {
uint64 deadline_micros;
OpKernelContext* context;
AsyncOpKernel::DoneCallback done;
};
const int32 timeout_micros_;
mutex mu_;
std::unordered_map<int64_t, WaitingTensor> waiting_tensors_
TF_GUARDED_BY(mu_);
std::unordered_map<int64_t, WaitingCallback> waiting_callbacks_
TF_GUARDED_BY(mu_);
std::unique_ptr<serving::PeriodicFunction> timeout_enforcer_;
};
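// Thin async kernel that looks up (or creates) the shared UnbatchResource and
// delegates the per-request bookkeeping to it.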
class UnbatchKernel : public AsyncOpKernel {
public:
explicit UnbatchKernel(OpKernelConstruction* c) : AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("container", &container_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_));
if (shared_name_.empty()) {
shared_name_ = name();
}
OP_REQUIRES_OK(c, c->GetAttr("timeout_micros", &timeout_micros_));
}
void ComputeAsync(OpKernelContext* c, DoneCallback done) final {
UnbatchResource* ubr;
std::function<Status(UnbatchResource**)> creator =
[this](UnbatchResource** r) {
*r = new UnbatchResource(timeout_micros_);
return absl::OkStatus();
};
OP_REQUIRES_OK_ASYNC(c,
c->resource_manager()->LookupOrCreate(
container_, shared_name_, &ubr, creator),
done);
auto status = ubr->Compute(c, done);
ubr->Unref();
OP_REQUIRES_OK_ASYNC(c, status, done);
}
private:
string container_;
string shared_name_;
int32 timeout_micros_;
};
REGISTER_KERNEL_BUILDER(Name("Unbatch").Device(DEVICE_CPU), UnbatchKernel);
class UnbatchGradResource : public ResourceBase {
public:
UnbatchGradResource() {}
string DebugString() const final { return "UnbatchGradResource"; }
Status OutputBatch(OpKernelContext* context,
const AsyncOpKernel::DoneCallback& done)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
const Tensor& batch_index_t = context->input(1);
auto batch_index =
batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3});
std::vector<Tensor> tensors;
for (int i = 0; i < batch_index_t.dim_size(0); ++i) {
auto available_it = available_tensors_.find(batch_index(i, 0));
if (available_it == available_tensors_.end()) {
return errors::Internal("bad bookkeeping of available tensors.");
}
tensors.push_back(available_it->second);
available_tensors_.erase(available_it);
}
const DataType type = tensors[0].dtype();
Tensor concatenated_tensor;
switch (type) {
#define CASE(type) \
case DataTypeToEnum<type>::value: \
TF_RETURN_IF_ERROR(Concat<type>(context, tensors, &concatenated_tensor)); \
context->set_output(0, concatenated_tensor); \
break;
TF_CALL_ALL_TYPES(CASE);
#undef CASE
default:
return errors::InvalidArgument("Unsupported data type: ", type);
}
done();
return absl::OkStatus();
}
Status Compute(OpKernelContext* context,
const AsyncOpKernel::DoneCallback& done) {
const Tensor& data_t = context->input(0);
const Tensor& batch_index_t = context->input(1);
const Tensor& grad_t = context->input(2);
const Tensor& batch_key_t = context->input(3);
mutex_lock ml(mu_);
if (!TensorShapeUtils::IsScalar(batch_key_t.shape())) {
return errors::InvalidArgument("Expected `id` to be scalar. Received ",
batch_key_t.DebugString());
}
const int64_t batch_key = context->input(3).scalar<int64_t>()();
if (!available_tensors_.emplace(batch_key, grad_t).second) {
return errors::InvalidArgument("Two runs with the same batch key.");
}
if (data_t.NumElements() > 0) {
if (batch_index_t.NumElements() == 0) {
return errors::InvalidArgument(
"batch_index is empty while the tensor isn't.");
}
std::unordered_set<int64_t> missing_tensors;
if (batch_index_t.NumElements() != batch_index_t.dim_size(0) * 3) {
return errors::InvalidArgument(
"batch_index should contain ", batch_index_t.dim_size(0) * 3,
" elements. Received ", batch_index_t.NumElements());
}
const auto batch_index =
batch_index_t.shaped<int64_t, 2>({batch_index_t.dim_size(0), 3});
for (int i = 0; i < batch_index_t.dim_size(0); ++i) {
const int64_t batch_key = batch_index(i, 0);
if (available_tensors_.find(batch_key) == available_tensors_.end()) {
missing_tensors.emplace(batch_key);
}
}
if (missing_tensors.empty()) {
return OutputBatch(context, done);
}
if (!available_batches_
.emplace(batch_key, Batch{missing_tensors, context, done})
.second) {
return errors::InvalidArgument(
"Batch key with valid batch used twice.");
}
for (const int64_t i : missing_tensors) {
if (!desired_tensor_to_batch_map_.emplace(i, batch_key).second) {
return errors::InvalidArgument(
"Missing tensor wanted by more than one batch.");
}
}
} else {
TensorShape output_shape(grad_t.shape());
output_shape.set_dim(0, 0);
Tensor* output = nullptr;
TF_RETURN_IF_ERROR(context->allocate_output(0, output_shape, &output));
done();
}
auto desire_it = desired_tensor_to_batch_map_.find(batch_key);
if (desire_it != desired_tensor_to_batch_map_.end()) {
auto batch_it = available_batches_.find(desire_it->second);
desired_tensor_to_batch_map_.erase(desire_it);
if (batch_it == available_batches_.end()) {
return errors::InvalidArgument("Batch no longer exists.");
}
batch_it->second.missing_tensors.erase(batch_key);
if (batch_it->second.missing_tensors.empty()) {
TF_RETURN_IF_ERROR(
OutputBatch(batch_it->second.context, batch_it->second.done));
available_batches_.erase(batch_it);
}
}
return absl::OkStatus();
}
private:
mutex mu_;
struct Batch {
std::unordered_set<int64_t> missing_tensors;
OpKernelContext* context;
AsyncOpKernel::DoneCallback done;
};
std::unordered_map<int64_t, Batch> available_batches_;
std::unordered_map<int64_t, Tensor> available_tensors_;
std::unordered_map<int64_t, int64_t> desired_tensor_to_batch_map_;
};
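// Thin async kernel that forwards to the shared UnbatchGradResource.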
class UnbatchGradKernel : public AsyncOpKernel {
public:
explicit UnbatchGradKernel(OpKernelConstruction* c) : AsyncOpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("container", &container_));
OP_REQUIRES_OK(c, c->GetAttr("shared_name", &shared_name_));
if (shared_name_.empty()) {
shared_name_ = name();
}
}
void ComputeAsync(OpKernelContext* c, DoneCallback done) final {
UnbatchGradResource* ubr;
std::function<Status(UnbatchGradResource**)> creator =
[](UnbatchGradResource** r) {
*r = new UnbatchGradResource();
return absl::OkStatus();
};
OP_REQUIRES_OK_ASYNC(c,
c->resource_manager()->LookupOrCreate(
container_, shared_name_, &ubr, creator),
done);
Status status = ubr->Compute(c, done);
ubr->Unref();
OP_REQUIRES_OK_ASYNC(c, status, done);
}
private:
string container_;
string shared_name_;
};
REGISTER_KERNEL_BUILDER(Name("UnbatchGrad").Device(DEVICE_CPU),
UnbatchGradKernel);
} | #include "tensorflow/core/kernels/batch_kernels.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/batch_kernel_test_util.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/warmup.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/version.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/criticality.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using PerModelData = serving::WarmupStateRegistry::PerModelData;
class BatchFunctionKernelTest : public test_util::BatchFunctionKernelTestBase {
};
TEST_P(BatchFunctionKernelTest, EnableAdaptiveScheduler) {
const bool adaptive_scheduler_enabled = GetParam();
TF_EXPECT_OK(Init(adaptive_scheduler_enabled));
BatchFunctionKernel *batch_kernel =
dynamic_cast<BatchFunctionKernel *>(op_kernel());
EXPECT_EQ(adaptive_scheduler_enabled,
test_util::BatchFunctionKernelTestAccess(batch_kernel)
.enable_adaptive_batch_threads());
}
INSTANTIATE_TEST_SUITE_P(Params, BatchFunctionKernelTest, ::testing::Bool());
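// Test harness that wraps an EnsureShape function in a BatchFunction node, so
// each test can assert the batch size the runtime actually formed.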
class SharedBatchFunctionTestState : public OpsTestBase {
public:
void CreateFunctionLibraryRuntime() {
pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr_.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr *device_mgr,
tsl::core::RefCountPtr<Rendezvous> *r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}});
}
protected:
absl::StatusOr<NodeDefBuilder> CreateBatchFunctionBuilder(
const std::vector<int> &allowed_batch_sizes, int max_batch_size,
absl::string_view padding_policy,
const TensorShape &expected_output_shape) {
NameAttrList f;
f.set_name("ShapeEnforcingFunction");
FunctionDef func = FunctionDefHelper::Create(
f.name(),
{"x:int64"},
{"o:int64"},
{},
{{{"o"},
"EnsureShape",
{"x"},
{{"T", DataType::DT_INT64}, {"shape", expected_output_shape}}}},
{{"o", "o:output"}});
TF_RETURN_IF_ERROR(flib_def_->AddFunctionDef(func));
SharedBatchFunctionTestState::CreateFunctionLibraryRuntime();
std::vector<NodeDefBuilder::NodeOut> inputs(
{NodeDefBuilder::NodeOut({"n1", 0, DataType::DT_INT64})});
return NodeDefBuilder(absl::StrCat("BatchTPUInput", padding_policy),
"BatchFunction")
.Attr("max_batch_size", max_batch_size)
.Attr("num_batch_threads", 8)
.Attr("allowed_batch_sizes", allowed_batch_sizes)
.Attr("batch_timeout_micros", 1000000)
.Attr("max_enqueued_batches", 10)
.Attr("enable_large_batch_splitting", true)
.Attr("batch_padding_policy", padding_policy)
.Attr("Tin", {DataType::DT_INT64})
.Input(inputs)
.Attr("Tcaptured", std::vector<DataType>{})
.Input(std::vector<NodeDefBuilder::NodeOut>{})
.Attr("Tout", std::vector<DataType>{DT_INT64})
.Attr("f", f);
}
};
class BatchFunctionTestState : public SharedBatchFunctionTestState {
public:
absl::Status Init(Device *device, bool enable_low_priority_queue,
absl::string_view mixed_priority_policy,
int64_t expected_batch_size) {
device_ = device;
const TensorShape expected_output_shape({expected_batch_size, 2});
TF_ASSIGN_OR_RETURN(
NodeDefBuilder builder,
CreateBatchFunctionBuilder({4, 8}, 8, "PAD_UP", expected_output_shape));
TF_RETURN_IF_ERROR(builder
.Attr("low_priority_max_batch_size",
enable_low_priority_queue ? 8 : 0)
.Attr("low_priority_batch_timeout_micros",
enable_low_priority_queue ? 2000000 : 0)
.Attr("low_priority_allowed_batch_sizes",
enable_low_priority_queue
? std::vector<int>{4, 8}
: std::vector<int>())
.Attr("low_priority_max_enqueued_batches",
enable_low_priority_queue ? 2 : 0)
.Attr("mixed_priority_policy", mixed_priority_policy)
.Finalize(node_def()));
return OpsTestBase::InitOp();
}
void TestBody() override {}
};
class BatchFunctionTest : public ::testing::TestWithParam<bool> {
protected:
void SetUp() override {
cpu_device_ =
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
}
std::unique_ptr<Device> cpu_device_;
};
TEST_P(BatchFunctionTest, BatchingWorksWithoutCriticality) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 8; ++i) {
Env::Default()->SchedClosure([&]() {
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest, PaddingWorksWithoutCriticality) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(2);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCritical);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#if defined(PLATFORM_GOOGLE)
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchUptoMaxBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchWithExtraPadding) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(2);
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(
test_state.Init(cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(
test_state.Init(cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
blocking_counter.Wait();
}
}
TEST_P(BatchFunctionTest,
LowPriorityTaskPaddingHighPriorityBatchUptoNextAllowedBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
bool enable_low_priority_queue = GetParam();
{
tsl::BlockingCounter blocking_counter(4);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(), enable_low_priority_queue,
serving::kLowPriorityPaddingWithNextAllowedBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#endif
INSTANTIATE_TEST_SUITE_P(BatchFunctionTest, BatchFunctionTest,
::testing::Bool());
#if defined(PLATFORM_GOOGLE)
TEST_F(BatchFunctionTest, HighPriorityBatchNotPaddedWithLowPriorityTasks) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kCriticalPlus);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kCriticalPlus);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(cpu_device_.get(),
                                     /*enable_low_priority_queue=*/true,
serving::kPriorityIsolationAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
for (int i = 0; i < 4; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(cpu_device_.get(),
                                     /*enable_low_priority_queue=*/true,
serving::kPriorityIsolationAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_F(BatchFunctionTest, LowPriorityOnlyBatchAtMaxLowPriorityBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(8);
for (int i = 0; i < 8; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(),
            /*enable_low_priority_queue=*/true,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
8));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
TEST_F(BatchFunctionTest, LowPriorityBatchPaddedToLowPriorityAllowedBatchSize) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
{
tsl::BlockingCounter blocking_counter(2);
for (int i = 0; i < 2; ++i) {
Env::Default()->SchedClosure([&]() {
tsl::criticality::ScopedCriticality scoped_criticality(
tsl::criticality::Criticality::kSheddable);
ASSERT_EQ(tsl::criticality::GetCriticality(),
tsl::criticality::Criticality::kSheddable);
BatchFunctionTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_ASSERT_OK(test_state.Init(
cpu_device_.get(),
            /*enable_low_priority_queue=*/true,
serving::kLowPriorityPaddingWithMaxBatchSizeAttrValue,
4));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {234, 567});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({234, 567}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
#endif
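// While the model is registered in the global warmup state registry, each
// request must run unbatched (the output keeps its {2} shape); once the
// registry handle is dropped the same requests get batched and the EnsureShape
// check inside the function fails.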
class BatchFunctionKernelParallelWarmupTestState
: public SharedBatchFunctionTestState {
public:
absl::Status Init(bool enable_splitting) {
static auto *const cpu_device = []() {
auto device =
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
return device.release();
}();
device_ = cpu_device;
const TensorShape expected_output_shape({2});
TF_ASSIGN_OR_RETURN(
NodeDefBuilder builder,
CreateBatchFunctionBuilder({2, 4, 8}, enable_splitting ? 16 : 8,
"PAD_UP", expected_output_shape));
TF_RETURN_IF_ERROR(builder.Finalize(node_def()));
return OpsTestBase::InitOp();
}
void TestBody() override {}
};
class BatchFunctionKernelParallelWarmupTest
: public ::testing::TestWithParam<bool> {};
TEST_P(BatchFunctionKernelParallelWarmupTest, ParallelWarmup) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
serving::WarmupStateRegistry::Key key(session_metadata.name(),
session_metadata.version());
int num_requests = 16;
bool enable_splitting = GetParam();
{
auto per_model_data = std::make_unique<PerModelData>();
auto handle = serving::GetGlobalWarmupStateRegistry().Register(
key, std::move(per_model_data));
tsl::BlockingCounter blocking_counter(num_requests);
for (int i = 0; i < num_requests; ++i) {
Env::Default()->SchedClosure([&]() {
BatchFunctionKernelParallelWarmupTestState test;
test.set_session_metadata(session_metadata);
TF_CHECK_OK(test.Init(enable_splitting));
test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456});
TF_CHECK_OK(test.RunOpKernel());
test::ExpectTensorEqual<int64_t>(*test.GetOutput(0),
test::AsTensor<int64_t>({123, 456}));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
EXPECT_FALSE(serving::GetGlobalWarmupStateRegistry().Lookup(key));
{
tsl::BlockingCounter blocking_counter(num_requests);
for (int i = 0; i < num_requests; ++i) {
Env::Default()->SchedClosure([&]() {
BatchFunctionKernelParallelWarmupTestState test;
test.set_session_metadata(session_metadata);
TF_CHECK_OK(test.Init(enable_splitting));
test.AddInputFromList<int64_t>(TensorShape({2}), {123, 456});
EXPECT_FALSE(test.RunOpKernel().ok());
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
INSTANTIATE_TEST_SUITE_P(BatchFunctionKernelParallelWarmupTestSuite,
BatchFunctionKernelParallelWarmupTest,
::testing::Bool());
class BatchFunctionKernelPaddingTestState
: public SharedBatchFunctionTestState {
public:
absl::Status Init(absl::string_view padding_policy, int expected_batch_size) {
static auto *const cpu_device = []() {
auto device =
DeviceFactory::NewDevice("CPU", {}, "/job:a/replica:0/task:0");
return device.release();
}();
device_ = cpu_device;
const TensorShape expected_output_shape({expected_batch_size, 2});
TF_RETURN_IF_ERROR(CreateBatchFunctionBuilder({4, 8}, 8, padding_policy,
expected_output_shape)
->Finalize(node_def()));
return OpsTestBase::InitOp();
}
void TestBody() override {}
};
class BatchFunctionKernelPaddingTest
: public ::testing::TestWithParam<std::string> {};
TEST_P(BatchFunctionKernelPaddingTest, PadUp) {
SessionMetadata session_metadata;
session_metadata.set_name("test_model");
session_metadata.set_version(123);
int64_t num_requests = 5;
int64_t expected_batch_size = 0;
std::string padding_policy = GetParam();
if (padding_policy == "PAD_UP") {
expected_batch_size = 8;
} else if (padding_policy == "BATCH_DOWN") {
expected_batch_size = 4;
} else if (padding_policy == "MINIMIZE_TPU_COST_PER_REQUEST") {
expected_batch_size = 8;
} else {
FAIL() << "Unsupported padding policy: " << padding_policy;
}
{
tsl::BlockingCounter blocking_counter(num_requests);
for (int i = 0; i < num_requests; ++i) {
Env::Default()->SchedClosure([&]() {
BatchFunctionKernelPaddingTestState test_state;
test_state.set_session_metadata(session_metadata);
TF_CHECK_OK(test_state.Init(padding_policy, expected_batch_size));
test_state.AddInputFromList<int64_t>(TensorShape({1, 2}), {123, 456});
TF_EXPECT_OK(test_state.RunOpKernel());
test::ExpectTensorEqual<int64_t>(
*test_state.GetOutput(0),
test::AsTensor<int64_t>({123, 456}, TensorShape({1, 2})));
blocking_counter.DecrementCount();
});
}
blocking_counter.Wait();
}
}
INSTANTIATE_TEST_SUITE_P(BatchFunctionKernelPaddingTestSuite,
BatchFunctionKernelPaddingTest,
::testing::Values("PAD_UP", "BATCH_DOWN",
"MINIMIZE_TPU_COST_PER_REQUEST"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_kernels.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batch_kernels_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
387b62c0-4dcf-4a70-bea4-c87442267e17 | cpp | tensorflow/tensorflow | subgraph_tensor_profiler | tensorflow/lite/profiling/subgraph_tensor_profiler.cc | tensorflow/lite/profiling/subgraph_tensor_profiler_test.cc | #include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <cstring>
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite::profiling {
SubgraphTensorProfiler::SubgraphTensorProfiler(const Interpreter& interpreter,
CallbackT callback)
: interpreter_(interpreter), callback_(callback) {
events_.reserve(interpreter.subgraphs_size());
}
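// Only "Invoke" events are profiled; event_metadata2 carries the index of the
// subgraph being invoked and is remembered until the matching EndEvent.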
uint32_t SubgraphTensorProfiler::BeginEvent(const char* tag,
EventType event_type,
int64_t event_metadata1,
int64_t event_metadata2) {
if (strcmp(tag, "Invoke")) {
return 0;
}
events_.push_back(event_metadata2);
return events_.size();
}
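// Runs the user callback on every tensor of the subgraph recorded for this
// 1-based event handle; a handle of 0 means the event was not tracked.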
void SubgraphTensorProfiler::EndEvent(uint32_t event_handle) {
if (!event_handle || events_.size() < event_handle) {
return;
}
const Subgraph* subgraph = interpreter_.subgraph(events_[event_handle - 1]);
for (int i = 0; i < subgraph->tensors_size(); ++i) {
callback_(subgraph->tensor(i));
}
}
} | #include "tensorflow/lite/profiling/subgraph_tensor_profiler.h"
#include <functional>
#include <string>
#include <unordered_set>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite::profiling {
namespace {
using ::testing::IsSupersetOf;
using ::testing::Not;
constexpr const char* kIfSubgraphTensorNames[] = {
"if_cond",
"if_input2",
"if_input3",
"if_output1",
};
constexpr const char* kAddSubgraphTensorNames[] = {
"add_input1",
"add_input2",
"add_output1",
};
constexpr const char* kMulSubgraphTensorNames[] = {
"mul_input1",
"mul_input2",
"mul_output1",
};
struct TensorGatherer {
void operator()(const TfLiteTensor* tensor) { tensors.insert(tensor->name); }
std::unordered_set<std::string> tensors;
};
class SubgraphTensorProfilerTest
: public subgraph_test_util::ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
subgraph_test_util::FillIntTensor(
interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
NameTensors();
}
private:
void NameTensors() {
auto set_names = [](Subgraph* subgraph, auto names) {
for (int j = 0; j < subgraph->tensors_size(); ++j) {
subgraph->tensor(j)->name = names[j];
}
};
set_names(interpreter_->subgraph(0), kIfSubgraphTensorNames);
set_names(interpreter_->subgraph(1), kAddSubgraphTensorNames);
set_names(interpreter_->subgraph(2), kMulSubgraphTensorNames);
}
};
TEST_F(SubgraphTensorProfilerTest, TestMulSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kMulSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kAddSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestAddSubgraph) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
interpreter_->AddProfiler(&profiler);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kIfSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors, IsSupersetOf(kAddSubgraphTensorNames));
EXPECT_THAT(tensor_gatherer.tensors,
Not(IsSupersetOf(kMulSubgraphTensorNames)));
}
TEST_F(SubgraphTensorProfilerTest, TestBeginEvent) {
TensorGatherer tensor_gatherer;
tflite::profiling::SubgraphTensorProfiler profiler(*interpreter_,
std::ref(tensor_gatherer));
const int subgraph_id = 1;
uint32_t valid_event = profiler.BeginEvent(
"Invoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(valid_event, 1);
uint32_t invalid_event = profiler.BeginEvent(
"NotInvoke", Profiler::EventType::DEFAULT, 0, subgraph_id);
EXPECT_EQ(invalid_event, 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/subgraph_tensor_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/subgraph_tensor_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd4c1478-bbde-4786-a6d1-34d8a3fb48f2 | cpp | tensorflow/tensorflow | text_literal_writer | third_party/xla/xla/text_literal_writer.cc | third_party/xla/xla/text_literal_writer_test.cc | #include "xla/text_literal_writer.h"
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
namespace xla {
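// Writes the literal as text: a human-readable shape header followed by one
// "(indices): value" line per element.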
absl::Status TextLiteralWriter::WriteToPath(
const Literal& literal, absl::string_view path) {
std::unique_ptr<tsl::WritableFile> f;
auto s = tsl::Env::Default()->NewWritableFile(std::string(path), &f);
if (!s.ok()) {
return s;
}
s = f->Append(ShapeUtil::HumanString(literal.shape()) + "\n");
if (!s.ok()) {
return s;
}
absl::Status status;
tsl::WritableFile* f_ptr = f.get();
literal.EachCellAsString([f_ptr, &status](absl::Span<const int64_t> indices,
const std::string& value) {
if (!status.ok()) {
return;
}
std::string coordinates =
absl::StrCat("(", absl::StrJoin(indices, ", "), ")");
status = f_ptr->Append(absl::StrCat(coordinates, ": ", value, "\n"));
});
auto ignored = f->Close();
return status;
}
} | #include "xla/text_literal_writer.h"
#include <memory>
#include <string>
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace {
TEST(TextLiteralWriterTest, WritesFloatLiteral) {
auto literal = LiteralUtil::CreateR2<float>({
{3.14, 2.17},
{1.23, 4.56},
});
std::string path;
ASSERT_TRUE(tsl::Env::Default()->LocalTempFilename(&path));
ASSERT_IS_OK(TextLiteralWriter::WriteToPath(literal, path));
std::string contents;
TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), path, &contents));
const std::string expected = R"(f32[2,2]
(0, 0): 3.14
(0, 1): 2.17
(1, 0): 1.23
(1, 1): 4.56
)";
EXPECT_EQ(expected, contents);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/text_literal_writer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/text_literal_writer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c45fb7a3-0e45-4799-97f2-897cff4b579a | cpp | tensorflow/tensorflow | signature_def_util | tensorflow/lite/tools/signature/signature_def_util.cc | tensorflow/lite/tools/signature/signature_def_util_test.cc | #include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <map>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/flexbuffers.h"
#include "flatbuffers/vector.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tsl/platform/status.h"
namespace tflite {
namespace {
using tensorflow::Status;
using SerializedSignatureDefMap = std::map<std::string, std::string>;
using SignatureDefMap = std::map<std::string, tensorflow::SignatureDef>;
const Metadata* GetSignatureDefMetadata(const Model* model) {
if (!model || !model->metadata()) {
return nullptr;
}
for (int i = 0; i < model->metadata()->size(); ++i) {
const Metadata* metadata = model->metadata()->Get(i);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
return metadata;
}
}
return nullptr;
}
Status ReadSignatureDefMap(const Model* model, const Metadata* metadata,
SerializedSignatureDefMap* map) {
if (!model || !metadata || !map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
const flatbuffers::Vector<uint8_t>* flatbuffer_data =
model->buffers()->Get(metadata->buffer())->data();
const auto signature_defs =
flexbuffers::GetRoot(flatbuffer_data->data(), flatbuffer_data->size())
.AsMap();
for (int i = 0; i < signature_defs.Keys().size(); ++i) {
const std::string key = signature_defs.Keys()[i].AsString().c_str();
(*map)[key] = signature_defs[key].AsString().c_str();
}
return absl::OkStatus();
}
}
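// Serializes the signature map into a FlexBuffer stored in a model buffer that
// is referenced by the kSignatureDefsMetadataName metadata entry, reusing that
// buffer if the model already carries one.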
Status SetSignatureDefMap(const Model* model,
const SignatureDefMap& signature_def_map,
std::string* model_data_with_signature_def) {
if (!model || !model_data_with_signature_def) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
if (signature_def_map.empty()) {
return tensorflow::errors::InvalidArgument(
"signature_def_map should not be empty");
}
flexbuffers::Builder fbb;
const size_t start_map = fbb.StartMap();
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
int buffer_id = mutable_model->buffers.size();
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
buffer_id = metadata->buffer();
} else {
auto buffer = std::make_unique<BufferT>();
mutable_model->buffers.emplace_back(std::move(buffer));
auto sigdef_metadata = std::make_unique<MetadataT>();
sigdef_metadata->buffer = buffer_id;
sigdef_metadata->name = kSignatureDefsMetadataName;
mutable_model->metadata.emplace_back(std::move(sigdef_metadata));
}
for (const auto& entry : signature_def_map) {
fbb.String(entry.first.c_str(), entry.second.SerializeAsString());
}
fbb.EndMap(start_map);
fbb.Finish();
mutable_model->buffers[buffer_id]->data = fbb.GetBuffer();
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data_with_signature_def =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
bool HasSignatureDef(const Model* model, const std::string& signature_key) {
if (!model) {
return false;
}
const Metadata* metadata = GetSignatureDefMetadata(model);
if (!metadata) {
return false;
}
SerializedSignatureDefMap signature_defs;
if (ReadSignatureDefMap(model, metadata, &signature_defs) !=
absl::OkStatus()) {
return false;
}
return (signature_defs.find(signature_key) != signature_defs.end());
}
Status GetSignatureDefMap(const Model* model,
SignatureDefMap* signature_def_map) {
if (!model || !signature_def_map) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
SignatureDefMap retrieved_signature_def_map;
const Metadata* metadata = GetSignatureDefMetadata(model);
if (metadata) {
SerializedSignatureDefMap signature_defs;
auto status = ReadSignatureDefMap(model, metadata, &signature_defs);
if (status != absl::OkStatus()) {
return tensorflow::errors::Internal("Error reading signature def map: ",
status.message());
}
for (const auto& entry : signature_defs) {
tensorflow::SignatureDef signature_def;
if (!signature_def.ParseFromString(entry.second)) {
return tensorflow::errors::Internal(
"Cannot parse signature def found in flatbuffer.");
}
retrieved_signature_def_map[entry.first] = signature_def;
}
*signature_def_map = retrieved_signature_def_map;
}
return absl::OkStatus();
}
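// Removes the signature-def metadata entry and its backing buffer, returning
// the re-serialized model in *model_data.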
Status ClearSignatureDefMap(const Model* model, std::string* model_data) {
if (!model || !model_data) {
return tensorflow::errors::InvalidArgument("Arguments must not be nullptr");
}
auto mutable_model = std::make_unique<ModelT>();
model->UnPackTo(mutable_model.get(), nullptr);
for (int id = 0; id < model->metadata()->size(); ++id) {
const Metadata* metadata = model->metadata()->Get(id);
if (metadata->name()->str() == kSignatureDefsMetadataName) {
auto* buffers = &(mutable_model->buffers);
buffers->erase(buffers->begin() + metadata->buffer());
mutable_model->metadata.erase(mutable_model->metadata.begin() + id);
break;
}
}
flatbuffers::FlatBufferBuilder builder;
auto packed_model = Model::Pack(builder, mutable_model.get());
FinishModelBuffer(builder, packed_model);
*model_data =
std::string(reinterpret_cast<const char*>(builder.GetBufferPointer()),
builder.GetSize());
return absl::OkStatus();
}
} | #include "tensorflow/lite/tools/signature/signature_def_util.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "flatbuffers/buffer.h"
#include "tensorflow/cc/saved_model/signature_constants.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tsl/platform/status.h"
namespace tflite {
namespace {
using tensorflow::kClassifyMethodName;
using tensorflow::kDefaultServingSignatureDefKey;
using tensorflow::kPredictMethodName;
using tensorflow::SignatureDef;
using tensorflow::Status;
constexpr char kSignatureInput[] = "input";
constexpr char kSignatureOutput[] = "output";
constexpr char kTestFilePath[] = "tensorflow/lite/testdata/add.bin";
class SimpleSignatureDefUtilTest : public testing::Test {
protected:
void SetUp() override {
flatbuffer_model_ = FlatBufferModel::BuildFromFile(kTestFilePath);
ASSERT_NE(flatbuffer_model_, nullptr);
model_ = flatbuffer_model_->GetModel();
ASSERT_NE(model_, nullptr);
}
SignatureDef GetTestSignatureDef() {
auto signature_def = SignatureDef();
tensorflow::TensorInfo input_tensor;
tensorflow::TensorInfo output_tensor;
*input_tensor.mutable_name() = kSignatureInput;
*output_tensor.mutable_name() = kSignatureOutput;
*signature_def.mutable_method_name() = kClassifyMethodName;
(*signature_def.mutable_inputs())[kSignatureInput] = input_tensor;
(*signature_def.mutable_outputs())[kSignatureOutput] = output_tensor;
return signature_def;
}
std::unique_ptr<FlatBufferModel> flatbuffer_model_;
const Model* model_;
};
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefTest) {
SignatureDef expected_signature_def = GetTestSignatureDef();
std::string model_output;
const std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, OverwriteSignatureDefTest) {
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
SignatureDef test_signature_def =
test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
*expected_signature_def.mutable_method_name() = kPredictMethodName;
expected_signature_def_map.erase(
expected_signature_def_map.find(kDefaultServingSignatureDefKey));
constexpr char kTestSignatureDefKey[] = "ServingTest";
expected_signature_def_map[kTestSignatureDefKey] = expected_signature_def;
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(add_model, expected_signature_def_map, &model_output));
const Model* final_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(final_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
EXPECT_NE(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_TRUE(HasSignatureDef(final_model, kTestSignatureDefKey));
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(final_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kTestSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
}
TEST_F(SimpleSignatureDefUtilTest, GetSignatureDefTest) {
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(model_, &test_signature_def_map));
EXPECT_FALSE(HasSignatureDef(model_, kDefaultServingSignatureDefKey));
}
TEST_F(SimpleSignatureDefUtilTest, ClearSignatureDefTest) {
const int expected_num_buffers = model_->buffers()->size();
auto expected_signature_def = GetTestSignatureDef();
std::string model_output;
std::map<string, SignatureDef> expected_signature_def_map = {
{kDefaultServingSignatureDefKey, expected_signature_def}};
EXPECT_EQ(
absl::OkStatus(),
SetSignatureDefMap(model_, expected_signature_def_map, &model_output));
const Model* add_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_TRUE(HasSignatureDef(add_model, kDefaultServingSignatureDefKey));
SignatureDef test_signature_def;
std::map<string, SignatureDef> test_signature_def_map;
EXPECT_EQ(absl::OkStatus(),
GetSignatureDefMap(add_model, &test_signature_def_map));
test_signature_def = test_signature_def_map[kDefaultServingSignatureDefKey];
EXPECT_EQ(expected_signature_def.SerializeAsString(),
test_signature_def.SerializeAsString());
EXPECT_EQ(absl::OkStatus(), ClearSignatureDefMap(add_model, &model_output));
const Model* clear_model = flatbuffers::GetRoot<Model>(model_output.data());
EXPECT_FALSE(HasSignatureDef(clear_model, kDefaultServingSignatureDefKey));
EXPECT_EQ(expected_num_buffers, clear_model->buffers()->size());
}
TEST_F(SimpleSignatureDefUtilTest, SetSignatureDefErrorsTest) {
std::map<string, SignatureDef> test_signature_def_map;
std::string model_output;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, &model_output)));
SignatureDef test_signature_def;
test_signature_def_map[kDefaultServingSignatureDefKey] = test_signature_def;
EXPECT_TRUE(tensorflow::errors::IsInvalidArgument(
SetSignatureDefMap(model_, test_signature_def_map, nullptr)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/signature/signature_def_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/signature/signature_def_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
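A condensed view of the signature-def helpers exercised in the row above; `model` and `signature_def` are assumed to be an existing tflite::Model pointer and an already-populated tensorflow::SignatureDef, so this is only an illustrative sketch mirroring the test flow:

  std::map<std::string, tensorflow::SignatureDef> defs = {
      {kDefaultServingSignatureDefKey, signature_def}};
  std::string serialized;
  auto status = SetSignatureDefMap(model, defs, &serialized);   // stores the map as model metadata
  const Model* updated = flatbuffers::GetRoot<Model>(serialized.data());
  bool present = HasSignatureDef(updated, kDefaultServingSignatureDefKey);  // true after the write
  status = ClearSignatureDefMap(updated, &serialized);          // drops the metadata entry and its buffer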
7fc42d15-469c-4a02-a9ec-48ef227ad356 | cpp | google/googletest | gmock-pp | googlemock/include/gmock/internal/gmock-pp.h | googlemock/test/gmock-pp_test.cc | #ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PP_H_
#define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_PP_H_
#define GMOCK_PP_CAT(_1, _2) GMOCK_PP_INTERNAL_CAT(_1, _2)
#define GMOCK_PP_STRINGIZE(...) GMOCK_PP_INTERNAL_STRINGIZE(__VA_ARGS__)
#define GMOCK_PP_EMPTY(...)
#define GMOCK_PP_COMMA(...) ,
#define GMOCK_PP_IDENTITY(_1) _1
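// GMOCK_PP_NARG expands to the argument count (up to 15); an empty argument
// list still counts as one, which is why GMOCK_PP_NARG0 further down
// special-cases emptiness via GMOCK_PP_IS_EMPTY.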
#define GMOCK_PP_NARG(...) \
GMOCK_PP_INTERNAL_16TH( \
(__VA_ARGS__, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0))
#define GMOCK_PP_HAS_COMMA(...) \
GMOCK_PP_INTERNAL_16TH( \
(__VA_ARGS__, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0))
#define GMOCK_PP_HEAD(...) GMOCK_PP_INTERNAL_HEAD((__VA_ARGS__, unusedArg))
#define GMOCK_PP_TAIL(...) GMOCK_PP_INTERNAL_TAIL((__VA_ARGS__))
#define GMOCK_PP_VARIADIC_CALL(_Macro, ...) \
GMOCK_PP_IDENTITY( \
GMOCK_PP_CAT(_Macro, GMOCK_PP_NARG(__VA_ARGS__))(__VA_ARGS__))
#define GMOCK_PP_IS_EMPTY(...) \
GMOCK_PP_INTERNAL_IS_EMPTY(GMOCK_PP_HAS_COMMA(__VA_ARGS__), \
GMOCK_PP_HAS_COMMA(GMOCK_PP_COMMA __VA_ARGS__), \
GMOCK_PP_HAS_COMMA(__VA_ARGS__()), \
GMOCK_PP_HAS_COMMA(GMOCK_PP_COMMA __VA_ARGS__()))
#define GMOCK_PP_IF(_Cond, _Then, _Else) \
GMOCK_PP_CAT(GMOCK_PP_INTERNAL_IF_, _Cond)(_Then, _Else)
#define GMOCK_PP_GENERIC_IF(_Cond, _Then, _Else) \
GMOCK_PP_REMOVE_PARENS(GMOCK_PP_IF(_Cond, _Then, _Else))
#define GMOCK_PP_NARG0(...) \
GMOCK_PP_IF(GMOCK_PP_IS_EMPTY(__VA_ARGS__), 0, GMOCK_PP_NARG(__VA_ARGS__))
#define GMOCK_PP_IS_BEGIN_PARENS(...) \
GMOCK_PP_HEAD(GMOCK_PP_CAT(GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_R_, \
GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_C __VA_ARGS__))
#define GMOCK_PP_IS_ENCLOSED_PARENS(...) \
GMOCK_PP_IF(GMOCK_PP_IS_BEGIN_PARENS(__VA_ARGS__), \
GMOCK_PP_IS_EMPTY(GMOCK_PP_EMPTY __VA_ARGS__), 0)
#define GMOCK_PP_REMOVE_PARENS(...) GMOCK_PP_INTERNAL_REMOVE_PARENS __VA_ARGS__
#define GMOCK_PP_FOR_EACH(_Macro, _Data, _Tuple) \
GMOCK_PP_CAT(GMOCK_PP_INTERNAL_FOR_EACH_IMPL_, GMOCK_PP_NARG0 _Tuple) \
(0, _Macro, _Data, _Tuple)
#define GMOCK_PP_REPEAT(_Macro, _Data, _N) \
GMOCK_PP_CAT(GMOCK_PP_INTERNAL_FOR_EACH_IMPL_, _N) \
(0, _Macro, _Data, GMOCK_PP_INTENRAL_EMPTY_TUPLE)
#define GMOCK_PP_INC(_i) GMOCK_PP_CAT(GMOCK_PP_INTERNAL_INC_, _i)
#define GMOCK_PP_COMMA_IF(_i) GMOCK_PP_CAT(GMOCK_PP_INTERNAL_COMMA_IF_, _i)
#define GMOCK_PP_INTENRAL_EMPTY_TUPLE (, , , , , , , , , , , , , , , )
#define GMOCK_PP_INTERNAL_CAT(_1, _2) _1##_2
#define GMOCK_PP_INTERNAL_STRINGIZE(...) #__VA_ARGS__
#define GMOCK_PP_INTERNAL_CAT_5(_1, _2, _3, _4, _5) _1##_2##_3##_4##_5
#define GMOCK_PP_INTERNAL_IS_EMPTY(_1, _2, _3, _4) \
GMOCK_PP_HAS_COMMA(GMOCK_PP_INTERNAL_CAT_5(GMOCK_PP_INTERNAL_IS_EMPTY_CASE_, \
_1, _2, _3, _4))
#define GMOCK_PP_INTERNAL_IS_EMPTY_CASE_0001 ,
#define GMOCK_PP_INTERNAL_IF_1(_Then, _Else) _Then
#define GMOCK_PP_INTERNAL_IF_0(_Then, _Else) _Else
#define GMOCK_PP_INTERNAL_INTERNAL_16TH(_1, _2, _3, _4, _5, _6, _7, _8, _9, \
_10, _11, _12, _13, _14, _15, _16, \
...) \
_16
#define GMOCK_PP_INTERNAL_16TH(_Args) \
GMOCK_PP_IDENTITY(GMOCK_PP_INTERNAL_INTERNAL_16TH _Args)
#define GMOCK_PP_INTERNAL_INTERNAL_HEAD(_1, ...) _1
#define GMOCK_PP_INTERNAL_HEAD(_Args) \
GMOCK_PP_IDENTITY(GMOCK_PP_INTERNAL_INTERNAL_HEAD _Args)
#define GMOCK_PP_INTERNAL_INTERNAL_TAIL(_1, ...) __VA_ARGS__
#define GMOCK_PP_INTERNAL_TAIL(_Args) \
GMOCK_PP_IDENTITY(GMOCK_PP_INTERNAL_INTERNAL_TAIL _Args)
#define GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_C(...) 1 _
#define GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_R_1 1,
#define GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_R_GMOCK_PP_INTERNAL_IBP_IS_VARIADIC_C \
0,
#define GMOCK_PP_INTERNAL_REMOVE_PARENS(...) __VA_ARGS__
#define GMOCK_PP_INTERNAL_INC_0 1
#define GMOCK_PP_INTERNAL_INC_1 2
#define GMOCK_PP_INTERNAL_INC_2 3
#define GMOCK_PP_INTERNAL_INC_3 4
#define GMOCK_PP_INTERNAL_INC_4 5
#define GMOCK_PP_INTERNAL_INC_5 6
#define GMOCK_PP_INTERNAL_INC_6 7
#define GMOCK_PP_INTERNAL_INC_7 8
#define GMOCK_PP_INTERNAL_INC_8 9
#define GMOCK_PP_INTERNAL_INC_9 10
#define GMOCK_PP_INTERNAL_INC_10 11
#define GMOCK_PP_INTERNAL_INC_11 12
#define GMOCK_PP_INTERNAL_INC_12 13
#define GMOCK_PP_INTERNAL_INC_13 14
#define GMOCK_PP_INTERNAL_INC_14 15
#define GMOCK_PP_INTERNAL_INC_15 16
#define GMOCK_PP_INTERNAL_COMMA_IF_0
#define GMOCK_PP_INTERNAL_COMMA_IF_1 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_2 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_3 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_4 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_5 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_6 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_7 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_8 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_9 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_10 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_11 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_12 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_13 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_14 ,
#define GMOCK_PP_INTERNAL_COMMA_IF_15 ,
#define GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, _element) \
_Macro(_i, _Data, _element)
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_0(_i, _Macro, _Data, _Tuple)
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_1(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple)
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_2(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_1(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_3(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_2(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_4(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_3(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_5(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_4(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_6(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_5(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_7(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_6(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_8(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_7(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_9(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_8(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_10(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_9(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_11(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_10(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_12(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_11(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_13(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_12(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_14(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_13(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#define GMOCK_PP_INTERNAL_FOR_EACH_IMPL_15(_i, _Macro, _Data, _Tuple) \
GMOCK_PP_INTERNAL_CALL_MACRO(_Macro, _i, _Data, GMOCK_PP_HEAD _Tuple) \
GMOCK_PP_INTERNAL_FOR_EACH_IMPL_14(GMOCK_PP_INC(_i), _Macro, _Data, \
(GMOCK_PP_TAIL _Tuple))
#endif | #include "gmock/internal/gmock-pp.h"
#define GMOCK_TEST_REPLACE_comma_WITH_COMMA_I_comma ,
#define GMOCK_TEST_REPLACE_comma_WITH_COMMA(x) \
GMOCK_PP_CAT(GMOCK_TEST_REPLACE_comma_WITH_COMMA_I_, x)
namespace testing {
namespace internal {
namespace gmockpp {
static_assert(GMOCK_PP_CAT(1, 4) == 14, "");
static_assert(GMOCK_PP_INTERNAL_INTERNAL_16TH(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18) == 16,
"");
static_assert(GMOCK_PP_NARG() == 1, "");
static_assert(GMOCK_PP_NARG(x) == 1, "");
static_assert(GMOCK_PP_NARG(x, y) == 2, "");
static_assert(GMOCK_PP_NARG(x, y, z) == 3, "");
static_assert(GMOCK_PP_NARG(x, y, z, w) == 4, "");
static_assert(!GMOCK_PP_HAS_COMMA(), "");
static_assert(GMOCK_PP_HAS_COMMA(b, ), "");
static_assert(!GMOCK_PP_HAS_COMMA((, )), "");
static_assert(GMOCK_PP_HAS_COMMA(GMOCK_TEST_REPLACE_comma_WITH_COMMA(comma)),
"");
static_assert(
GMOCK_PP_HAS_COMMA(GMOCK_TEST_REPLACE_comma_WITH_COMMA(comma(unrelated))),
"");
static_assert(!GMOCK_PP_IS_EMPTY(, ), "");
static_assert(!GMOCK_PP_IS_EMPTY(a), "");
static_assert(!GMOCK_PP_IS_EMPTY(()), "");
static_assert(GMOCK_PP_IF(1, 1, 2) == 1, "");
static_assert(GMOCK_PP_IF(0, 1, 2) == 2, "");
static_assert(GMOCK_PP_NARG0(x) == 1, "");
static_assert(GMOCK_PP_NARG0(x, y) == 2, "");
static_assert(GMOCK_PP_HEAD(1) == 1, "");
static_assert(GMOCK_PP_HEAD(1, 2) == 1, "");
static_assert(GMOCK_PP_HEAD(1, 2, 3) == 1, "");
static_assert(GMOCK_PP_TAIL(1, 2) == 2, "");
static_assert(GMOCK_PP_HEAD(GMOCK_PP_TAIL(1, 2, 3)) == 2, "");
static_assert(!GMOCK_PP_IS_BEGIN_PARENS(sss), "");
static_assert(!GMOCK_PP_IS_BEGIN_PARENS(sss()), "");
static_assert(!GMOCK_PP_IS_BEGIN_PARENS(sss() sss), "");
static_assert(GMOCK_PP_IS_BEGIN_PARENS((sss)), "");
static_assert(GMOCK_PP_IS_BEGIN_PARENS((sss)ss), "");
static_assert(!GMOCK_PP_IS_ENCLOSED_PARENS(sss), "");
static_assert(!GMOCK_PP_IS_ENCLOSED_PARENS(sss()), "");
static_assert(!GMOCK_PP_IS_ENCLOSED_PARENS(sss() sss), "");
static_assert(!GMOCK_PP_IS_ENCLOSED_PARENS((sss)ss), "");
static_assert(GMOCK_PP_REMOVE_PARENS((1 + 1)) * 2 == 3, "");
static_assert(GMOCK_PP_INC(4) == 5, "");
template <class... Args>
struct Test {
static constexpr int kArgs = sizeof...(Args);
};
#define GMOCK_PP_INTERNAL_TYPE_TEST(_i, _Data, _element) \
GMOCK_PP_COMMA_IF(_i) _element
static_assert(Test<GMOCK_PP_FOR_EACH(GMOCK_PP_INTERNAL_TYPE_TEST, ~,
(int, float, double, char))>::kArgs == 4,
"");
#define GMOCK_PP_INTERNAL_VAR_TEST_1(_x) 1
#define GMOCK_PP_INTERNAL_VAR_TEST_2(_x, _y) 2
#define GMOCK_PP_INTERNAL_VAR_TEST_3(_x, _y, _z) 3
#define GMOCK_PP_INTERNAL_VAR_TEST(...) \
GMOCK_PP_VARIADIC_CALL(GMOCK_PP_INTERNAL_VAR_TEST_, __VA_ARGS__)
static_assert(GMOCK_PP_INTERNAL_VAR_TEST(x, y) == 2, "");
static_assert(GMOCK_PP_INTERNAL_VAR_TEST(silly) == 1, "");
static_assert(GMOCK_PP_INTERNAL_VAR_TEST(x, y, z) == 3, "");
#define GMOCK_PP_INTERNAL_IS_EMPTY_TEST_1
static_assert(GMOCK_PP_IS_EMPTY(GMOCK_PP_INTERNAL_IS_EMPTY_TEST_1), "");
static_assert(GMOCK_PP_IS_EMPTY(), "");
static_assert(GMOCK_PP_IS_ENCLOSED_PARENS((sss)), "");
static_assert(GMOCK_PP_IS_EMPTY(GMOCK_PP_TAIL(1)), "");
static_assert(GMOCK_PP_NARG0() == 0, "");
}
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/include/gmock/internal/gmock-pp.h | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-pp_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
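A small illustration of how the preprocessor utilities above compose; the helper macro ADD_ONE is hypothetical, and only the GMOCK_PP_* macros defined in this row are relied on:

  #define ADD_ONE(i, data, element) + element
  // GMOCK_PP_FOR_EACH invokes ADD_ONE(index, data, element) once per tuple
  // element, so the expression below expands to `0 + 1 + 2 + 3`.
  static_assert(0 GMOCK_PP_FOR_EACH(ADD_ONE, ~, (1, 2, 3)) == 6, "");
  // GMOCK_PP_NARG counts arguments (an empty list still counts as one),
  // while GMOCK_PP_NARG0 reports zero for an empty list.
  static_assert(GMOCK_PP_NARG(a, b, c) == 3, "");
  static_assert(GMOCK_PP_NARG() == 1 && GMOCK_PP_NARG0() == 0, "");
  #undef ADD_ONE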
138b136e-4e08-44d1-9be5-5b3d37a1beda | cpp | tensorflow/tensorflow | batch_to_space_nd | tensorflow/lite/kernels/batch_to_space_nd.cc | tensorflow/lite/kernels/internal/batch_to_space_nd_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace batch_to_space_nd {
enum KernelType {
kReference,
kGenericOptimized,
};
struct BatchToSpaceNDContext {
BatchToSpaceNDContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
block_shape = GetInput(context, node, 1);
crops = GetInput(context, node, 2);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
const TfLiteTensor* block_shape;
const TfLiteTensor* crops;
TfLiteTensor* output;
};
const int kInputMinDimensionNum = 3;
const int kInputMaxDimensionNum = 4;
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
BatchToSpaceNDContext* op_context) {
TfLiteIntArray* input_size = op_context->input->dims;
const int* block_shape = GetTensorData<int32>(op_context->block_shape);
const int* crops = GetTensorData<int32>(op_context->crops);
int spatial_dims_num = input_size->size - 2;
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->block_shape), 1);
TF_LITE_ENSURE_EQ(context, op_context->block_shape->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, NumDimensions(op_context->crops), 2);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[0],
spatial_dims_num);
TF_LITE_ENSURE_EQ(context, op_context->crops->dims->data[1], 2);
for (int i = 0; i < spatial_dims_num * 2; ++i) {
TF_LITE_ENSURE(context, crops[i] >= 0);
}
TfLiteIntArray* output_size = TfLiteIntArrayCopy(input_size);
int output_batch_size = input_size->data[0];
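  // Each spatial dimension grows by its block factor and is then trimmed by the
  // leading/trailing crop amounts, while the batch dimension shrinks by the
  // product of the block factors; the innermost (depth) dimension is unchanged.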
for (int dim = 0; dim < spatial_dims_num; ++dim) {
TF_LITE_ENSURE(context, block_shape[dim] != 0);
TF_LITE_ENSURE_EQ(context, output_batch_size % block_shape[dim], 0);
output_batch_size = output_batch_size / block_shape[dim];
output_size->data[dim + 1] = input_size->data[dim + 1] * block_shape[dim] -
crops[dim * 2] - crops[dim * 2 + 1];
}
output_size->data[0] = output_batch_size;
output_size->data[input_size->size - 1] =
input_size->data[input_size->size - 1];
return context->ResizeTensor(context, op_context->output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
BatchToSpaceNDContext op_context(context, node);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) >= kInputMinDimensionNum);
TF_LITE_ENSURE(context,
NumDimensions(op_context.input) <= kInputMaxDimensionNum);
TF_LITE_ENSURE_EQ(context, op_context.input->type, op_context.output->type);
if (op_context.input->type == kTfLiteUInt8 ||
op_context.input->type == kTfLiteInt8 ||
op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.scale,
op_context.output->params.scale);
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point,
op_context.output->params.zero_point);
}
if (op_context.input->type == kTfLiteInt16) {
TF_LITE_ENSURE_EQ(context, op_context.input->params.zero_point, 0);
TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, 0);
}
if (!IsConstantOrPersistentTensor(op_context.block_shape) ||
!IsConstantOrPersistentTensor(op_context.crops)) {
SetTensorToDynamic(op_context.output);
return kTfLiteOk;
}
return ResizeOutputTensor(context, &op_context);
}
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
BatchToSpaceNDContext op_context(context, node);
if (IsDynamicTensor(op_context.output)) {
TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
}
#define TF_LITE_BATCH_TO_SPACE_ND(type, scalar) \
type::BatchToSpaceND(GetTensorShape(op_context.input), \
GetTensorData<scalar>(op_context.input), \
GetTensorShape(op_context.block_shape), \
GetTensorData<int32_t>(op_context.block_shape), \
GetTensorShape(op_context.crops), \
GetTensorData<int32_t>(op_context.crops), \
GetTensorShape(op_context.output), \
GetTensorData<scalar>(op_context.output))
switch (op_context.input->type) {
case kTfLiteFloat32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, float);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, float);
}
break;
case kTfLiteUInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, uint8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, uint8_t);
}
break;
case kTfLiteInt8:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int8_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int8_t);
}
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int16_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int16_t);
}
break;
case kTfLiteInt32:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int32_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int32_t);
}
break;
case kTfLiteInt64:
if (kernel_type == kReference) {
TF_LITE_BATCH_TO_SPACE_ND(reference_ops, int64_t);
} else {
TF_LITE_BATCH_TO_SPACE_ND(optimized_ops, int64_t);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Type %d is currently not supported by BatchToSpace.",
op_context.input->type);
return kTfLiteError;
}
#undef TF_LITE_BATCH_TO_SPACE_ND
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_REF() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kReference>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND_GENERIC_OPT() {
static TfLiteRegistration r = {
nullptr, nullptr, batch_to_space_nd::Prepare,
batch_to_space_nd::Eval<batch_to_space_nd::kGenericOptimized>};
return &r;
}
TfLiteRegistration* Register_BATCH_TO_SPACE_ND() {
return Register_BATCH_TO_SPACE_ND_GENERIC_OPT();
}
}
}
} | #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include <gtest/gtest.h>
namespace tflite {
namespace {
std::pair<int, int> GetIndexRange(int spatial_index_dim, int block_shape_dim,
int input_dim, int output_dim) {
int index_start = 0;
int index_end = 0;
optimized_ops::GetIndexRange(spatial_index_dim, block_shape_dim, input_dim,
output_dim, &index_start, &index_end);
return {index_start, index_end};
}
TEST(BatchToSpaceNDTest, TestIndexRange) {
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/3, /*block_shape_dim=*/6,
                          /*input_dim=*/1, /*output_dim=*/6),
            std::make_pair(0, 1));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/2, /*block_shape_dim=*/6,
                          /*input_dim=*/5, /*output_dim=*/30),
            std::make_pair(0, 5));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/0, /*block_shape_dim=*/2,
                          /*input_dim=*/3, /*output_dim=*/4),
            std::make_pair(0, 2));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/-2, /*block_shape_dim=*/2,
                          /*input_dim=*/3, /*output_dim=*/4),
            std::make_pair(1, 3));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/-30, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/5),
            std::make_pair(6, 7));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/-26, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/5),
            std::make_pair(6, 7));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/0, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/5),
            std::make_pair(0, 1));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/4, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/5),
            std::make_pair(0, 1));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/3, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/5),
            std::make_pair(0, 1));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/0, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/1),
            std::make_pair(0, 1));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/-30, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/1),
            std::make_pair(6, 7));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/1, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/1),
            std::make_pair(0, 0));
  EXPECT_EQ(GetIndexRange(/*spatial_index_dim=*/-29, /*block_shape_dim=*/5,
                          /*input_dim=*/7, /*output_dim=*/1),
            std::make_pair(6, 6));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/batch_to_space_nd.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/batch_to_space_nd_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
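A worked instance of the shape arithmetic in ResizeOutputTensor above, using illustrative values:

  input shape = [8, 1, 1, 1], block_shape = [2, 2], crops = [[0, 0], [0, 0]]
  output batch        = 8 / (2 * 2)    = 2
  output spatial dim 1 = 1 * 2 - 0 - 0 = 2
  output spatial dim 2 = 1 * 2 - 0 - 0 = 2
  output depth dim     = 1 (copied from the input)
  => output shape = [2, 2, 2, 1]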
3ca4d441-4a85-4359-b35d-0b039f6559a9 | cpp | tensorflow/tensorflow | grpc_util | tensorflow/core/distributed_runtime/rpc/grpc_util.cc | tensorflow/core/data/service/grpc_util_test.cc | #include "tensorflow/core/distributed_runtime/rpc/grpc_util.h"
#include "tensorflow/core/distributed_runtime/tensor_coding.h"
namespace tensorflow {
bool GrpcMaybeParseTensorResponse(::grpc::ByteBuffer* src,
TensorResponse* dst) {
::tensorflow::GrpcByteSource byte_source(src);
auto s = dst->ParseFrom(&byte_source);
return s.ok();
}
} | #include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace grpc_util {
TEST(GrpcUtil, WrapInvalidArgument) {
grpc::Status s(grpc::StatusCode::INVALID_ARGUMENT, "test message");
Status wrapped = WrapError("wrapping message", s);
ASSERT_EQ(wrapped, errors::InvalidArgument("wrapping message: test message"));
}
TEST(GrpcUtil, WrapOk) {
grpc::Status s;
Status wrapped = WrapError("wrapping message", s);
ASSERT_EQ(wrapped, errors::Internal("Expected a non-ok grpc status. Wrapping "
"message: wrapping message"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/rpc/grpc_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/grpc_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4cd0009a-37ed-49dc-a958-326dbdc4603e | cpp | google/arolla | seq_map_operator | arolla/qexpr/eval_extensions/seq_map_operator.cc | arolla/qexpr/eval_extensions/seq_map_operator_test.cc | #include "arolla/qexpr/eval_extensions/seq_map_operator.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/eval/dynamic_compiled_expr.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/eval/extensions.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/seq_map_expr_operator.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/sequence/mutable_sequence.h"
#include "arolla/sequence/sequence.h"
#include "arolla/sequence/sequence_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
absl::StatusOr<ExprNodePtr> SeqMapOperatorTransformation(
const DynamicEvaluationEngineOptions&, ExprNodePtr node) {
ASSIGN_OR_RETURN(auto seq_map_op, DecayRegisteredOperator(node->op()));
if (seq_map_op == nullptr || typeid(*seq_map_op) != typeid(SeqMapOperator)) {
return node;
}
const auto& node_deps = node->node_deps();
if (node_deps.size() < 2) {
return absl::FailedPreconditionError(absl::StrFormat(
"unexpected number of arguments: expected at least two, got %d",
node_deps.size()));
}
const auto& op_node = node_deps[0];
if (op_node->qtype() == nullptr) {
return absl::FailedPreconditionError("missing node_deps[0].qtype");
}
if (op_node->qtype() != GetQType<ExprOperatorPtr>()) {
return absl::FailedPreconditionError(absl::StrFormat(
"unexpected node_deps[0].qtype: expected %s, got %s",
GetQType<ExprOperatorPtr>()->name(), op_node->qtype()->name()));
}
const auto& op_qvalue = op_node->qvalue();
if (!op_qvalue.has_value()) {
return absl::FailedPreconditionError("missing node_deps[0].literal_value");
}
DCHECK(op_qvalue->GetType() == GetQType<ExprOperatorPtr>());
const auto& op = op_qvalue->UnsafeAs<ExprOperatorPtr>();
return MakeOpNode(
std::make_shared<PackedSeqMapOperator>(op),
std::vector<ExprNodePtr>(node_deps.begin() + 1, node_deps.end()));
}
std::optional<absl::Status> CompilePackedSeqMapOperator(
const CompileOperatorFnArgs& args) {
const auto* map_op = dynamic_cast<const PackedSeqMapOperator*>(args.op.get());
if (map_op == nullptr) {
return std::nullopt;
}
if (args.input_slots.empty()) {
return absl::FailedPreconditionError(
absl::StrFormat("expected at least one input slot, got none"));
}
if (!IsSequenceQType(args.output_slot.GetType())) {
return absl::FailedPreconditionError(
absl::StrFormat("expected a sequence type, got output_qtype = %s",
args.output_slot.GetType()->name()));
}
std::vector<QTypePtr> value_qtypes;
value_qtypes.reserve(args.input_slots.size());
for (size_t i = 0; i < args.input_slots.size(); ++i) {
value_qtypes.push_back(args.input_slots[i].GetType()->value_qtype());
DCHECK(value_qtypes.back() != nullptr);
}
std::vector<TypedSlot> mapper_arg_slots;
mapper_arg_slots.reserve(args.input_slots.size());
for (size_t i = 0; i < args.input_slots.size(); ++i) {
mapper_arg_slots.push_back(
AddSlot(value_qtypes[i], args.executable_builder->layout_builder()));
}
auto mapper_output_slot = AddSlot(args.output_slot.GetType()->value_qtype(),
args.executable_builder->layout_builder());
DynamicEvaluationEngineOptions subexpression_options(args.options);
subexpression_options.enabled_preparation_stages =
DynamicEvaluationEngineOptions::PreparationStage::kAll;
ASSIGN_OR_RETURN(
std::shared_ptr<BoundExpr> mapper_bound_expr,
CompileAndBindExprOperator(
subexpression_options, args.executable_builder->layout_builder(),
map_op->op(), mapper_arg_slots, mapper_output_slot));
std::string init_op_description;
std::string eval_op_description;
if (args.options.collect_op_descriptions) {
auto dynamic_bound_expr =
dynamic_cast<const DynamicBoundExpr*>(mapper_bound_expr.get());
if (dynamic_bound_expr == nullptr) {
return absl::InternalError("expected DynamicBoundExpr");
}
auto init_op_name = absl::StrFormat(
"%s:init{%s}", map_op->display_name(),
absl::StrJoin(dynamic_bound_expr->init_op_descriptions(), "; "));
init_op_description = FormatOperatorCall(init_op_name, {}, {});
auto eval_op_name = absl::StrFormat(
"%s:eval{%s}", map_op->display_name(),
absl::StrJoin(dynamic_bound_expr->eval_op_descriptions(), "; "));
eval_op_description =
FormatOperatorCall(eval_op_name, args.input_slots, {args.output_slot});
}
args.executable_builder->AddInitOp(
MakeBoundOperator(
[mapper_bound_expr](EvaluationContext* ctx, FramePtr frame) {
mapper_bound_expr->InitializeLiterals(ctx, frame);
}),
init_op_description);
args.executable_builder->AddEvalOp(
MakeBoundOperator([input_slots = std::vector(args.input_slots.begin(),
args.input_slots.end()),
output_slot = args.output_slot, mapper_bound_expr,
mapper_arg_slots, mapper_output_slot](
EvaluationContext* ctx, FramePtr frame) {
std::optional<size_t> seq_size = std::nullopt;
for (size_t i = 0; i < input_slots.size(); ++i) {
const auto& cur_slot = input_slots[i];
const auto& seq = frame.Get(cur_slot.UnsafeToSlot<Sequence>());
if (seq_size.has_value() && *seq_size != seq.size()) {
ctx->set_status(absl::InvalidArgumentError(absl::StrFormat(
"expected all sequences to have the same length, got %d and %d",
*seq_size, seq.size())));
return;
}
seq_size = seq.size();
}
const size_t output_value_size =
mapper_output_slot.GetType()->type_layout().AllocSize();
ASSIGN_OR_RETURN(
auto mutable_sequence,
MutableSequence::Make(mapper_output_slot.GetType(), *seq_size),
ctx->set_status(std::move(_)));
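        // Run the mapper once per element: copy the i-th value of every input
        // sequence into the mapper's argument slots, execute the bound
        // sub-expression, and store its output into the result sequence.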
for (size_t i = 0; i < seq_size && ctx->status().ok(); ++i) {
for (size_t arg_id = 0; arg_id < input_slots.size(); ++arg_id) {
const auto& cur_slot = input_slots[arg_id];
const auto& seq = frame.Get(cur_slot.UnsafeToSlot<Sequence>());
seq.GetRef(i)
.CopyToSlot(mapper_arg_slots[arg_id], frame)
.IgnoreError();
}
mapper_bound_expr->Execute(ctx, frame);
mapper_output_slot.GetType()->UnsafeCopy(
frame.GetRawPointer(mapper_output_slot.byte_offset()),
mutable_sequence.RawAt(i, output_value_size));
}
frame.Set(output_slot.UnsafeToSlot<Sequence>(),
std::move(mutable_sequence).Finish());
}),
eval_op_description,
"seq.map");
return absl::OkStatus();
}
}
PackedSeqMapOperator::PackedSeqMapOperator(ExprOperatorPtr op)
: ExprOperatorWithFixedSignature(
absl::StrFormat("packed_seq_map[%s]", op->display_name()),
ExprOperatorSignature::MakeVariadicArgs(),
"(internal operator) packed seq.map",
FingerprintHasher("arolla::expr::eval_internal::PackedSeqMapOperator")
.Combine(op->fingerprint())
.Finish()),
op_(std::move(op)) {}
absl::StatusOr<ExprAttributes> PackedSeqMapOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
std::vector<ExprAttributes> new_inputs;
new_inputs.reserve(inputs.size() + 1);
new_inputs.emplace_back(GetQType<ExprOperatorPtr>(),
TypedValue::FromValue(op_));
new_inputs.insert(new_inputs.end(), inputs.begin(), inputs.end());
return SeqMapOperator::Make()->InferAttributes(new_inputs);
}
AROLLA_INITIALIZER(
.reverse_deps =
{
::arolla::initializer_dep::kOperators,
::arolla::initializer_dep::kQExprOperators,
},
.init_fn = [] {
CompilerExtensionRegistry::GetInstance().RegisterNodeTransformationFn(
SeqMapOperatorTransformation);
CompilerExtensionRegistry::GetInstance().RegisterCompileOperatorFn(
CompilePackedSeqMapOperator);
})
} | #include "arolla/qexpr/eval_extensions/seq_map_operator.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/sequence/sequence_qtype.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::EqualsExpr;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::NotNull;
TEST(SeqMapOperatorTest, SeqMapOperatorTransformation) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr add_operator,
LookupOperator("math.add"));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("seq.map", {Literal(add_operator),
Leaf("xs"), Leaf("ys")}));
EXPECT_THAT(expr->qtype(), Eq(nullptr));
QTypePtr seq_i32 = GetSequenceQType(GetQType<int32_t>());
ASSERT_OK_AND_ASSIGN(
auto prepared_expr,
PrepareExpression(expr, {{"xs", seq_i32}, {"ys", seq_i32}},
DynamicEvaluationEngineOptions{}));
EXPECT_THAT(prepared_expr->qtype(), Eq(seq_i32));
auto packed_op =
dynamic_cast<const PackedSeqMapOperator*>(prepared_expr->op().get());
ASSERT_THAT(packed_op, NotNull());
EXPECT_THAT(packed_op->op()->display_name(), Eq("math.add"));
EXPECT_THAT(packed_op->display_name(), Eq("packed_seq_map[math.add]"));
EXPECT_THAT(prepared_expr->node_deps(),
ElementsAre(
EqualsExpr(CallOp(QTypeAnnotation::Make(),
{Leaf("xs"), Literal(seq_i32)})),
EqualsExpr(CallOp(QTypeAnnotation::Make(),
{Leaf("ys"), Literal(seq_i32)}))));
}
TEST(SeqMapOperatorTest, CompilePackedSeqMapOperator) {
ASSERT_OK_AND_ASSIGN(
ExprOperatorPtr x_plus_y_mul_2,
MakeLambdaOperator(
"x_plus_y_mul_2", ExprOperatorSignature::Make("x, y"),
CallOp("math.multiply",
{CallOp("math.add", {Placeholder("x"), Placeholder("y")}),
Literal(int32_t{2})})));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp("seq.map", {Literal(x_plus_y_mul_2),
Leaf("xs"), Leaf("ys")}));
QTypePtr seq_i32 = GetSequenceQType(GetQType<int32_t>());
FrameLayout::Builder layout_builder;
auto xs_slot = AddSlot(seq_i32, &layout_builder);
auto ys_slot = AddSlot(seq_i32, &layout_builder);
DynamicEvaluationEngineOptions options{.collect_op_descriptions = true};
EXPECT_THAT(
CompileAndBindForDynamicEvaluation(options, &layout_builder, expr,
{{"xs", xs_slot}, {"ys", ys_slot}}),
IsOkAndHolds(AllOf(
InitOperationsAre("packed_seq_map[x_plus_y_mul_2]:init{"
"INT32 [0x70] = 2"
"}()"),
EvalOperationsAre(
"SEQUENCE[INT32] [0x40] = packed_seq_map[x_plus_y_mul_2]:eval{"
"INT32 [0x6C] = math.add(INT32 [0x60], INT32 [0x64]); "
"INT32 [0x68] = math.multiply(INT32 [0x6C], INT32 [0x70])"
"}(SEQUENCE[INT32] [0x00], SEQUENCE[INT32] [0x20])"))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/eval_extensions/seq_map_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/eval_extensions/seq_map_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
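A sketch of the lowering and evaluation performed by this row's operators, with illustrative int32 sequences:

  // Before the node transformation:  seq.map(math.add, xs, ys)
  // After the node transformation:   packed_seq_map[math.add](xs, ys)
  // At evaluation time the bound operator checks that xs and ys have equal
  // length, then runs math.add once per index, so for
  //   xs = [1, 2, 3], ys = [10, 20, 30]
  // the resulting sequence is [11, 22, 33].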
ec92ce8c-32f6-44bf-8aec-55c051a57685 | cpp | tensorflow/tensorflow | hlo_reachability | third_party/xla/xla/hlo/ir/hlo_reachability.cc | third_party/xla/xla/service/hlo_reachability_test.cc | #include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
HloReachabilityMap::HloReachabilityMap(
absl::Span<const HloInstruction* const> instructions)
: bit_sets_(instructions.size(), BitSet(instructions.size())) {
indices_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
bit_sets_[i].Set(i);
indices_[GetKey(instructions[i])] = i;
}
}
bool HloReachabilityMap::SetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
Index index = GetIndex(instruction);
BitSet& bit_set = bit_sets_[index];
tmp_bit_set_ = bit_set;
SetReachabilityToUnionHelper(inputs, index);
return bit_set != tmp_bit_set_;
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
SetReachabilityToUnionHelper(inputs, GetIndex(instruction));
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const Index> input_indices, Index index) {
SetReachabilityToUnionHelper(input_indices, index);
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const HloInstruction* const> inputs, Index index) {
absl::InlinedVector<Index, 16> input_indices;
input_indices.reserve(inputs.size());
for (const HloInstruction* input : inputs) {
input_indices.push_back(GetIndex(input));
}
SetReachabilityToUnionHelper(input_indices, index);
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const Index> input_indices, Index index) {
BitSet& bit_set = bit_sets_[index];
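  // Unless the instruction is among its own inputs, start from an empty set so
  // that stale reachability bits are discarded before the union below.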
if (!absl::c_linear_search(input_indices, index)) {
bit_set.SetToZero();
}
bit_set.Set(index);
for (Index input_index : input_indices) {
if (input_index != index) {
bit_set |= bit_sets_[input_index];
}
}
}
void HloReachabilityMap::Replace(const HloInstruction* original,
const HloInstruction* replacement) {
if (GetKey(original) != GetKey(replacement)) {
indices_[GetKey(replacement)] = GetIndex(original);
indices_.erase(GetKey(original));
}
}
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::BuildWithRestrictions(
const HloComputation* computation,
absl::FunctionRef<void(const HloInstruction*,
std::vector<HloInstruction*>*)>
add_dependencies) {
const auto& all = computation->MakeInstructionPostOrder();
auto result = std::make_unique<HloReachabilityMap>(all);
std::vector<HloInstruction*> inputs;
for (const HloInstruction* hlo : all) {
inputs.clear();
add_dependencies(hlo, &inputs);
result->FastSetReachabilityToUnion(inputs, hlo);
}
return result;
}
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::Build(
const HloComputation* computation) {
HloComputation::ChannelDependencies channel_dependencies =
computation->ComputeChannelDependencies();
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder(channel_dependencies);
auto result = std::make_unique<HloReachabilityMap>(instructions);
auto get_bit_set = [&](const HloInstruction* instruction) -> BitSet& {
return result->bit_sets_[result->GetIndex(instruction)];
};
for (const HloInstruction* instruction : instructions) {
BitSet& bit_set = get_bit_set(instruction);
auto add_dependencies = [&](const HloInstruction* instruction) {
for (const HloInstruction* operand : instruction->operands()) {
bit_set |= get_bit_set(operand);
}
for (const HloInstruction* predecessor :
instruction->control_predecessors()) {
bit_set |= get_bit_set(predecessor);
}
};
add_dependencies(instruction);
auto it = channel_dependencies.find(instruction);
if (it != channel_dependencies.end()) {
absl::c_for_each(it->second, add_dependencies);
}
}
return result;
}
void HloReachabilityMap::UpdateReachabilityThroughInstruction(
const HloInstruction* instruction) {
std::queue<const HloInstruction*> worklist;
worklist.push(instruction);
std::vector<HloInstruction*> inputs;
while (!worklist.empty()) {
const HloInstruction* item = worklist.front();
worklist.pop();
inputs.assign(item->operands().begin(), item->operands().end());
inputs.insert(inputs.end(), item->control_predecessors().begin(),
item->control_predecessors().end());
if (SetReachabilityToUnion(inputs, item)) {
for (const HloInstruction* user : item->users()) {
worklist.push(user);
}
for (const HloInstruction* succ : item->control_successors()) {
worklist.push(succ);
}
}
}
}
} | #include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <set>
#include <string>
#include <string_view>
#include "absl/random/random.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloReachabilityTest : public HloTestBase {};
TEST_F(HloReachabilityTest, Reachability) {
auto builder = HloComputation::Builder(TestName());
auto a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto c = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto d = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto e = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloReachabilityMap reachability({a, b, c, d, e});
reachability.SetReachable(a, a);
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, c));
EXPECT_TRUE(reachability.SetReachabilityToUnion({b, c}, d));
EXPECT_TRUE(reachability.SetReachabilityToUnion({c}, e));
EXPECT_TRUE(reachability.IsReachable(a, a));
EXPECT_TRUE(reachability.IsReachable(a, b));
EXPECT_TRUE(reachability.IsReachable(a, c));
EXPECT_TRUE(reachability.IsReachable(a, d));
EXPECT_TRUE(reachability.IsReachable(a, e));
EXPECT_FALSE(reachability.IsReachable(b, a));
EXPECT_TRUE(reachability.IsReachable(b, b));
EXPECT_FALSE(reachability.IsReachable(b, c));
EXPECT_TRUE(reachability.IsReachable(b, d));
EXPECT_FALSE(reachability.IsReachable(b, e));
EXPECT_FALSE(reachability.IsReachable(e, a));
EXPECT_FALSE(reachability.IsReachable(e, b));
EXPECT_FALSE(reachability.IsReachable(e, c));
EXPECT_FALSE(reachability.IsReachable(e, d));
EXPECT_TRUE(reachability.IsReachable(e, e));
EXPECT_FALSE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_FALSE(reachability.SetReachabilityToUnion({b, c}, d));
}
TEST_F(HloReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
ASSERT_IS_OK(add->RemoveControlDependencyTo(exp));
reachability->UpdateReachabilityThroughInstruction(exp);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_FALSE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_FALSE(reachability->IsReachable(constant1, copy));
ASSERT_IS_OK(constant2->ReplaceUseWith(negate, constant1));
reachability->UpdateReachabilityThroughInstruction(negate);
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_FALSE(reachability->IsReachable(constant2, negate));
EXPECT_FALSE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_FALSE(reachability->IsReachable(constant2, copy));
}
TEST_F(HloReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
TEST_F(HloReachabilityTest, ReplaceInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY entry {
p0 = f32[28,28]{1,0} parameter(0)
ROOT add = f32[28,28]{1,0} add(p0, p0)
})")
.value();
auto computation = module->entry_computation();
auto reachability = HloReachabilityMap::Build(computation);
auto* add = module->entry_computation()->root_instruction();
auto* p0 = add->operand(0);
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, add);
EXPECT_TRUE(reachability->IsReachable(p0, add));
auto* fusion = computation->AddInstruction(HloInstruction::CreateFusion(
add->shape(), HloInstruction::FusionKind::kLoop, add));
EXPECT_FALSE(reachability->IsPresent(fusion));
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, fusion);
EXPECT_FALSE(reachability->IsPresent(add));
EXPECT_TRUE(reachability->IsReachable(p0, fusion));
}
}
class HloReachabilityMapBitSetBenchmark {
public:
explicit HloReachabilityMapBitSetBenchmark(int size) : a_(size), b_(size) {
absl::BitGen gen;
for (int i = 0; i < size; ++i) {
if (absl::Bernoulli(gen, 0.5)) a_.Set(i);
if (absl::Bernoulli(gen, 0.5)) b_.Set(i);
}
}
void Union() { a_ |= b_; }
private:
HloReachabilityMap::BitSet a_;
HloReachabilityMap::BitSet b_;
};
namespace {
void BM_HloReachabilityBitSetUnion(benchmark::State& state) {
HloReachabilityMapBitSetBenchmark bm(state.range(0));
for (auto s : state) {
bm.Union();
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloReachabilityBitSetUnion)->BM_ARGS;
class HloReachabilityBenchmark {
public:
HloReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloReachabilityMap> Build() {
return HloReachabilityMap::Build(computation_);
}
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
void BM_HloReachabilityBuild(benchmark::State& state) {
HloReachabilityBenchmark bm(state.range(0), state.name());
for (auto s : state) {
benchmark::DoNotOptimize(bm.Build());
}
}
BENCHMARK(BM_HloReachabilityBuild)->BM_ARGS;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_reachability.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_reachability_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
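A compact trace of the bit-set updates behind SetReachabilityToUnion, using the diamond a -> {b, c} -> d from the Reachability test above (each instruction's set always contains its own index):

  bit_sets_[a] = {a}
  bit_sets_[b] = {b} | bit_sets_[a]                 = {a, b}
  bit_sets_[c] = {c} | bit_sets_[a]                 = {a, c}
  bit_sets_[d] = {d} | bit_sets_[b] | bit_sets_[c]  = {a, b, c, d}
  => IsReachable(a, d) is true, while IsReachable(b, c) is false.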
d0a03331-2a39-4181-8b06-23dbe6434c23 | cpp | tensorflow/tensorflow | file_utils | tensorflow/core/data/service/snapshot/file_utils.cc | tensorflow/core/data/service/snapshot/file_utils_test.cc | #include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/protobuf/status.pb.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/random.h"
#include "tsl/platform/status_to_from_proto.h"
namespace tensorflow {
namespace data {
namespace {
constexpr const char kTempFileSuffix[] = ".tmp";
absl::Status AtomicallyWrite(
absl::string_view filename, tsl::Env* env,
absl::FunctionRef<absl::Status(const std::string&)> nonatomically_write) {
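  // Write into a uniquely named temporary file first and only then rename it
  // over the target, so readers never observe a partially written file.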
std::string uncommitted_filename = absl::StrCat(filename, "__");
if (!env->CreateUniqueFileName(&uncommitted_filename, kTempFileSuffix)) {
return tsl::errors::Internal("Failed to write file ", filename,
": Unable to create temporary files.");
}
TF_RETURN_IF_ERROR(nonatomically_write(uncommitted_filename));
absl::Status status =
env->RenameFile(uncommitted_filename, std::string(filename));
if (!status.ok()) {
return tsl::errors::Internal("Failed to rename file: ", status.ToString(),
". Source: ", uncommitted_filename,
", destination: ", filename);
}
return status;
}
}
absl::Status AtomicallyWriteStringToFile(absl::string_view filename,
absl::string_view str, tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteStringToFile(env, uncommitted_filename, str));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write string: ", str);
return absl::OkStatus();
}
absl::Status AtomicallyWriteBinaryProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteBinaryProto(env, uncommitted_filename, proto));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write proto in binary format: ", proto.DebugString());
return absl::OkStatus();
}
absl::Status AtomicallyWriteTextProto(absl::string_view filename,
const tsl::protobuf::Message& proto,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
TF_RETURN_IF_ERROR(WriteTextProto(env, uncommitted_filename, proto));
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
"Requested to write proto in text format: ", proto.DebugString());
return absl::OkStatus();
}
absl::Status AtomicallyWriteTFRecords(absl::string_view filename,
const std::vector<Tensor>& tensors,
absl::string_view compression,
tsl::Env* env) {
auto nonatomically_write = [&](const std::string& uncommitted_filename) {
snapshot_util::TFRecordWriter writer(uncommitted_filename,
std::string(compression));
TF_RETURN_IF_ERROR(writer.Initialize(env));
TF_RETURN_IF_ERROR(writer.WriteTensors(tensors));
return writer.Close();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
AtomicallyWrite(filename, env, nonatomically_write),
" Requested to atomically write TF record file: ", filename);
return absl::OkStatus();
}
absl::StatusOr<std::vector<std::string>> GetChildren(
absl::string_view directory, tsl::Env* env) {
std::vector<std::string> files, result;
TF_RETURN_IF_ERROR(env->FileExists(std::string(directory)));
absl::Status status = env->GetChildren(std::string(directory), &files);
if (absl::IsNotFound(status)) {
return result;
}
TF_RETURN_IF_ERROR(status);
for (std::string& file : files) {
if (!IsTemporaryFile(file)) {
result.push_back(std::move(file));
}
}
return result;
}
bool IsTemporaryFile(absl::string_view filename) {
return absl::EndsWith(filename, kTempFileSuffix);
}
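// Cardinality is only known once the snapshot's done file exists; until then
// report kUnknownCardinality.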
int64_t SnapshotChunksCardinality(absl::string_view snapshot_path,
tsl::Env* env) {
if (!env->FileExists(SnapshotDoneFilePath(snapshot_path)).ok()) {
return kUnknownCardinality;
}
absl::StatusOr<std::vector<std::string>> chunks =
GetChildren(CommittedChunksDirectory(snapshot_path), env);
if (!chunks.ok()) {
return kUnknownCardinality;
}
return chunks->size();
}
}
} | #include "tensorflow/core/data/service/snapshot/file_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/io/compression.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
absl::StatusOr<std::string> CreateTestDirectory() {
std::string directory;
if (!tsl::Env::Default()->LocalTempFilename(&directory)) {
return tsl::errors::FailedPrecondition(
"Failed to create local test directory.");
}
TF_RETURN_IF_ERROR(tsl::Env::Default()->RecursivelyCreateDir(directory));
return directory;
}
using AtomicallyWriteStringToFileTest = ::testing::TestWithParam<std::string>;
TEST_P(AtomicallyWriteStringToFileTest, WriteString) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
std::string file_contents = GetParam();
TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, file_contents,
tsl::Env::Default()));
std::string data;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadFileToString(tsl::Env::Default(), test_file, &data));
EXPECT_EQ(data, file_contents);
}
INSTANTIATE_TEST_SUITE_P(FileContents, AtomicallyWriteStringToFileTest,
::testing::ValuesIn<std::string>({"OK", ""}));
TEST(FileUtilsTest, AtomicallyWriteBinaryProto) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
DatasetDef out = testing::RangeDataset(10);
TF_ASSERT_OK(AtomicallyWriteBinaryProto(test_file, out, tsl::Env::Default()));
DatasetDef in;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadBinaryProto(tsl::Env::Default(), test_file, &in));
EXPECT_THAT(in, testing::EqualsProto(out));
}
TEST(FileUtilsTest, AtomicallyWriteTextProto) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
DatasetDef out = testing::RangeDataset(10);
TF_ASSERT_OK(AtomicallyWriteTextProto(test_file, out, tsl::Env::Default()));
DatasetDef in;
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
TF_ASSERT_OK(tsl::ReadTextProto(tsl::Env::Default(), test_file, &in));
EXPECT_THAT(in, testing::EqualsProto(out));
}
TEST(FileUtilsTest, AtomicallyWriteTFRecord) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
Tensor out = CreateTensor<int64_t>(TensorShape({2}), {1, 2});
TF_ASSERT_OK(AtomicallyWriteTFRecords(
test_file, {out}, tsl::io::compression::kSnappy, tsl::Env::Default()));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(test_file));
snapshot_util::TFRecordReaderImpl reader(test_file,
tsl::io::compression::kSnappy);
TF_ASSERT_OK(reader.Initialize(tsl::Env::Default()));
TF_ASSERT_OK_AND_ASSIGN(std::vector<Tensor> in, reader.GetTensors());
EXPECT_EQ(out.DebugString(), in.front().DebugString());
}
TEST(FileUtilsTest, GetChildren) {
TF_ASSERT_OK_AND_ASSIGN(std::string directory, CreateTestDirectory());
std::string test_file = tsl::io::JoinPath(directory, "test_file");
TF_ASSERT_OK(AtomicallyWriteStringToFile(test_file, "", tsl::Env::Default()));
std::string tmp_file = tsl::io::JoinPath(directory, "test_file.tmp");
TF_ASSERT_OK(AtomicallyWriteStringToFile(tmp_file, "", tsl::Env::Default()));
EXPECT_THAT(GetChildren(directory, tsl::Env::Default()),
IsOkAndHolds(ElementsAre("test_file")));
}
TEST(FileUtilsTest, GetChildrenEmptyDirectory) {
TF_ASSERT_OK_AND_ASSIGN(std::string empty_directory, CreateTestDirectory());
EXPECT_THAT(GetChildren(empty_directory, tsl::Env::Default()),
IsOkAndHolds(IsEmpty()));
}
TEST(FileUtilsTest, GetChildrenDirectoryNotFound) {
EXPECT_THAT(GetChildren("Not exist", tsl::Env::Default()),
StatusIs(tsl::error::NOT_FOUND));
}
TEST(FileUtilsTest, IsTemporaryFile) {
EXPECT_TRUE(IsTemporaryFile("file.tmp"));
EXPECT_FALSE(IsTemporaryFile("file"));
EXPECT_FALSE(IsTemporaryFile(""));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/file_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/snapshot/file_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a0ab9f15-16d6-4963-9b2e-a0ac62c97a9f | cpp | google/libaddressinput | address_validator | cpp/src/address_validator.cc | cpp/test/address_validator_test.cc | #include <libaddressinput/address_validator.h>
#include <cassert>
#include <cstddef>
#include "validation_task.h"
namespace i18n {
namespace addressinput {
AddressValidator::AddressValidator(Supplier* supplier) : supplier_(supplier) {
assert(supplier_ != nullptr);
}
void AddressValidator::Validate(const AddressData& address,
bool allow_postal,
bool require_name,
const FieldProblemMap* filter,
FieldProblemMap* problems,
const Callback& validated) const {
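// The heap-allocated ValidationTask is expected to manage its own lifetime,
// deleting itself after it reports its result through `validated`.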
(new ValidationTask(
address,
allow_postal,
require_name,
filter,
problems,
validated))->Run(supplier_);
}
}
} | #include <libaddressinput/address_validator.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/ondemand_supplier.h>
#include <libaddressinput/preload_supplier.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "testdata_source.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressValidator;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::FieldProblemMap;
using i18n::addressinput::NullStorage;
using i18n::addressinput::OndemandSupplier;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::TestdataSource;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::DEPENDENT_LOCALITY;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::POSTAL_CODE;
using i18n::addressinput::STREET_ADDRESS;
using i18n::addressinput::INVALID_FORMAT;
using i18n::addressinput::MISMATCHING_VALUE;
using i18n::addressinput::MISSING_REQUIRED_FIELD;
using i18n::addressinput::UNEXPECTED_FIELD;
using i18n::addressinput::UNKNOWN_VALUE;
using i18n::addressinput::UNSUPPORTED_FIELD;
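// Wraps the validator so the same tests can run against both the on-demand
// and the preload supplier backends.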
class ValidatorWrapper {
public:
virtual ~ValidatorWrapper() = default;
virtual void Validate(const AddressData& address, bool allow_postal,
bool require_name, const FieldProblemMap* filter,
FieldProblemMap* problems,
const AddressValidator::Callback& validated) = 0;
};
class OndemandValidatorWrapper : public ValidatorWrapper {
public:
OndemandValidatorWrapper(const OndemandValidatorWrapper&) = delete;
OndemandValidatorWrapper& operator=(const OndemandValidatorWrapper&) = delete;
static ValidatorWrapper* Build() { return new OndemandValidatorWrapper; }
void Validate(const AddressData& address, bool allow_postal,
bool require_name, const FieldProblemMap* filter,
FieldProblemMap* problems,
const AddressValidator::Callback& validated) override {
validator_.Validate(address, allow_postal, require_name, filter, problems,
validated);
}
private:
OndemandValidatorWrapper()
: supplier_(new TestdataSource(false), new NullStorage),
validator_(&supplier_) {}
OndemandSupplier supplier_;
const AddressValidator validator_;
};
class PreloadValidatorWrapper : public ValidatorWrapper {
public:
PreloadValidatorWrapper(const PreloadValidatorWrapper&) = delete;
PreloadValidatorWrapper& operator=(const PreloadValidatorWrapper&) = delete;
static ValidatorWrapper* Build() { return new PreloadValidatorWrapper; }
void Validate(const AddressData& address, bool allow_postal,
bool require_name, const FieldProblemMap* filter,
FieldProblemMap* problems,
const AddressValidator::Callback& validated) override {
const std::string& region_code = address.region_code;
if (!region_code.empty() && !supplier_.IsLoaded(region_code)) {
supplier_.LoadRules(region_code, *loaded_);
}
validator_.Validate(address, allow_postal, require_name, filter, problems,
validated);
}
private:
PreloadValidatorWrapper()
: supplier_(new TestdataSource(true), new NullStorage),
validator_(&supplier_),
loaded_(BuildCallback(this, &PreloadValidatorWrapper::Loaded)) {}
void Loaded(bool success, const std::string&, int) { ASSERT_TRUE(success); }
PreloadSupplier supplier_;
const AddressValidator validator_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_;
};
class AddressValidatorTest
: public testing::TestWithParam<ValidatorWrapper* (*)()> {
public:
AddressValidatorTest(const AddressValidatorTest&) = delete;
AddressValidatorTest& operator=(const AddressValidatorTest&) = delete;
protected:
AddressValidatorTest()
: address_(),
allow_postal_(false),
require_name_(false),
filter_(),
problems_(),
expected_(),
called_(false),
validator_wrapper_((*GetParam())()),
validated_(BuildCallback(this, &AddressValidatorTest::Validated)) {}
void Validate() {
validator_wrapper_->Validate(address_, allow_postal_, require_name_,
&filter_, &problems_, *validated_);
}
AddressData address_;
bool allow_postal_;
bool require_name_;
FieldProblemMap filter_;
FieldProblemMap problems_;
FieldProblemMap expected_;
bool called_;
private:
void Validated(bool success, const AddressData& address,
const FieldProblemMap& problems) {
ASSERT_TRUE(success);
ASSERT_EQ(&address_, &address);
ASSERT_EQ(&problems_, &problems);
called_ = true;
}
const std::unique_ptr<ValidatorWrapper> validator_wrapper_;
const std::unique_ptr<const AddressValidator::Callback> validated_;
};
INSTANTIATE_TEST_SUITE_P(OndemandSupplier, AddressValidatorTest,
testing::Values(&OndemandValidatorWrapper::Build));
INSTANTIATE_TEST_SUITE_P(PreloadSupplier, AddressValidatorTest,
testing::Values(&PreloadValidatorWrapper::Build));
TEST_P(AddressValidatorTest, EmptyAddress) {
expected_ = {{COUNTRY, MISSING_REQUIRED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, InvalidCountry) {
address_ = {.region_code = "QZ"};
expected_ = {{COUNTRY, UNKNOWN_VALUE}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidAddressUS) {
address_ = {
.region_code = "US",
.address_line{"1600 Amphitheatre Parkway"},
.administrative_area = "CA",
.locality = "Mountain View",
.postal_code = "94043",
.language_code = "en",
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_ = {
{LOCALITY, UNSUPPORTED_FIELD},
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
};
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, InvalidAddressUS) {
address_ = {
.region_code = "US",
.postal_code = "123",
};
expected_ = {
{ADMIN_AREA, MISSING_REQUIRED_FIELD},
{LOCALITY, MISSING_REQUIRED_FIELD},
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
{POSTAL_CODE, INVALID_FORMAT},
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_.emplace(DEPENDENT_LOCALITY, UNSUPPORTED_FIELD);
expected_.emplace(LOCALITY, UNSUPPORTED_FIELD);
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidAddressCH) {
address_ = {
.region_code = "CH",
.address_line{"Brandschenkestrasse 110"},
.locality = "ZH",
.postal_code = "8002",
.language_code = "de",
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_ = {
{LOCALITY, UNSUPPORTED_FIELD},
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
};
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, InvalidAddressCH) {
address_ = {
.region_code = "CH",
.postal_code = "123",
};
expected_ = {
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
{POSTAL_CODE, INVALID_FORMAT},
{LOCALITY, MISSING_REQUIRED_FIELD},
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_.emplace(LOCALITY, UNSUPPORTED_FIELD);
expected_.emplace(DEPENDENT_LOCALITY, UNSUPPORTED_FIELD);
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidPostalCodeMX) {
address_ = {
.region_code = "MX",
.address_line{"Av Gregorio Méndez Magaña 1400"},
.administrative_area = "TAB",
.locality = "Villahermosa",
.postal_code = "86070",
.language_code = "es",
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_ = {
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
{LOCALITY, UNSUPPORTED_FIELD},
};
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, MismatchingPostalCodeMX) {
address_ = {
.region_code = "MX",
.address_line{"Av Gregorio Méndez Magaña 1400"},
.administrative_area = "TAB",
.locality = "Villahermosa",
.postal_code = "80000",
.language_code = "es",
};
expected_ = {{POSTAL_CODE, MISMATCHING_VALUE}};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_.emplace(LOCALITY, UNSUPPORTED_FIELD);
expected_.emplace(DEPENDENT_LOCALITY, UNSUPPORTED_FIELD);
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidateFilter) {
address_ = {
.region_code = "CH",
.postal_code = "123",
};
filter_ = {{POSTAL_CODE, INVALID_FORMAT}};
expected_ = {{POSTAL_CODE, INVALID_FORMAT}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidateClearsProblems) {
address_ = {
.region_code = "CH",
.address_line{"Brandschenkestrasse 110"},
.locality = "ZH",
.postal_code = "123",
.language_code = "de",
};
problems_ = {
{LOCALITY, UNEXPECTED_FIELD},
{LOCALITY, MISSING_REQUIRED_FIELD},
{STREET_ADDRESS, MISSING_REQUIRED_FIELD},
};
expected_ = {{POSTAL_CODE, INVALID_FORMAT}};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_.emplace(LOCALITY, UNSUPPORTED_FIELD);
expected_.emplace(DEPENDENT_LOCALITY, UNSUPPORTED_FIELD);
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidKanjiAddressJP) {
address_ = {
.region_code = "JP",
.address_line{"徳島市..."},
.administrative_area = "徳島県",
.postal_code = "770-0847",
.language_code = "ja",
};
if (GetParam() == &PreloadValidatorWrapper::Build) {
expected_ = {
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
{LOCALITY, UNSUPPORTED_FIELD},
};
}
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidLatinAddressJP) {
if (GetParam() == &OndemandValidatorWrapper::Build) return;
address_ = {
.region_code = "JP",
.address_line{"...Tokushima"},
.administrative_area = "Tokushima",
.postal_code = "770-0847",
.language_code = "ja-Latn",
};
expected_ = {
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
{LOCALITY, UNSUPPORTED_FIELD},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidAddressBR) {
if (GetParam() == &OndemandValidatorWrapper::Build) return;
address_ = {
.region_code = "BR",
.address_line{"Rodovia Raposo Tavares, 6388-6682"},
.administrative_area = "São Paulo",
.locality = "Presidente Prudente",
.postal_code = "19063-008",
.language_code = "pt",
};
expected_ = {{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD}};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidAddressCA_en) {
if (GetParam() == &OndemandValidatorWrapper::Build) return;
address_ = {
.region_code = "CA",
.address_line{"..."},
.administrative_area = "New Brunswick",
.locality = "Saint John County",
.postal_code = "E2L 4Z6",
.language_code = "en",
};
expected_ = {
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
{LOCALITY, UNSUPPORTED_FIELD},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
TEST_P(AddressValidatorTest, ValidAddressCA_fr) {
if (GetParam() == &OndemandValidatorWrapper::Build) return;
address_ = {
.region_code = "CA",
.address_line{"..."},
.administrative_area = "Nouveau-Brunswick",
.locality = "Comté de Saint-Jean",
.postal_code = "E2L 4Z6",
.language_code = "fr",
};
expected_ = {
{DEPENDENT_LOCALITY, UNSUPPORTED_FIELD},
{LOCALITY, UNSUPPORTED_FIELD},
};
ASSERT_NO_FATAL_FAILURE(Validate());
ASSERT_TRUE(called_);
EXPECT_EQ(expected_, problems_);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_validator.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_validator_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
49981549-6092-4e51-8692-efceb12efc11 | cpp | google/tensorstore | file_lister | tensorstore/internal/os/file_lister.h | tensorstore/internal/os/file_lister_test.cc | #ifndef TENSORSTORE_INTERNAL_OS_FILE_LISTER_H_
#define TENSORSTORE_INTERNAL_OS_FILE_LISTER_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
namespace tensorstore {
namespace internal_os {
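// A single file or directory yielded during a RecursiveFileList traversal.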
class ListerEntry {
public:
struct Impl;
ListerEntry(Impl* impl) : impl_(impl) {}
bool IsDirectory();
const std::string& GetFullPath();
std::string_view GetPathComponent();
int64_t GetSize();
absl::Status Delete();
private:
Impl* impl_;
};
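// Recursively lists `root_directory`. `recurse_into` decides whether to
// descend into a directory; `on_item` is invoked for each entry found.
// A missing root directory is not treated as an error.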
absl::Status RecursiveFileList(
std::string root_directory,
absl::FunctionRef<bool(std::string_view)> recurse_into,
absl::FunctionRef<absl::Status(ListerEntry)> on_item);
}
}
#endif | #include "tensorstore/internal/os/file_lister.h"
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/os/file_util.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOk;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::internal_os::FsyncDirectory;
using ::tensorstore::internal_os::FsyncFile;
using ::tensorstore::internal_os::MakeDirectory;
using ::tensorstore::internal_os::OpenDirectoryDescriptor;
using ::tensorstore::internal_os::OpenExistingFileForReading;
using ::tensorstore::internal_os::OpenFileForWriting;
using ::tensorstore::internal_os::ReadFromFile;
using ::tensorstore::internal_os::RecursiveFileList;
using ::tensorstore::internal_os::WriteToFile;
using ::tensorstore::internal_testing::ScopedTemporaryDirectory;
static std::optional<ScopedTemporaryDirectory> g_scoped_dir;
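// Creates subdirectories xyz/ and zzq/ plus files a.txt, b.txt and c.txt in
// both `root` and root/xyz, fsyncing them so later listings are stable.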
void AddFiles(std::string_view root) {
ABSL_CHECK(!root.empty());
TENSORSTORE_CHECK_OK(MakeDirectory(absl::StrCat(root, "/xyz")));
TENSORSTORE_CHECK_OK(MakeDirectory(absl::StrCat(root, "/zzq")));
std::string fname = "/a.txt";
for (; fname[1] < 'd'; fname[1] += 1) {
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto f, OpenFileForWriting(absl::StrCat(root, fname)));
TENSORSTORE_CHECK_OK(FsyncFile(f.get()));
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto g, OpenFileForWriting(absl::StrCat(root, "/xyz", fname)));
TENSORSTORE_CHECK_OK(FsyncFile(g.get()));
}
for (const auto& suffix : {"/xyz", ""}) {
TENSORSTORE_CHECK_OK_AND_ASSIGN(
auto f, OpenDirectoryDescriptor(absl::StrCat(root, suffix)));
EXPECT_THAT(FsyncDirectory(f.get()), IsOk());
}
}
class RecursiveFileListTest : public testing::Test {
public:
static void SetUpTestSuite() {
g_scoped_dir.emplace();
AddFiles(g_scoped_dir->path());
}
static void TearDownTestSuite() { g_scoped_dir = std::nullopt; }
RecursiveFileListTest() : cwd_(g_scoped_dir->path()) {}
private:
tensorstore::internal_testing::ScopedCurrentWorkingDirectory cwd_;
};
TEST_F(RecursiveFileListTest, MissingIsOk) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/aax",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
IsOk());
}
TEST_F(RecursiveFileListTest, EmptyIsOk) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/zzq",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
IsOk());
}
TEST_F(RecursiveFileListTest, FileIsFailure) {
EXPECT_THAT(RecursiveFileList(
g_scoped_dir->path() + "/a.txt",
[](std::string_view path) { return true; },
[](auto entry) { return absl::OkStatus(); }),
::testing::Not(IsOk()));
}
TEST_F(RecursiveFileListTest, FullDirectory) {
for (const std::string& root :
{g_scoped_dir->path(), std::string("."), std::string()}) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
root, [](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetPathComponent()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre(
"c.txt", "b.txt", "a.txt", "<dir>zzq", "c.txt",
"b.txt", "a.txt", "<dir>xyz", "<dir>"));
}
}
TEST_F(RecursiveFileListTest, SubDirectory) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
"xyz", [](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetFullPath()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre("xyz/a.txt", "xyz/b.txt",
"xyz/c.txt", "<dir>xyz"));
}
TEST_F(RecursiveFileListTest, NonRecursive) {
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
"",
[](std::string_view path) {
ABSL_LOG(INFO) << path;
return path.empty();
},
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetFullPath()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files,
::testing::UnorderedElementsAre("c.txt", "b.txt", "a.txt",
"<dir>zzq", "<dir>xyz", "<dir>"));
}
TEST(RecursiveFileListEntryTest, DeleteWithOpenFile) {
ScopedTemporaryDirectory tmpdir;
AddFiles(tmpdir.path());
{
auto f = OpenFileForWriting(absl::StrCat(tmpdir.path(), "/read.txt"));
EXPECT_THAT(f, IsOk());
EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
}
{
auto f =
OpenExistingFileForReading(absl::StrCat(tmpdir.path(), "/read.txt"));
EXPECT_THAT(f, IsOk());
std::vector<std::string> files;
EXPECT_THAT(RecursiveFileList(
tmpdir.path(),
[](std::string_view path) { return true; },
[&](auto entry) {
if (entry.GetFullPath() == tmpdir.path()) {
return absl::OkStatus();
}
auto status = entry.Delete();
if (status.ok() || absl::IsNotFound(status))
return absl::OkStatus();
return status;
}),
IsOk());
char buf[16];
EXPECT_THAT(ReadFromFile(f->get(), buf, 3, 0), IsOkAndHolds(3));
}
std::vector<std::string> files;
EXPECT_THAT(
RecursiveFileList(
tmpdir.path(),
[](std::string_view path) { return true; },
[&](auto entry) {
files.push_back(absl::StrCat(entry.IsDirectory() ? "<dir>" : "",
entry.GetPathComponent()));
return absl::OkStatus();
}),
IsOk());
EXPECT_THAT(files, ::testing::UnorderedElementsAre("<dir>"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_lister.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_lister_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d64bcc05-1b16-4b31-85e6-3afedbba556f | cpp | google/quiche | moqt_framer | quiche/quic/moqt/moqt_framer.cc | quiche/quic/moqt/moqt_framer_test.cc | #include "quiche/quic/moqt/moqt_framer.h"
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_priority.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_data_writer.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/wire_serialization.h"
namespace moqt {
namespace {
using ::quiche::QuicheBuffer;
using ::quiche::WireBytes;
using ::quiche::WireOptional;
using ::quiche::WireSpan;
using ::quiche::WireStringWithVarInt62Length;
using ::quiche::WireUint8;
using ::quiche::WireVarInt62;
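// Wire helpers for MoQT key/value parameters: string parameters are encoded
// as (type, length-prefixed value) and integer parameters as (type, varint
// length, varint value).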
struct StringParameter {
template <typename Enum>
StringParameter(Enum type, absl::string_view data)
: type(static_cast<uint64_t>(type)), data(data) {
static_assert(std::is_enum_v<Enum>);
}
uint64_t type;
absl::string_view data;
};
class WireStringParameter {
public:
using DataType = StringParameter;
explicit WireStringParameter(const StringParameter& parameter)
: parameter_(parameter) {}
size_t GetLengthOnWire() {
return quiche::ComputeLengthOnWire(
WireVarInt62(parameter_.type),
WireStringWithVarInt62Length(parameter_.data));
}
absl::Status SerializeIntoWriter(quiche::QuicheDataWriter& writer) {
return quiche::SerializeIntoWriter(
writer, WireVarInt62(parameter_.type),
WireStringWithVarInt62Length(parameter_.data));
}
private:
const StringParameter& parameter_;
};
struct IntParameter {
template <typename Enum, typename Param>
IntParameter(Enum type, Param value)
: type(static_cast<uint64_t>(type)), value(static_cast<uint64_t>(value)) {
static_assert(std::is_enum_v<Enum>);
static_assert(std::is_enum_v<Param> || std::is_unsigned_v<Param>);
}
uint64_t type;
uint64_t value;
};
class WireIntParameter {
public:
using DataType = IntParameter;
explicit WireIntParameter(const IntParameter& parameter)
: parameter_(parameter) {}
size_t GetLengthOnWire() {
return quiche::ComputeLengthOnWire(
WireVarInt62(parameter_.type),
WireVarInt62(NeededVarIntLen(parameter_.value)),
WireVarInt62(parameter_.value));
}
absl::Status SerializeIntoWriter(quiche::QuicheDataWriter& writer) {
return quiche::SerializeIntoWriter(
writer, WireVarInt62(parameter_.type),
WireVarInt62(NeededVarIntLen(parameter_.value)),
WireVarInt62(parameter_.value));
}
private:
size_t NeededVarIntLen(const uint64_t value) {
return static_cast<size_t>(quic::QuicDataWriter::GetVarInt62Len(value));
}
const IntParameter& parameter_;
};
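// Serializes the subscribe parameters as a count followed by whichever
// string and integer parameters are actually present.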
class WireSubscribeParameterList {
public:
explicit WireSubscribeParameterList(const MoqtSubscribeParameters& list)
: list_(list) {}
size_t GetLengthOnWire() {
auto string_parameters = StringParameters();
auto int_parameters = IntParameters();
return quiche::ComputeLengthOnWire(
WireVarInt62(string_parameters.size() + int_parameters.size()),
WireSpan<WireStringParameter>(string_parameters),
WireSpan<WireIntParameter>(int_parameters));
}
absl::Status SerializeIntoWriter(quiche::QuicheDataWriter& writer) {
auto string_parameters = StringParameters();
auto int_parameters = IntParameters();
return quiche::SerializeIntoWriter(
writer, WireVarInt62(string_parameters.size() + int_parameters.size()),
WireSpan<WireStringParameter>(string_parameters),
WireSpan<WireIntParameter>(int_parameters));
}
private:
absl::InlinedVector<StringParameter, 1> StringParameters() const {
absl::InlinedVector<StringParameter, 1> result;
if (list_.authorization_info.has_value()) {
result.push_back(
StringParameter(MoqtTrackRequestParameter::kAuthorizationInfo,
*list_.authorization_info));
}
return result;
}
absl::InlinedVector<IntParameter, 3> IntParameters() const {
absl::InlinedVector<IntParameter, 3> result;
if (list_.delivery_timeout.has_value()) {
QUICHE_DCHECK_GE(*list_.delivery_timeout, quic::QuicTimeDelta::Zero());
result.push_back(IntParameter(
MoqtTrackRequestParameter::kDeliveryTimeout,
static_cast<uint64_t>(list_.delivery_timeout->ToMilliseconds())));
}
if (list_.max_cache_duration.has_value()) {
QUICHE_DCHECK_GE(*list_.max_cache_duration, quic::QuicTimeDelta::Zero());
result.push_back(IntParameter(
MoqtTrackRequestParameter::kMaxCacheDuration,
static_cast<uint64_t>(list_.max_cache_duration->ToMilliseconds())));
}
if (list_.object_ack_window.has_value()) {
QUICHE_DCHECK_GE(*list_.object_ack_window, quic::QuicTimeDelta::Zero());
result.push_back(IntParameter(
MoqtTrackRequestParameter::kOackWindowSize,
static_cast<uint64_t>(list_.object_ack_window->ToMicroseconds())));
}
return result;
}
const MoqtSubscribeParameters& list_;
};
class WireFullTrackName {
public:
using DataType = FullTrackName;
WireFullTrackName(const FullTrackName& name, bool includes_name)
: name_(name), includes_name_(includes_name) {}
size_t GetLengthOnWire() {
return quiche::ComputeLengthOnWire(
WireVarInt62(num_elements()),
WireSpan<WireStringWithVarInt62Length, std::string>(name_.tuple()));
}
absl::Status SerializeIntoWriter(quiche::QuicheDataWriter& writer) {
return quiche::SerializeIntoWriter(
writer, WireVarInt62(num_elements()),
WireSpan<WireStringWithVarInt62Length, std::string>(name_.tuple()));
}
private:
size_t num_elements() const {
return includes_name_ ? (name_.tuple().size() - 1) : name_.tuple().size();
}
const FullTrackName& name_;
const bool includes_name_;
};
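// Serializes the given wire fields into one buffer; on failure logs a bug and
// returns an empty buffer.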
template <typename... Ts>
QuicheBuffer Serialize(Ts... data) {
absl::StatusOr<QuicheBuffer> buffer = quiche::SerializeIntoBuffer(
quiche::SimpleBufferAllocator::Get(), data...);
if (!buffer.ok()) {
QUICHE_BUG(moqt_failed_serialization)
<< "Failed to serialize MoQT frame: " << buffer.status();
return QuicheBuffer();
}
return *std::move(buffer);
}
WireUint8 WireDeliveryOrder(std::optional<MoqtDeliveryOrder> delivery_order) {
if (!delivery_order.has_value()) {
return WireUint8(0x00);
}
switch (*delivery_order) {
case MoqtDeliveryOrder::kAscending:
return WireUint8(0x01);
case MoqtDeliveryOrder::kDescending:
return WireUint8(0x02);
}
QUICHE_NOTREACHED();
return WireUint8(0xff);
}
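// Encodes a signed value for a varint field: non-negative v becomes 2v and
// negative v becomes 2|v| + 1, carrying the sign in the low bit.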
uint64_t SignedVarintSerializedForm(int64_t value) {
if (value < 0) {
return ((-value) << 1) | 0x01;
}
return value << 1;
}
}
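// Serializes the header for an object on a data stream. Only the first object
// on a stream carries the full header (stream type, subscribe ID, track
// alias, priority, ...); later objects carry just the per-object fields.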
quiche::QuicheBuffer MoqtFramer::SerializeObjectHeader(
const MoqtObject& message, bool is_first_in_stream) {
if (!ValidateObjectMetadata(message)) {
QUIC_BUG(quic_bug_serialize_object_header_01)
<< "Object metadata is invalid";
return quiche::QuicheBuffer();
}
if (message.forwarding_preference == MoqtForwardingPreference::kDatagram) {
QUIC_BUG(quic_bug_serialize_object_header_02)
<< "Datagrams use SerializeObjectDatagram()";
return quiche::QuicheBuffer();
}
if (!is_first_in_stream) {
switch (message.forwarding_preference) {
case MoqtForwardingPreference::kTrack:
return (message.payload_length == 0)
? Serialize(WireVarInt62(message.group_id),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length),
WireVarInt62(message.object_status))
: Serialize(WireVarInt62(message.group_id),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length));
case MoqtForwardingPreference::kSubgroup:
return (message.payload_length == 0)
? Serialize(WireVarInt62(message.object_id),
WireVarInt62(message.payload_length),
WireVarInt62(static_cast<uint64_t>(
message.object_status)))
: Serialize(WireVarInt62(message.object_id),
WireVarInt62(message.payload_length));
default:
QUICHE_NOTREACHED();
return quiche::QuicheBuffer();
}
}
MoqtDataStreamType message_type =
GetMessageTypeForForwardingPreference(message.forwarding_preference);
switch (message.forwarding_preference) {
case MoqtForwardingPreference::kTrack:
return (message.payload_length == 0)
? Serialize(WireVarInt62(message_type),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.track_alias),
WireUint8(message.publisher_priority),
WireVarInt62(message.group_id),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length),
WireVarInt62(message.object_status))
: Serialize(WireVarInt62(message_type),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.track_alias),
WireUint8(message.publisher_priority),
WireVarInt62(message.group_id),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length));
case MoqtForwardingPreference::kSubgroup:
return (message.payload_length == 0)
? Serialize(WireVarInt62(message_type),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.track_alias),
WireVarInt62(message.group_id),
WireVarInt62(*message.subgroup_id),
WireUint8(message.publisher_priority),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length),
WireVarInt62(message.object_status))
: Serialize(WireVarInt62(message_type),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.track_alias),
WireVarInt62(message.group_id),
WireVarInt62(*message.subgroup_id),
WireUint8(message.publisher_priority),
WireVarInt62(message.object_id),
WireVarInt62(message.payload_length));
case MoqtForwardingPreference::kDatagram:
QUICHE_NOTREACHED();
return quiche::QuicheBuffer();
}
}
quiche::QuicheBuffer MoqtFramer::SerializeObjectDatagram(
const MoqtObject& message, absl::string_view payload) {
if (!ValidateObjectMetadata(message)) {
QUIC_BUG(quic_bug_serialize_object_datagram_01)
<< "Object metadata is invalid";
return quiche::QuicheBuffer();
}
if (message.forwarding_preference != MoqtForwardingPreference::kDatagram) {
QUIC_BUG(quic_bug_serialize_object_datagram_02)
<< "Only datagrams use SerializeObjectDatagram()";
return quiche::QuicheBuffer();
}
if (message.payload_length != payload.length()) {
QUIC_BUG(quic_bug_serialize_object_datagram_03)
<< "Payload length does not match payload";
return quiche::QuicheBuffer();
}
if (message.payload_length == 0) {
return Serialize(
WireVarInt62(MoqtDataStreamType::kObjectDatagram),
WireVarInt62(message.subscribe_id), WireVarInt62(message.track_alias),
WireVarInt62(message.group_id), WireVarInt62(message.object_id),
WireUint8(message.publisher_priority),
WireVarInt62(message.payload_length),
WireVarInt62(message.object_status));
}
return Serialize(
WireVarInt62(MoqtDataStreamType::kObjectDatagram),
WireVarInt62(message.subscribe_id), WireVarInt62(message.track_alias),
WireVarInt62(message.group_id), WireVarInt62(message.object_id),
WireUint8(message.publisher_priority),
WireVarInt62(message.payload_length), WireBytes(payload));
}
quiche::QuicheBuffer MoqtFramer::SerializeClientSetup(
const MoqtClientSetup& message) {
absl::InlinedVector<IntParameter, 1> int_parameters;
absl::InlinedVector<StringParameter, 1> string_parameters;
if (message.role.has_value()) {
int_parameters.push_back(
IntParameter(MoqtSetupParameter::kRole, *message.role));
}
if (message.max_subscribe_id.has_value()) {
int_parameters.push_back(IntParameter(MoqtSetupParameter::kMaxSubscribeId,
*message.max_subscribe_id));
}
if (message.supports_object_ack) {
int_parameters.push_back(
IntParameter(MoqtSetupParameter::kSupportObjectAcks, 1u));
}
if (!using_webtrans_ && message.path.has_value()) {
string_parameters.push_back(
StringParameter(MoqtSetupParameter::kPath, *message.path));
}
return Serialize(
WireVarInt62(MoqtMessageType::kClientSetup),
WireVarInt62(message.supported_versions.size()),
WireSpan<WireVarInt62, MoqtVersion>(message.supported_versions),
WireVarInt62(string_parameters.size() + int_parameters.size()),
WireSpan<WireIntParameter>(int_parameters),
WireSpan<WireStringParameter>(string_parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeServerSetup(
const MoqtServerSetup& message) {
absl::InlinedVector<IntParameter, 1> int_parameters;
if (message.role.has_value()) {
int_parameters.push_back(
IntParameter(MoqtSetupParameter::kRole, *message.role));
}
if (message.max_subscribe_id.has_value()) {
int_parameters.push_back(IntParameter(MoqtSetupParameter::kMaxSubscribeId,
*message.max_subscribe_id));
}
if (message.supports_object_ack) {
int_parameters.push_back(
IntParameter(MoqtSetupParameter::kSupportObjectAcks, 1u));
}
return Serialize(WireVarInt62(MoqtMessageType::kServerSetup),
WireVarInt62(message.selected_version),
WireVarInt62(int_parameters.size()),
WireSpan<WireIntParameter>(int_parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribe(
const MoqtSubscribe& message) {
MoqtFilterType filter_type = GetFilterType(message);
if (filter_type == MoqtFilterType::kNone) {
QUICHE_BUG(MoqtFramer_invalid_subscribe) << "Invalid object range";
return quiche::QuicheBuffer();
}
switch (filter_type) {
case MoqtFilterType::kLatestGroup:
case MoqtFilterType::kLatestObject:
return Serialize(
WireVarInt62(MoqtMessageType::kSubscribe),
WireVarInt62(message.subscribe_id), WireVarInt62(message.track_alias),
WireFullTrackName(message.full_track_name, true),
WireUint8(message.subscriber_priority),
WireDeliveryOrder(message.group_order), WireVarInt62(filter_type),
WireSubscribeParameterList(message.parameters));
case MoqtFilterType::kAbsoluteStart:
return Serialize(
WireVarInt62(MoqtMessageType::kSubscribe),
WireVarInt62(message.subscribe_id), WireVarInt62(message.track_alias),
WireFullTrackName(message.full_track_name, true),
WireUint8(message.subscriber_priority),
WireDeliveryOrder(message.group_order), WireVarInt62(filter_type),
WireVarInt62(*message.start_group),
WireVarInt62(*message.start_object),
WireSubscribeParameterList(message.parameters));
case MoqtFilterType::kAbsoluteRange:
return Serialize(
WireVarInt62(MoqtMessageType::kSubscribe),
WireVarInt62(message.subscribe_id), WireVarInt62(message.track_alias),
WireFullTrackName(message.full_track_name, true),
WireUint8(message.subscriber_priority),
WireDeliveryOrder(message.group_order), WireVarInt62(filter_type),
WireVarInt62(*message.start_group),
WireVarInt62(*message.start_object), WireVarInt62(*message.end_group),
WireVarInt62(message.end_object.has_value() ? *message.end_object + 1
: 0),
WireSubscribeParameterList(message.parameters));
default:
QUICHE_BUG(MoqtFramer_end_group_missing) << "Subscribe framing error.";
return quiche::QuicheBuffer();
}
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeOk(
const MoqtSubscribeOk& message) {
if (message.parameters.authorization_info.has_value()) {
QUICHE_BUG(MoqtFramer_invalid_subscribe_ok)
<< "SUBSCRIBE_OK with delivery timeout";
}
if (message.largest_id.has_value()) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeOk),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.expires.ToMilliseconds()),
WireDeliveryOrder(message.group_order), WireUint8(1),
WireVarInt62(message.largest_id->group),
WireVarInt62(message.largest_id->object),
WireSubscribeParameterList(message.parameters));
}
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeOk),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.expires.ToMilliseconds()),
WireDeliveryOrder(message.group_order), WireUint8(0),
WireSubscribeParameterList(message.parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeError(
const MoqtSubscribeError& message) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeError),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.error_code),
WireStringWithVarInt62Length(message.reason_phrase),
WireVarInt62(message.track_alias));
}
quiche::QuicheBuffer MoqtFramer::SerializeUnsubscribe(
const MoqtUnsubscribe& message) {
return Serialize(WireVarInt62(MoqtMessageType::kUnsubscribe),
WireVarInt62(message.subscribe_id));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeDone(
const MoqtSubscribeDone& message) {
if (message.final_id.has_value()) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeDone),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.status_code),
WireStringWithVarInt62Length(message.reason_phrase),
WireUint8(1), WireVarInt62(message.final_id->group),
WireVarInt62(message.final_id->object));
}
return Serialize(
WireVarInt62(MoqtMessageType::kSubscribeDone),
WireVarInt62(message.subscribe_id), WireVarInt62(message.status_code),
WireStringWithVarInt62Length(message.reason_phrase), WireUint8(0));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeUpdate(
const MoqtSubscribeUpdate& message) {
if (message.parameters.authorization_info.has_value()) {
QUICHE_BUG(MoqtFramer_invalid_subscribe_update)
<< "SUBSCRIBE_UPDATE with authorization info";
}
uint64_t end_group =
message.end_group.has_value() ? *message.end_group + 1 : 0;
uint64_t end_object =
message.end_object.has_value() ? *message.end_object + 1 : 0;
if (end_group == 0 && end_object != 0) {
QUICHE_BUG(MoqtFramer_invalid_subscribe_update) << "Invalid object range";
return quiche::QuicheBuffer();
}
return Serialize(
WireVarInt62(MoqtMessageType::kSubscribeUpdate),
WireVarInt62(message.subscribe_id), WireVarInt62(message.start_group),
WireVarInt62(message.start_object), WireVarInt62(end_group),
WireVarInt62(end_object), WireUint8(message.subscriber_priority),
WireSubscribeParameterList(message.parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeAnnounce(
const MoqtAnnounce& message) {
if (message.parameters.delivery_timeout.has_value()) {
QUICHE_BUG(MoqtFramer_invalid_announce) << "ANNOUNCE with delivery timeout";
}
return Serialize(
WireVarInt62(static_cast<uint64_t>(MoqtMessageType::kAnnounce)),
WireFullTrackName(message.track_namespace, false),
WireSubscribeParameterList(message.parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeAnnounceOk(
const MoqtAnnounceOk& message) {
return Serialize(WireVarInt62(MoqtMessageType::kAnnounceOk),
WireFullTrackName(message.track_namespace, false));
}
quiche::QuicheBuffer MoqtFramer::SerializeAnnounceError(
const MoqtAnnounceError& message) {
return Serialize(WireVarInt62(MoqtMessageType::kAnnounceError),
WireFullTrackName(message.track_namespace, false),
WireVarInt62(message.error_code),
WireStringWithVarInt62Length(message.reason_phrase));
}
quiche::QuicheBuffer MoqtFramer::SerializeAnnounceCancel(
const MoqtAnnounceCancel& message) {
return Serialize(WireVarInt62(MoqtMessageType::kAnnounceCancel),
WireFullTrackName(message.track_namespace, false),
WireVarInt62(message.error_code),
WireStringWithVarInt62Length(message.reason_phrase));
}
quiche::QuicheBuffer MoqtFramer::SerializeTrackStatusRequest(
const MoqtTrackStatusRequest& message) {
return Serialize(WireVarInt62(MoqtMessageType::kTrackStatusRequest),
WireFullTrackName(message.full_track_name, true));
}
quiche::QuicheBuffer MoqtFramer::SerializeUnannounce(
const MoqtUnannounce& message) {
return Serialize(WireVarInt62(MoqtMessageType::kUnannounce),
WireFullTrackName(message.track_namespace, false));
}
quiche::QuicheBuffer MoqtFramer::SerializeTrackStatus(
const MoqtTrackStatus& message) {
return Serialize(WireVarInt62(MoqtMessageType::kTrackStatus),
WireFullTrackName(message.full_track_name, true),
WireVarInt62(message.status_code),
WireVarInt62(message.last_group),
WireVarInt62(message.last_object));
}
quiche::QuicheBuffer MoqtFramer::SerializeGoAway(const MoqtGoAway& message) {
return Serialize(WireVarInt62(MoqtMessageType::kGoAway),
WireStringWithVarInt62Length(message.new_session_uri));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeNamespace(
const MoqtSubscribeNamespace& message) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeNamespace),
WireFullTrackName(message.track_namespace, false),
WireSubscribeParameterList(message.parameters));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeNamespaceOk(
const MoqtSubscribeNamespaceOk& message) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeNamespaceOk),
WireFullTrackName(message.track_namespace, false));
}
quiche::QuicheBuffer MoqtFramer::SerializeSubscribeNamespaceError(
const MoqtSubscribeNamespaceError& message) {
return Serialize(WireVarInt62(MoqtMessageType::kSubscribeNamespaceError),
WireFullTrackName(message.track_namespace, false),
WireVarInt62(message.error_code),
WireStringWithVarInt62Length(message.reason_phrase));
}
quiche::QuicheBuffer MoqtFramer::SerializeUnsubscribeNamespace(
const MoqtUnsubscribeNamespace& message) {
return Serialize(WireVarInt62(MoqtMessageType::kUnsubscribeNamespace),
WireFullTrackName(message.track_namespace, false));
}
quiche::QuicheBuffer MoqtFramer::SerializeMaxSubscribeId(
const MoqtMaxSubscribeId& message) {
return Serialize(WireVarInt62(MoqtMessageType::kMaxSubscribeId),
WireVarInt62(message.max_subscribe_id));
}
quiche::QuicheBuffer MoqtFramer::SerializeObjectAck(
const MoqtObjectAck& message) {
return Serialize(WireVarInt62(MoqtMessageType::kObjectAck),
WireVarInt62(message.subscribe_id),
WireVarInt62(message.group_id),
WireVarInt62(message.object_id),
WireVarInt62(SignedVarintSerializedForm(
message.delta_from_deadline.ToMicroseconds())));
}
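// An object is well-formed only if a non-normal status carries no payload and
// a subgroup ID is present exactly when the forwarding preference is
// kSubgroup.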
bool MoqtFramer::ValidateObjectMetadata(const MoqtObject& object) {
if (object.object_status != MoqtObjectStatus::kNormal &&
object.payload_length > 0) {
return false;
}
if ((object.forwarding_preference == MoqtForwardingPreference::kSubgroup) !=
object.subgroup_id.has_value()) {
return false;
}
return true;
}
} | #include "quiche/quic/moqt/moqt_framer.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/test_tools/moqt_test_message.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace moqt::test {
struct MoqtFramerTestParams {
MoqtFramerTestParams(MoqtMessageType message_type, bool uses_web_transport)
: message_type(message_type), uses_web_transport(uses_web_transport) {}
MoqtMessageType message_type;
bool uses_web_transport;
};
std::vector<MoqtFramerTestParams> GetMoqtFramerTestParams() {
std::vector<MoqtFramerTestParams> params;
std::vector<MoqtMessageType> message_types = {
MoqtMessageType::kSubscribe,
MoqtMessageType::kSubscribeOk,
MoqtMessageType::kSubscribeError,
MoqtMessageType::kUnsubscribe,
MoqtMessageType::kSubscribeDone,
MoqtMessageType::kAnnounceCancel,
MoqtMessageType::kTrackStatusRequest,
MoqtMessageType::kTrackStatus,
MoqtMessageType::kAnnounce,
MoqtMessageType::kAnnounceOk,
MoqtMessageType::kAnnounceError,
MoqtMessageType::kUnannounce,
MoqtMessageType::kGoAway,
MoqtMessageType::kSubscribeNamespace,
MoqtMessageType::kSubscribeNamespaceOk,
MoqtMessageType::kSubscribeNamespaceError,
MoqtMessageType::kUnsubscribeNamespace,
MoqtMessageType::kMaxSubscribeId,
MoqtMessageType::kObjectAck,
MoqtMessageType::kClientSetup,
MoqtMessageType::kServerSetup,
};
for (const MoqtMessageType message_type : message_types) {
if (message_type == MoqtMessageType::kClientSetup) {
for (const bool uses_web_transport : {false, true}) {
params.push_back(
MoqtFramerTestParams(message_type, uses_web_transport));
}
} else {
params.push_back(MoqtFramerTestParams(message_type, true));
}
}
return params;
}
std::string ParamNameFormatter(
const testing::TestParamInfo<MoqtFramerTestParams>& info) {
return MoqtMessageTypeToString(info.param.message_type) + "_" +
(info.param.uses_web_transport ? "WebTransport" : "QUIC");
}
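// Serializes `message` with payload_length set to match `payload`, choosing
// the datagram or stream-header path, and returns header plus payload.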
quiche::QuicheBuffer SerializeObject(MoqtFramer& framer,
const MoqtObject& message,
absl::string_view payload,
bool is_first_in_stream) {
MoqtObject adjusted_message = message;
adjusted_message.payload_length = payload.size();
quiche::QuicheBuffer header =
(message.forwarding_preference == MoqtForwardingPreference::kDatagram)
? framer.SerializeObjectDatagram(adjusted_message, payload)
: framer.SerializeObjectHeader(adjusted_message, is_first_in_stream);
if (header.empty()) {
return quiche::QuicheBuffer();
}
return quiche::QuicheBuffer::Copy(
quiche::SimpleBufferAllocator::Get(),
absl::StrCat(header.AsStringView(), payload));
}
class MoqtFramerTest
: public quic::test::QuicTestWithParam<MoqtFramerTestParams> {
public:
MoqtFramerTest()
: message_type_(GetParam().message_type),
webtrans_(GetParam().uses_web_transport),
buffer_allocator_(quiche::SimpleBufferAllocator::Get()),
framer_(buffer_allocator_, GetParam().uses_web_transport) {}
std::unique_ptr<TestMessageBase> MakeMessage(MoqtMessageType message_type) {
return CreateTestMessage(message_type, webtrans_);
}
quiche::QuicheBuffer SerializeMessage(
TestMessageBase::MessageStructuredData& structured_data) {
switch (message_type_) {
case MoqtMessageType::kSubscribe: {
auto data = std::get<MoqtSubscribe>(structured_data);
return framer_.SerializeSubscribe(data);
}
case MoqtMessageType::kSubscribeOk: {
auto data = std::get<MoqtSubscribeOk>(structured_data);
return framer_.SerializeSubscribeOk(data);
}
case MoqtMessageType::kSubscribeError: {
auto data = std::get<MoqtSubscribeError>(structured_data);
return framer_.SerializeSubscribeError(data);
}
case MoqtMessageType::kUnsubscribe: {
auto data = std::get<MoqtUnsubscribe>(structured_data);
return framer_.SerializeUnsubscribe(data);
}
case MoqtMessageType::kSubscribeDone: {
auto data = std::get<MoqtSubscribeDone>(structured_data);
return framer_.SerializeSubscribeDone(data);
}
case MoqtMessageType::kAnnounce: {
auto data = std::get<MoqtAnnounce>(structured_data);
return framer_.SerializeAnnounce(data);
}
case moqt::MoqtMessageType::kAnnounceOk: {
auto data = std::get<MoqtAnnounceOk>(structured_data);
return framer_.SerializeAnnounceOk(data);
}
case moqt::MoqtMessageType::kAnnounceError: {
auto data = std::get<MoqtAnnounceError>(structured_data);
return framer_.SerializeAnnounceError(data);
}
case moqt::MoqtMessageType::kAnnounceCancel: {
auto data = std::get<MoqtAnnounceCancel>(structured_data);
return framer_.SerializeAnnounceCancel(data);
}
case moqt::MoqtMessageType::kTrackStatusRequest: {
auto data = std::get<MoqtTrackStatusRequest>(structured_data);
return framer_.SerializeTrackStatusRequest(data);
}
case MoqtMessageType::kUnannounce: {
auto data = std::get<MoqtUnannounce>(structured_data);
return framer_.SerializeUnannounce(data);
}
case moqt::MoqtMessageType::kTrackStatus: {
auto data = std::get<MoqtTrackStatus>(structured_data);
return framer_.SerializeTrackStatus(data);
}
case moqt::MoqtMessageType::kGoAway: {
auto data = std::get<MoqtGoAway>(structured_data);
return framer_.SerializeGoAway(data);
}
case moqt::MoqtMessageType::kSubscribeNamespace: {
auto data = std::get<MoqtSubscribeNamespace>(structured_data);
return framer_.SerializeSubscribeNamespace(data);
}
case moqt::MoqtMessageType::kSubscribeNamespaceOk: {
auto data = std::get<MoqtSubscribeNamespaceOk>(structured_data);
return framer_.SerializeSubscribeNamespaceOk(data);
}
case moqt::MoqtMessageType::kSubscribeNamespaceError: {
auto data = std::get<MoqtSubscribeNamespaceError>(structured_data);
return framer_.SerializeSubscribeNamespaceError(data);
}
case moqt::MoqtMessageType::kUnsubscribeNamespace: {
auto data = std::get<MoqtUnsubscribeNamespace>(structured_data);
return framer_.SerializeUnsubscribeNamespace(data);
}
case moqt::MoqtMessageType::kMaxSubscribeId: {
auto data = std::get<MoqtMaxSubscribeId>(structured_data);
return framer_.SerializeMaxSubscribeId(data);
}
case moqt::MoqtMessageType::kObjectAck: {
auto data = std::get<MoqtObjectAck>(structured_data);
return framer_.SerializeObjectAck(data);
}
case MoqtMessageType::kClientSetup: {
auto data = std::get<MoqtClientSetup>(structured_data);
return framer_.SerializeClientSetup(data);
}
case MoqtMessageType::kServerSetup: {
auto data = std::get<MoqtServerSetup>(structured_data);
return framer_.SerializeServerSetup(data);
}
default:
return quiche::QuicheBuffer();
}
}
MoqtMessageType message_type_;
bool webtrans_;
quiche::SimpleBufferAllocator* buffer_allocator_;
MoqtFramer framer_;
};
INSTANTIATE_TEST_SUITE_P(MoqtFramerTests, MoqtFramerTest,
testing::ValuesIn(GetMoqtFramerTestParams()),
ParamNameFormatter);
TEST_P(MoqtFramerTest, OneMessage) {
auto message = MakeMessage(message_type_);
auto structured_data = message->structured_data();
auto buffer = SerializeMessage(structured_data);
EXPECT_EQ(buffer.size(), message->total_message_size());
quiche::test::CompareCharArraysWithHexError(
"frame encoding", buffer.data(), buffer.size(),
message->PacketSample().data(), message->PacketSample().size());
}
class MoqtFramerSimpleTest : public quic::test::QuicTest {
public:
MoqtFramerSimpleTest()
: buffer_allocator_(quiche::SimpleBufferAllocator::Get()),
framer_(buffer_allocator_, true) {}
quiche::SimpleBufferAllocator* buffer_allocator_;
MoqtFramer framer_;
const uint8_t* BufferAtOffset(quiche::QuicheBuffer& buffer, size_t offset) {
const char* data = buffer.data();
return reinterpret_cast<const uint8_t*>(data + offset);
}
};
TEST_F(MoqtFramerSimpleTest, GroupMiddler) {
auto header = std::make_unique<StreamHeaderSubgroupMessage>();
auto buffer1 = SerializeObject(
framer_, std::get<MoqtObject>(header->structured_data()), "foo", true);
EXPECT_EQ(buffer1.size(), header->total_message_size());
EXPECT_EQ(buffer1.AsStringView(), header->PacketSample());
auto middler = std::make_unique<StreamMiddlerSubgroupMessage>();
auto buffer2 = SerializeObject(
framer_, std::get<MoqtObject>(middler->structured_data()), "bar", false);
EXPECT_EQ(buffer2.size(), middler->total_message_size());
EXPECT_EQ(buffer2.AsStringView(), middler->PacketSample());
}
TEST_F(MoqtFramerSimpleTest, TrackMiddler) {
auto header = std::make_unique<StreamHeaderTrackMessage>();
auto buffer1 = SerializeObject(
framer_, std::get<MoqtObject>(header->structured_data()), "foo", true);
EXPECT_EQ(buffer1.size(), header->total_message_size());
EXPECT_EQ(buffer1.AsStringView(), header->PacketSample());
auto middler = std::make_unique<StreamMiddlerTrackMessage>();
auto buffer2 = SerializeObject(
framer_, std::get<MoqtObject>(middler->structured_data()), "bar", false);
EXPECT_EQ(buffer2.size(), middler->total_message_size());
EXPECT_EQ(buffer2.AsStringView(), middler->PacketSample());
}
TEST_F(MoqtFramerSimpleTest, BadObjectInput) {
MoqtObject object = {
3,
4,
5,
6,
7,
MoqtObjectStatus::kNormal,
MoqtForwardingPreference::kSubgroup,
8,
3,
};
quiche::QuicheBuffer buffer;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectDatagram(object, "foo"),
"Only datagrams use SerializeObjectDatagram()");
EXPECT_TRUE(buffer.empty());
object.subgroup_id = std::nullopt;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectHeader(object, false),
"Object metadata is invalid");
EXPECT_TRUE(buffer.empty());
object.subgroup_id = 8;
object.forwarding_preference = MoqtForwardingPreference::kTrack;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectHeader(object, false),
"Object metadata is invalid");
EXPECT_TRUE(buffer.empty());
object.forwarding_preference = MoqtForwardingPreference::kSubgroup;
object.object_status = MoqtObjectStatus::kEndOfGroup;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectHeader(object, false),
"Object metadata is invalid");
EXPECT_TRUE(buffer.empty());
}
TEST_F(MoqtFramerSimpleTest, BadDatagramInput) {
MoqtObject object = {
3,
4,
5,
6,
7,
MoqtObjectStatus::kNormal,
MoqtForwardingPreference::kDatagram,
std::nullopt,
3,
};
quiche::QuicheBuffer buffer;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectHeader(object, false),
"Datagrams use SerializeObjectDatagram()")
EXPECT_TRUE(buffer.empty());
object.object_status = MoqtObjectStatus::kEndOfGroup;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectDatagram(object, "foo"),
"Object metadata is invalid");
EXPECT_TRUE(buffer.empty());
object.object_status = MoqtObjectStatus::kNormal;
object.subgroup_id = 8;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectDatagram(object, "foo"),
"Object metadata is invalid");
EXPECT_TRUE(buffer.empty());
object.subgroup_id = std::nullopt;
EXPECT_QUIC_BUG(buffer = framer_.SerializeObjectDatagram(object, "foobar"),
"Payload length does not match payload");
EXPECT_TRUE(buffer.empty());
}
TEST_F(MoqtFramerSimpleTest, Datagram) {
auto datagram = std::make_unique<ObjectDatagramMessage>();
MoqtObject object = {
3,
4,
5,
6,
7,
MoqtObjectStatus::kNormal,
MoqtForwardingPreference::kDatagram,
std::nullopt,
3,
};
std::string payload = "foo";
quiche::QuicheBuffer buffer;
buffer = framer_.SerializeObjectDatagram(object, payload);
EXPECT_EQ(buffer.size(), datagram->total_message_size());
EXPECT_EQ(buffer.AsStringView(), datagram->PacketSample());
}
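// Iterates over every combination of optional start/end group and object IDs
// and verifies that the serialized SUBSCRIBE carries the expected filter type,
// or triggers a bug for invalid ranges.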
TEST_F(MoqtFramerSimpleTest, AllSubscribeInputs) {
for (std::optional<uint64_t> start_group :
{std::optional<uint64_t>(), std::optional<uint64_t>(4)}) {
for (std::optional<uint64_t> start_object :
{std::optional<uint64_t>(), std::optional<uint64_t>(0)}) {
for (std::optional<uint64_t> end_group :
{std::optional<uint64_t>(), std::optional<uint64_t>(7)}) {
for (std::optional<uint64_t> end_object :
{std::optional<uint64_t>(), std::optional<uint64_t>(3)}) {
MoqtSubscribe subscribe = {
3,
4,
FullTrackName({"foo", "abcd"}),
0x20,
std::nullopt,
start_group,
start_object,
end_group,
end_object,
MoqtSubscribeParameters{"bar", std::nullopt, std::nullopt,
std::nullopt},
};
quiche::QuicheBuffer buffer;
MoqtFilterType expected_filter_type = MoqtFilterType::kNone;
if (!start_group.has_value() && !start_object.has_value() &&
!end_group.has_value() && !end_object.has_value()) {
expected_filter_type = MoqtFilterType::kLatestObject;
} else if (!start_group.has_value() && start_object.has_value() &&
*start_object == 0 && !end_group.has_value() &&
!end_object.has_value()) {
expected_filter_type = MoqtFilterType::kLatestGroup;
} else if (start_group.has_value() && start_object.has_value() &&
!end_group.has_value() && !end_object.has_value()) {
expected_filter_type = MoqtFilterType::kAbsoluteStart;
} else if (start_group.has_value() && start_object.has_value() &&
end_group.has_value()) {
expected_filter_type = MoqtFilterType::kAbsoluteRange;
}
if (expected_filter_type == MoqtFilterType::kNone) {
EXPECT_QUIC_BUG(buffer = framer_.SerializeSubscribe(subscribe),
"Invalid object range");
EXPECT_EQ(buffer.size(), 0);
continue;
}
buffer = framer_.SerializeSubscribe(subscribe);
const uint8_t* read = BufferAtOffset(buffer, 15);
EXPECT_EQ(static_cast<MoqtFilterType>(*read), expected_filter_type);
EXPECT_GT(buffer.size(), 0);
if (expected_filter_type == MoqtFilterType::kAbsoluteRange &&
end_object.has_value()) {
const uint8_t* object_id = read + 4;
EXPECT_EQ(*object_id, *end_object + 1);
}
}
}
}
}
}
TEST_F(MoqtFramerSimpleTest, SubscribeEndBeforeStart) {
MoqtSubscribe subscribe = {
3,
4,
FullTrackName({"foo", "abcd"}),
0x20,
std::nullopt,
std::optional<uint64_t>(4),
std::optional<uint64_t>(3),
std::optional<uint64_t>(3),
std::nullopt,
MoqtSubscribeParameters{"bar", std::nullopt, std::nullopt, std::nullopt},
};
quiche::QuicheBuffer buffer;
EXPECT_QUIC_BUG(buffer = framer_.SerializeSubscribe(subscribe),
"Invalid object range");
EXPECT_EQ(buffer.size(), 0);
subscribe.end_group = 4;
subscribe.end_object = 1;
EXPECT_QUIC_BUG(buffer = framer_.SerializeSubscribe(subscribe),
"Invalid object range");
EXPECT_EQ(buffer.size(), 0);
}
TEST_F(MoqtFramerSimpleTest, SubscribeLatestGroupNonzeroObject) {
MoqtSubscribe subscribe = {
3,
4,
FullTrackName({"foo", "abcd"}),
0x20,
std::nullopt,
std::nullopt,
std::optional<uint64_t>(3),
std::nullopt,
std::nullopt,
MoqtSubscribeParameters{"bar", std::nullopt, std::nullopt, std::nullopt},
};
quiche::QuicheBuffer buffer;
EXPECT_QUIC_BUG(buffer = framer_.SerializeSubscribe(subscribe),
"Invalid object range");
EXPECT_EQ(buffer.size(), 0);
}
TEST_F(MoqtFramerSimpleTest, SubscribeUpdateEndGroupOnly) {
MoqtSubscribeUpdate subscribe_update = {
3,
4,
3,
4,
std::nullopt,
0xaa,
MoqtSubscribeParameters{std::nullopt, std::nullopt, std::nullopt,
std::nullopt},
};
quiche::QuicheBuffer buffer;
buffer = framer_.SerializeSubscribeUpdate(subscribe_update);
EXPECT_GT(buffer.size(), 0);
const uint8_t* end_group = BufferAtOffset(buffer, 4);
EXPECT_EQ(*end_group, 5);
const uint8_t* end_object = end_group + 1;
EXPECT_EQ(*end_object, 0);
}
TEST_F(MoqtFramerSimpleTest, SubscribeUpdateIncrementsEnd) {
MoqtSubscribeUpdate subscribe_update = {
3,
4,
3,
4,
6,
0xaa,
MoqtSubscribeParameters{std::nullopt, std::nullopt, std::nullopt,
std::nullopt},
};
quiche::QuicheBuffer buffer;
buffer = framer_.SerializeSubscribeUpdate(subscribe_update);
EXPECT_GT(buffer.size(), 0);
const uint8_t* end_group = BufferAtOffset(buffer, 4);
EXPECT_EQ(*end_group, 5);
const uint8_t* end_object = end_group + 1;
EXPECT_EQ(*end_object, 7);
}
TEST_F(MoqtFramerSimpleTest, SubscribeUpdateInvalidRange) {
MoqtSubscribeUpdate subscribe_update = {
3,
4,
3,
std::nullopt,
6,
0xaa,
MoqtSubscribeParameters{std::nullopt, std::nullopt, std::nullopt,
std::nullopt},
};
quiche::QuicheBuffer buffer;
EXPECT_QUIC_BUG(buffer = framer_.SerializeSubscribeUpdate(subscribe_update),
"Invalid object range");
EXPECT_EQ(buffer.size(), 0);
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_framer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_framer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
12911963-3d86-43d4-9313-02b2ed479c8a | cpp | tensorflow/tensorflow | shape_optimizer | tensorflow/core/grappler/optimizers/shape_optimizer.cc | tensorflow/core/grappler/optimizers/shape_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace grappler {
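// Replaces Prod(Shape(x)) with Size(x) when the product reduces over the whole
// shape vector, and Div(Size(a), Size(b)) with a constant when the ratio can
// be determined from statically known shapes.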
Status ShapeOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
bool can_optimize = false;
bool has_div = false;
bool has_size = false;
bool has_shape = false;
bool has_prod = false;
auto is_int = [](const NodeDef& node) -> bool {
return node.attr().at("T").type() == DT_INT32 ||
node.attr().at("T").type() == DT_INT64;
};
for (const NodeDef& node : item.graph.node()) {
if (IsShape(node)) {
has_shape = true;
} else if (IsProd(node) && is_int(node)) {
has_prod = true;
} else if (IsDiv(node) && is_int(node)) {
has_div = true;
} else if (IsSize(node)) {
has_size = true;
}
if ((has_shape && has_prod) || (has_div && has_size)) {
can_optimize = true;
break;
}
}
if (!can_optimize) {
return absl::AbortedError("Nothing to do.");
}
*optimized_graph = item.graph;
GraphProperties properties(item);
bool inferred_properties = false;
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (!IsShape(node)) {
continue;
}
for (MutableGraphView::InputPort fanout :
graph.GetFanout(MutableGraphView::OutputPort(&node, 0))) {
if (fanout.node->op() != "Prod") {
continue;
}
if (fanout.node->attr().count("keep_dims") != 0 &&
fanout.node->attr().at("keep_dims").b()) {
continue;
}
const MutableGraphView::OutputPort reduce_indices =
graph.GetRegularFanin(MutableGraphView::InputPort(fanout.node, 1));
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop =
properties.GetOutputProperties(reduce_indices.node->name());
const int prop_size = prop.size();
if (prop_size <= reduce_indices.port_id) {
continue;
}
const TensorShapeProto& reduction_indices_shape =
prop[reduce_indices.port_id].shape();
if (NumCoefficients(reduction_indices_shape) == 1) {
const auto& input_props = properties.GetInputProperties(node.name());
if (input_props.size() != 1) {
continue;
}
NodeDef size_node(*fanout.node);
const DataType type = input_props[0].dtype();
size_node.set_op("Size");
size_node.set_input(0, node.input(0));
size_node.set_input(1, AsControlDependency(node));
size_node.mutable_attr()->erase("Tidx");
size_node.mutable_attr()->erase("keep_dims");
(*size_node.mutable_attr())["out_type"] = fanout.node->attr().at("T");
(*size_node.mutable_attr())["T"].set_type(type);
size_node.set_device(node.device());
Status s = IsKernelRegisteredForNode(size_node);
if (!s.ok()) {
continue;
}
fanout.node->Swap(&size_node);
}
}
}
}
{
MutableGraphView graph(optimized_graph);
for (auto& node : *optimized_graph->mutable_node()) {
if (node.op() == "Div") {
const MutableGraphView::OutputPort input1 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 0));
const MutableGraphView::OutputPort input2 =
graph.GetRegularFanin(MutableGraphView::InputPort(&node, 1));
if (input1.node == nullptr || input2.node == nullptr) continue;
if (!IsSize(*input1.node) || !IsSize(*input2.node)) {
continue;
}
if (!inferred_properties) {
TF_RETURN_IF_ERROR(
properties.InferStatically(false,
false,
false));
inferred_properties = true;
}
const auto& prop1 = properties.GetInputProperties(input1.node->name());
const auto& prop2 = properties.GetInputProperties(input2.node->name());
if (prop1.size() != 1 || prop2.size() != 1) {
continue;
}
const TensorShapeProto& shape1 = prop1[0].shape();
const TensorShapeProto& shape2 = prop2[0].shape();
int64_t result = ComputeSizeRatio(shape1, shape2);
if (result >= 0) {
node.set_op("Const");
DataType dtype = node.attr().at("T").type();
node.mutable_attr()->erase("T");
(*node.mutable_attr())["dtype"].set_type(dtype);
TensorProto* t = (*node.mutable_attr())["value"].mutable_tensor();
t->set_dtype(dtype);
*t->mutable_tensor_shape() = TensorShapeProto();
if (dtype == DT_INT32) {
t->add_int_val(result);
} else {
t->add_int64_val(result);
}
node.set_input(0, AsControlDependency(node.input(0)));
node.set_input(1, AsControlDependency(node.input(1)));
}
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/shape_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class ShapeOptimizerTest : public GrapplerTest {};
TEST_F(ShapeOptimizerTest, OptimizeShapeProduct) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithOpName("e"), c, d, attrs.KeepDims(false));
Output f = ops::ReduceProd(s.WithOpName("f"), c, d, attrs.KeepDims(true));
GrapplerItem item;
item.fetch = {"e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
} else if (node.name() == "f") {
found++;
EXPECT_EQ("Prod", node.op());
EXPECT_EQ("c", node.input(0));
}
}
EXPECT_EQ(2, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
EXPECT_NEAR(tensors_expected[1].scalar<int>()(),
tensors_actual[1].scalar<int>()(), 0);
}
TEST_F(ShapeOptimizerTest, OptimizeShapeProductMissingKernel) {
{
std::vector<std::unique_ptr<Device>> devices;
SessionOptions session_options;
session_options.config.mutable_gpu_options()
->set_per_process_gpu_memory_fraction(0.1);
session_options.env = Env::Default();
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(session_options, "", &devices));
bool found_gpu = false;
for (const auto& d : devices) {
if (d->device_type() == DEVICE_GPU) {
found_gpu = true;
break;
}
}
if (!found_gpu) {
LOG(INFO) << "Skipping test that requires GPU.";
return;
}
}
tensorflow::Scope s = tensorflow::Scope::NewRootScope().WithDevice("/cpu:0");
Output a = ops::Const(s.WithOpName("a"), string("Hello"), {32, 16});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
ops::ReduceProd::Attrs attrs;
Output e = ops::ReduceProd(s.WithDevice("/gpu:0").WithOpName("e"), c, d,
attrs.KeepDims(false));
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Size", node.op());
EXPECT_EQ("a", node.input(0));
EXPECT_EQ("/cpu:0", node.device());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
}
TEST_F(ShapeOptimizerTest, OptimizeShapeRatio) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 3.14f, {32, 32});
Output b = ops::Const(s.WithOpName("b"), 3.14f, {32, 16});
Output c = ops::Size(s.WithOpName("c"), a);
Output d = ops::Size(s.WithOpName("d"), b);
Output e = ops::Div(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
ShapeOptimizer optimizer;
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "e") {
found++;
EXPECT_EQ("Const", node.op());
}
}
EXPECT_EQ(1, found);
auto tensors_actual = EvaluateNodes(output, item.fetch);
EXPECT_NEAR(tensors_expected[0].scalar<int>()(),
tensors_actual[0].scalar<int>()(), 0);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/shape_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/shape_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
899f6ba5-fec6-45a3-9e07-481d7214b88d | cpp | google/arolla | text | arolla/util/text.cc | arolla/util/text_test.cc | #include "arolla/util/text.h"
#include <cstddef>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
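// Returns the prefix of `data` containing at most `n` UTF-8 code points,
// stepping over multi-byte sequences based on each lead byte.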
absl::string_view Utf8CopyFirstNCodePoints(size_t n, absl::string_view data) {
size_t offset = 0;
for (; n > 0 && offset < data.size(); --n) {
const auto byte = data[offset];
if ((byte & 0x80) == 0) {
offset += 1;
} else if ((byte & 0xe0) == 0xc0) {
offset += 2;
} else if ((byte & 0xf0) == 0xe0) {
offset += 3;
} else if ((byte & 0xf8) == 0xf0) {
offset += 4;
} else {
offset += 1;
}
}
return data.substr(0, offset);
}
}
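// Renders a Text value as a quoted, escaped string, abbreviating anything
// longer than 120 code points and reporting the total byte size.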
ReprToken ReprTraits<Text>::operator()(const Text& value) const {
constexpr size_t kTextAbbrevLimit = 120;
ReprToken result;
auto text = value.view();
auto prefix = Utf8CopyFirstNCodePoints(kTextAbbrevLimit, text);
if (prefix.size() == text.size()) {
result.str = absl::StrCat("'", absl::Utf8SafeCHexEscape(text), "'");
} else {
result.str = absl::StrCat("'", absl::Utf8SafeCHexEscape(prefix),
"... (TEXT of ", text.size(), " bytes total)'");
}
return result;
}
void FingerprintHasherTraits<Text>::operator()(FingerprintHasher* hasher,
const Text& value) const {
hasher->Combine(value.view());
}
} | #include "arolla/util/text.h"
#include <string>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
using ::testing::Eq;
using ::testing::MatchesRegex;
TEST(TextTest, Constructor) {
EXPECT_THAT(Text("Hello").view(), Eq("Hello"));
std::string hello = "Hello";
EXPECT_THAT(Text(hello).view(), Eq("Hello"));
absl::string_view hello_view = hello;
EXPECT_THAT(Text(hello_view).view(), Eq("Hello"));
absl::Cord hello_cord(hello);
EXPECT_THAT(Text(hello_cord).view(), Eq("Hello"));
}
TEST(TextTest, CopyAndMoveConstructors) {
static_assert(std::is_nothrow_move_constructible<Text>::value);
Text src("Google");
Text copied(src);
EXPECT_THAT(copied, Eq(src));
Text moved(std::move(src));
EXPECT_THAT(moved, Eq(copied));
}
TEST(TextTest, CopyAndMoveAssignment) {
static_assert(std::is_nothrow_move_assignable<Text>::value);
Text src("Google");
Text copied = src;
EXPECT_THAT(copied, Eq(src));
Text moved = std::move(src);
EXPECT_THAT(moved, Eq(copied));
}
TEST(TextTest, AssignmentFromString) {
std::string google = "Google";
{
Text val("x");
val = "Google";
EXPECT_THAT(val.view(), Eq(google));
}
{
Text val("x");
val = google;
EXPECT_THAT(val.view(), Eq(google));
}
{
absl::string_view google_view = google;
Text val("x");
val = google_view;
EXPECT_THAT(val.view(), Eq("Google"));
}
{
absl::Cord google_cord(google);
Text val("x");
val = google_cord;
EXPECT_THAT(val.view(), Eq("Google"));
}
{
Text val("x");
val = std::move(google);
EXPECT_THAT(val.view(), Eq("Google"));
}
}
TEST(TextTest, Repr) {
EXPECT_THAT(
GenReprToken(
Text("\"\xe8\xb0\xb7\xe6\xad\x8c\" is Google\'s Chinese name\n")),
ReprTokenEq(
"'\\\"\xe8\xb0\xb7\xe6\xad\x8c\\\" is Google\\'s Chinese name\\n'"));
const std::string pattern =
"A"
"\xc3\x86"
"\xe0\xa0\x80"
"\xf0\x90\x80\x80";
std::string data = pattern;
for (int i = 0; i < 8; ++i) {
data += data;
}
EXPECT_THAT(Repr(Text(data)),
MatchesRegex("'(" + pattern +
"){30}[.]{3} \\(TEXT of 2560 bytes total\\)'"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/text.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/text_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
4f271eb2-947f-476c-bd58-be3d6c7ffa56 | cpp | tensorflow/tensorflow | linalg_grad | tensorflow/cc/gradients/linalg_grad.cc | tensorflow/cc/gradients/linalg_grad_test.cc | #include <algorithm>
#include <cmath>
#include <string>
#include <tuple>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_helper.h"
#include "tensorflow/cc/ops/array_ops_internal.h"
#include "tensorflow/cc/ops/math_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
constexpr absl::string_view kEllipsis = "...";
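// Returns the axis of `label` in `subscripts` (negative, counted from the end,
// when the label appears after the ellipsis), or nullopt if it is absent.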
absl::optional<int> EinsumGetAxisFromLabel(absl::string_view subscripts,
char label) {
std::vector<absl::string_view> splits = absl::StrSplit(subscripts, kEllipsis);
auto index = splits[0].find(label);
if (index != splits[0].npos) {
return index;
}
if (splits.size() < 2) {
return absl::nullopt;
}
index = splits[1].find(label);
if (index != splits[1].npos) {
return index - splits[1].length();
}
return absl::nullopt;
}
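// Returns the axis range covered by the ellipsis in `subscripts`: `start` is
// the first broadcast axis and `end` is negative (counted from the back) or
// nullopt when the ellipsis extends to the last axis; {0, 0} if there is no
// ellipsis.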
std::tuple<int, absl::optional<int>> EinsumGetBcastSubshape(
absl::string_view subscripts) {
int start = subscripts.find(kEllipsis);
if (start == subscripts.npos) {
return std::make_tuple(0, 0);
}
int remaining = subscripts.length() - (start + kEllipsis.length());
absl::optional<int> end;
if (remaining > 0) {
end = -remaining;
} else {
end = absl::nullopt;
}
return std::make_tuple(start, end);
}
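// Slices a 1-D tensor from `start` to `end`, where a non-positive or missing
// `end` is interpreted relative to the tensor's size.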
Output Slice1dHelper(const Scope& scope, Output tensor, int start,
absl::optional<int> end) {
if (end.has_value() && *end > 0) {
return Slice(scope, tensor, Const(scope, start, TensorShape({1})),
Const(scope, *end - start, TensorShape({1})));
} else {
return Slice(scope, tensor, Const(scope, start, TensorShape({1})),
Add(scope, Shape(scope, tensor), end.value_or(0) - start));
}
}
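// Returns the subscript string, dimension sizes, and axis indices of the
// labels in `reduced_label_set`, looked up against `subscripts` and
// `input_shape`.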
std::tuple<std::string, Output, Output> EinsumGetReducedSubscripts(
const Scope& scope, const absl::btree_set<char>& reduced_label_set,
Output input_shape, absl::string_view subscripts) {
const std::string reduced_subs =
std::string(reduced_label_set.begin(), reduced_label_set.end());
std::vector<int> reduced_axes;
reduced_axes.reserve(reduced_subs.size());
for (const char s : reduced_subs) {
auto axis = EinsumGetAxisFromLabel(subscripts, s);
if (!axis.has_value()) {
scope.UpdateStatus(errors::Internal(
absl::StrCat("Missing axis", absl::string_view(&s, 1))));
} else {
reduced_axes.push_back(*axis);
}
}
std::vector<Output> reduced_dims_inputs;
reduced_dims_inputs.reserve(reduced_axes.size());
for (const int i : reduced_axes) {
if (i < 0) {
reduced_dims_inputs.push_back(
Gather(scope, input_shape, Add(scope, Size(scope, input_shape), i)));
} else {
reduced_dims_inputs.push_back(Gather(scope, input_shape, i));
}
}
const Output reduced_dims = Stack(scope, reduced_dims_inputs);
Tensor reduced_axes_tensor(
DataType::DT_INT32, TensorShape({static_cast<int>(reduced_axes.size())}));
std::copy_n(reduced_axes.begin(), reduced_axes.size(),
reduced_axes_tensor.flat<int>().data());
return std::make_tuple(reduced_subs, reduced_dims,
Const(scope, reduced_axes_tensor));
}
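// Expands `output_grad` over the axes whose labels were reduced away, either
// by broadcasting directly or via an auxiliary Einsum when labels are repeated
// or reordered.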
Output EinsumGradReducedHelper(const Scope& scope, const Output& output_grad,
absl::string_view output_subs,
absl::string_view input_subs,
const Output& input_shape,
const absl::btree_set<char>& reduced_label_set) {
std::string reduced_subs;
Output reduced_dims, reduced_axes;
std::tie(reduced_subs, reduced_dims, reduced_axes) =
EinsumGetReducedSubscripts(scope, reduced_label_set, input_shape,
input_subs);
const int distinct_input_labels =
absl::flat_hash_set<char>(input_subs.begin(), input_subs.end()).size();
const int distinct_output_labels =
absl::flat_hash_set<char>(output_subs.begin(), output_subs.end()).size();
const bool has_repeated_labels =
(distinct_input_labels + distinct_output_labels) <
input_subs.length() + output_subs.length();
std::string input_subs_without_reduced_labels;
for (const char s : input_subs) {
if (!absl::c_linear_search(reduced_label_set, s)) {
input_subs_without_reduced_labels.push_back(s);
}
}
if (!has_repeated_labels &&
input_subs_without_reduced_labels == output_subs) {
auto reduced_shape = ReducedShapeHelper(scope, input_shape, reduced_axes);
return BroadcastTo(scope, Reshape(scope, output_grad, reduced_shape),
input_shape);
}
Output output_grad_shape = Shape(scope, output_grad);
auto grad_shape_with_reduced_labels =
Concat(scope, {reduced_dims, output_grad_shape}, 0);
auto reduced_shape = Concat(
scope,
{Const(scope, 1, TensorShape{static_cast<int>(reduced_label_set.size())}),
output_grad_shape},
0);
Output broadcasted_grad =
BroadcastTo(scope, Reshape(scope, output_grad, reduced_shape),
grad_shape_with_reduced_labels);
return Einsum(scope, {broadcasted_grad},
absl::StrCat(reduced_subs, output_subs, "->", input_subs));
}
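// Computes the gradient with respect to one einsum operand by contracting
// `output_grad` with the other operand and then restoring any axes that were
// reduced away.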
Output EinsumGradWrt(const Scope& scope, Output output_grad,
Output other_operand, Output input_shape,
absl::string_view input_subs, absl::string_view other_subs,
absl::string_view output_subs) {
absl::btree_set<char> reduced_label_set(input_subs.begin(), input_subs.end());
for (const char x : output_subs) {
reduced_label_set.erase(x);
}
for (const char x : other_subs) {
reduced_label_set.erase(x);
}
reduced_label_set.erase('.');
std::string left_subs;
for (const char s : input_subs) {
if (!reduced_label_set.contains(s)) {
left_subs.push_back(s);
}
}
Output grad_reduced =
Einsum(scope, {output_grad, other_operand},
absl::StrCat(output_subs, ",", other_subs, "->", left_subs));
if (reduced_label_set.empty()) {
return grad_reduced;
}
return EinsumGradReducedHelper(scope, grad_reduced, left_subs, input_subs,
input_shape, reduced_label_set);
}
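// Gradient function for the Einsum op, handling both the unary and binary
// cases as well as broadcasting via "...".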
Status EinsumGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
if (grad_inputs.size() != 1) {
return errors::InvalidArgument("Expect 1 grad input.");
}
const Output& grad = grad_inputs[0];
std::string equation;
TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "equation", &equation));
std::vector<absl::string_view> equation_split =
absl::StrSplit(equation, "->");
if (equation_split.size() != 2) {
return errors::InvalidArgument("Equation must contain a single ->");
}
const absl::string_view input_subs = equation_split[0];
const absl::string_view output_subs = equation_split[1];
if (op.num_inputs() == 1) {
auto input_shape = Shape(scope, op.input(0));
absl::btree_set<char> reduced_label_set(input_subs.begin(),
input_subs.end());
for (const char x : output_subs) {
reduced_label_set.erase(x);
}
reduced_label_set.erase('.');
if (reduced_label_set.empty()) {
grad_outputs->push_back(Einsum(
scope, grad_inputs, absl::StrCat(output_subs, "->", input_subs)));
return scope.status();
}
grad_outputs->push_back(EinsumGradReducedHelper(
scope, grad, output_subs, input_subs, input_shape, reduced_label_set));
return scope.status();
}
std::vector<absl::string_view> subs = absl::StrSplit(input_subs, ',');
if (subs.size() != 2) {
return errors::InvalidArgument("Only 2 inputs are supported");
}
std::string x_subs(subs[0]);
std::string y_subs(subs[1]);
if (absl::StrContains(output_subs, kEllipsis)) {
if (!absl::StrContains(x_subs, kEllipsis)) {
absl::StrAppend(&x_subs, kEllipsis);
}
if (!absl::StrContains(y_subs, kEllipsis)) {
absl::StrAppend(&y_subs, kEllipsis);
}
}
tensorflow::Output x = op.input(0);
tensorflow::Output y = op.input(1);
if (DataTypeIsComplex(grad.type())) {
x = Conj(scope, x);
y = Conj(scope, y);
}
const auto x_shape = Shape(scope, x);
const auto y_shape = Shape(scope, y);
Output grad_x =
EinsumGradWrt(scope, grad, y, x_shape, x_subs, y_subs, output_subs);
Output grad_y =
EinsumGradWrt(scope, grad, x, y_shape, y_subs, x_subs, output_subs);
if (!absl::StrContains(output_subs, kEllipsis)) {
grad_outputs->push_back(grad_x);
grad_outputs->push_back(grad_y);
return scope.status();
}
int bx_start, by_start;
absl::optional<int> bx_end, by_end;
std::tie(bx_start, bx_end) = EinsumGetBcastSubshape(x_subs);
std::tie(by_start, by_end) = EinsumGetBcastSubshape(y_subs);
auto args = internal::BroadcastGradientArgs(
scope, Slice1dHelper(scope, x_shape, bx_start, bx_end),
Slice1dHelper(scope, y_shape, by_start, by_end));
grad_x = Reshape(
scope, ReduceSum(scope, grad_x, Add(scope, bx_start, args.r0)), x_shape);
grad_y = Reshape(
scope, ReduceSum(scope, grad_y, Add(scope, by_start, args.r1)), y_shape);
grad_outputs->push_back(grad_x);
grad_outputs->push_back(grad_y);
return scope.status();
}
REGISTER_GRADIENT_OP("Einsum", EinsumGrad);
}
}
} | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/framework/testutil.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using tensorflow::ops::Einsum;
using tensorflow::ops::Placeholder;
class LinalgGradTest : public ::testing::Test {
protected:
LinalgGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
void RunTest(const OutputList& xs, const std::vector<TensorShape>& x_shapes,
const OutputList& ys, const std::vector<TensorShape>& y_shapes) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, xs, x_shapes, ys, y_shapes, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
Scope scope_;
};
TEST_F(LinalgGradTest, Einsum_Transpose) {
TensorShape x_shape({2, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Einsum(scope_, {x}, "ij->ji");
TensorShape y_shape({3, 2});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(LinalgGradTest, Einsum_TransposeBroadcast) {
TensorShape x_shape({3, 2, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto y = Einsum(scope_, {x}, "...ij->...ji");
TensorShape y_shape({3, 3, 2});
RunTest({x}, {x_shape}, {y}, {y_shape});
}
TEST_F(LinalgGradTest, Einsum_MatMul) {
TensorShape x_shape({2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "ij,jk->ik");
TensorShape z_shape({2, 3});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_MatMulComplex) {
TensorShape x_shape({2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_COMPLEX64, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_COMPLEX64, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "ij,jk->ik");
TensorShape z_shape({2, 3});
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<complex64, complex64, float>(
scope_, {x, y}, {x_shape, y_shape}, {z}, {z_shape}, &max_error)));
EXPECT_LT(max_error, 1e-3);
}
TEST_F(LinalgGradTest, Einsum_MatMulBroadcast) {
TensorShape x_shape({3, 2, 3});
TensorShape y_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "...ij,...jk->...ik");
TensorShape z_shape({3, 2, 3});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_Trace) {
TensorShape x_shape({3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto z = Einsum(scope_, {x}, "ii->");
TensorShape z_shape({});
RunTest({x}, {x_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_TraceBroadcast) {
TensorShape x_shape({4, 3, 3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
auto z = Einsum(scope_, {x}, "...ii->...");
TensorShape z_shape({4});
RunTest({x}, {x_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_DotProduct) {
TensorShape x_shape({3});
TensorShape y_shape({3});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "i,i->");
TensorShape z_shape({});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_OuterProduct) {
TensorShape x_shape({3});
TensorShape y_shape({5});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "i,j->ij");
TensorShape z_shape({3, 5});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
TEST_F(LinalgGradTest, Einsum_TwoInputReduction) {
TensorShape x_shape({3, 2, 4});
TensorShape y_shape({4, 5});
Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
auto z = Einsum(scope_, {x, y}, "abc,cd->ad");
TensorShape z_shape({3, 5});
RunTest({x, y}, {x_shape, y_shape}, {z}, {z_shape});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/linalg_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/linalg_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
332dbf76-b2f0-4a9f-8282-433353f67b1c | cpp | tensorflow/tensorflow | options_dataset_op | tensorflow/core/kernels/data/options_dataset_op.cc | tensorflow/core/kernels/data/options_dataset_op_test.cc | #include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
namespace data {
constexpr const char* const OptionsDatasetOp::kDatasetType;
constexpr const char* const OptionsDatasetOp::kInputDataset;
constexpr const char* const OptionsDatasetOp::kOutputTypes;
constexpr const char* const OptionsDatasetOp::kOutputShapes;
constexpr const char* const OptionsDatasetOp::kSerializedOptions;
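// Identity dataset that carries the deserialized Options proto; output types,
// shapes, cardinality, and element access are all forwarded to the input.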
class OptionsDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
const string& serialized_options)
: DatasetBase(DatasetContext(ctx)),
input_(input),
serialized_options_(serialized_options) {
input_->Ref();
Options options;
OP_REQUIRES(ctx, options.ParseFromString(serialized_options),
errors::InvalidArgument(absl::StrCat(
"Could not parse ", OptionsDatasetOp::kSerializedOptions,
" as valid Options.")));
set_options(options);
random_indexing_compatible_ = absl::OkStatus();
if (input_ != nullptr) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
DCHECK(false) << "OptionsDatasetOp::Dataset::MakeIteratorInternal is not "
"expected to be called because it is supposed to forward "
"the iterator to its input dataset(s).";
LOG(ERROR) << "Datasets of type " << type_string()
<< " forwards its iterator to its input dataset. "
"`MakeIteratorInternal` is not implemented.";
return nullptr;
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
return input_->Get(ctx, index, out_tensors);
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
AttrValue serialized_options_attr;
b->BuildAttrValue(serialized_options_, &serialized_options_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {input_graph_node},
{std::make_pair(kSerializedOptions, serialized_options_attr)}, output));
return absl::OkStatus();
}
private:
const DatasetBase* input_;
const tstring serialized_options_;
absl::Status random_indexing_compatible_;
};
void OptionsDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
DatasetBase* input;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &input));
*output = new Dataset(ctx, input, serialized_options_);
}
OptionsDatasetOp::OptionsDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kSerializedOptions, &serialized_options_));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("OptionsDataset").Device(DEVICE_CPU).Priority(2),
OptionsDatasetOp);
REGISTER_KERNEL_BUILDER(Name("OptionsDataset")
.Device(DEVICE_GPU)
.HostMemory("input_dataset")
.HostMemory("handle")
.Priority(1),
OptionsDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/options_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/kernels/data/range_dataset_op.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kOptions[] = R"proto(
deterministic: true
slack: true
optimization_options { apply_default_optimizations: true autotune: true }
)proto";
class OptionsDatasetOpTest : public DatasetOpsTestBase {};
OptionsDatasetParams OptionsDatasetParams0() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 10, 3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_0");
}
OptionsDatasetParams OptionsDatasetParams1() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(10, 0, -3),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_1");
}
OptionsDatasetParams OptionsDatasetParams2() {
Options options;
protobuf::TextFormat::ParseFromString(kOptions, &options);
return OptionsDatasetParams(RangeDatasetParams(0, 5, 1),
options.SerializeAsString(),
{DT_INT64},
{PartialTensorShape({})},
"options_dataset_2");
}
std::vector<GetNextTestCase<OptionsDatasetParams>> GetNextTestCases() {
return {{OptionsDatasetParams0(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {3}, {6}, {9}})},
{OptionsDatasetParams1(),
CreateTensors<int64_t>(TensorShape({}), {{10}, {7}, {4}, {1}})},
{OptionsDatasetParams2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
ITERATOR_GET_NEXT_TEST_P(OptionsDatasetOpTest, OptionsDatasetParams,
GetNextTestCases())
TEST_F(OptionsDatasetOpTest, DatasetOptions) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
Options expected_options;
protobuf::TextFormat::ParseFromString(kOptions, &expected_options);
TF_ASSERT_OK(CheckDatasetOptions(expected_options));
}
TEST_F(OptionsDatasetOpTest, DatasetNodeName) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(OptionsDatasetOpTest, DatasetTypeString) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(OptionsDatasetOp::kDatasetType)));
}
TEST_F(OptionsDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, DatasetOutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, DatasetCardinality) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetCardinality(4));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
TEST_F(OptionsDatasetOpTest, IteratorOutputShapes) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputShapes({PartialTensorShape({})}));
}
TEST_F(OptionsDatasetOpTest, IteratorPrefix) {
auto dataset_params = OptionsDatasetParams0();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
RangeDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/options_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/options_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |