ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
32ba9709-ceb9-419a-9415-2ed32cd72d6a | cpp | google/quiche | rst_stream_payload_decoder | quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.cc | quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
DecodeStatus RstStreamPayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "RstStreamPayloadDecoder::StartDecodingPayload: "
<< state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::RST_STREAM, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
QUICHE_DCHECK_EQ(0, state->frame_header().flags);
state->InitializeRemainders();
return HandleStatus(
state, state->StartDecodingStructureInPayload(&rst_stream_fields_, db));
}
DecodeStatus RstStreamPayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "RstStreamPayloadDecoder::ResumeDecodingPayload"
<< " remaining_payload=" << state->remaining_payload()
<< " db->Remaining=" << db->Remaining();
QUICHE_DCHECK_EQ(Http2FrameType::RST_STREAM, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
return HandleStatus(
state, state->ResumeDecodingStructureInPayload(&rst_stream_fields_, db));
}
DecodeStatus RstStreamPayloadDecoder::HandleStatus(FrameDecoderState* state,
DecodeStatus status) {
QUICHE_DVLOG(2) << "HandleStatus: status=" << status
<< "; remaining_payload=" << state->remaining_payload();
if (status == DecodeStatus::kDecodeDone) {
if (state->remaining_payload() == 0) {
state->listener()->OnRstStream(state->frame_header(),
rst_stream_fields_.error_code);
return DecodeStatus::kDecodeDone;
}
return state->ReportFrameSizeError();
}
QUICHE_DCHECK(
(status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError && state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
return status;
}
} | #include "quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_constants_test_util.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class RstStreamPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::RST_STREAM;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnRstStream(const Http2FrameHeader& header,
Http2ErrorCode error_code) override {
QUICHE_VLOG(1) << "OnRstStream: " << header
<< "; error_code=" << error_code;
StartAndEndFrame(header)->OnRstStream(header, error_code);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class RstStreamPayloadDecoderTest
: public AbstractPayloadDecoderTest<RstStreamPayloadDecoder,
RstStreamPayloadDecoderPeer, Listener> {
protected:
Http2RstStreamFields RandRstStreamFields() {
Http2RstStreamFields fields;
test::Randomize(&fields, RandomPtr());
return fields;
}
};
TEST_F(RstStreamPayloadDecoderTest, WrongSize) {
auto approve_size = [](size_t size) {
return size != Http2RstStreamFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(RandRstStreamFields());
fb.Append(RandRstStreamFields());
fb.Append(RandRstStreamFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
TEST_F(RstStreamPayloadDecoderTest, AllErrors) {
for (auto error_code : AllHttp2ErrorCodes()) {
Http2RstStreamFields fields{error_code};
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::RST_STREAM, RandFlags(),
RandStreamId());
set_frame_header(header);
FrameParts expected(header);
expected.SetOptRstStreamErrorCode(error_code);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
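
The row above pairs quiche's RST_STREAM payload decoder with its test. The decoder's core pattern is incremental decoding: the fixed-size payload (a 4-byte error code) may arrive split across DecodeBuffers, so StartDecodingPayload and ResumeDecodingPayload both funnel into HandleStatus, which reports OnRstStream only once remaining_payload() reaches zero. The sketch below illustrates that pattern in a self-contained way; it does not use the quiche API, and every name in it is hypothetical.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>

// Hypothetical stand-in for quiche's DecodeStatus; not the real API.
enum class DecodeStatus { kDecodeDone, kDecodeInProgress, kDecodeError };

class RstStreamSketchDecoder {
 public:
  // Feeds `len` bytes; returns kDecodeInProgress until all 4 bytes of the
  // error code have been seen, mirroring Start/ResumeDecodingPayload.
  DecodeStatus Decode(const uint8_t* data, size_t len) {
    while (len > 0 && filled_ < 4) {
      buf_[filled_++] = *data++;
      --len;
    }
    if (filled_ < 4) return DecodeStatus::kDecodeInProgress;
    if (len > 0) return DecodeStatus::kDecodeError;  // oversized payload
    error_code_ = (uint32_t{buf_[0]} << 24) | (uint32_t{buf_[1]} << 16) |
                  (uint32_t{buf_[2]} << 8) | uint32_t{buf_[3]};
    return DecodeStatus::kDecodeDone;
  }
  uint32_t error_code() const { return error_code_; }

 private:
  uint8_t buf_[4] = {};
  size_t filled_ = 0;
  uint32_t error_code_ = 0;
};

int main() {
  RstStreamSketchDecoder d;
  const uint8_t part1[] = {0x00, 0x00};  // first half of CANCEL (0x8)
  const uint8_t part2[] = {0x00, 0x08};  // second half
  if (d.Decode(part1, 2) == DecodeStatus::kDecodeInProgress &&
      d.Decode(part2, 2) == DecodeStatus::kDecodeDone) {
    std::cout << "error_code=" << d.error_code() << "\n";  // prints 8
  }
}
```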
4506e907-c408-4e9e-a9ab-f52b10e03bdf | cpp | tensorflow/tensorflow | kernel_fallback_compat_request_state | tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.cc | tensorflow/core/runtime_fallback/test/kernel_fallback_compat_request_state_test.cc | #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <cstdlib>
#include <cstring>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/platform/threadpool_interface.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tfrt/graph_executor/config.h"
#include "tensorflow/core/tfrt/utils/fallback_tensor.h"
#include "tfrt/host_context/resource_context.h"
#include "tfrt/support/pointer_util.h"
namespace tensorflow {
namespace tfd {
using ::tensorflow::tfrt_stub::OpKernelRunnerTable;
void FallbackResourceArray::SetResource(
int index, tensorflow::tfrt_stub::ImmutableTensor tensor) {
if (resource_async_values_.size() <= index) {
resource_storage_.resize(index + 1);
resource_async_values_.resize(index + 1);
}
DCHECK(resource_storage_[index].get() == nullptr);
DCHECK(resource_async_values_[index].AsPtr().value() == nullptr);
resources_.push_back(std::make_unique<tensorflow::tfrt_stub::ImmutableTensor>(
std::move(tensor)));
resource_storage_[index] = std::make_unique<
tfrt::internal::AsyncValueStorage<tfrt_stub::FallbackTensor>>();
resource_async_values_[index] =
tfrt::MakeAvailableAsyncValueRef<tfrt_stub::FallbackTensor>(
*resource_storage_[index], resources_.back().get());
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer> step_container,
std::unique_ptr<CollectiveExecutor::Handle> collective_executor_handle,
core::RefCountPtr<Rendezvous> rendezvous, OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: step_id_(step_id),
runner_(runner),
step_container_(std::move(step_container)),
collective_executor_handle_(std::move(collective_executor_handle)),
collective_executor_(collective_executor_handle_
? collective_executor_handle_->get()
: nullptr),
rendezvous_(std::move(rendezvous)),
device_manager_(device_manager),
runner_table_(runner_table),
resource_array_(resource_array),
intra_op_threadpool_(user_intra_op_threadpool),
pflr_(pflr) {
DCHECK(runner_);
DCHECK(device_manager_);
DCHECK(runner_table_);
DCHECK(resource_array_);
DCHECK(rendezvous_);
DCHECK(pflr_);
cpu_device_ = device_manager_->HostCPU();
cpu_function_library_runtime_ = pflr_->GetFLR(cpu_device_->name());
if (user_intra_op_threadpool != nullptr) {
custom_cpu_device_ = tensorflow::RenamedDevice::NewRenamedDevice(
cpu_device_->name(), cpu_device_, false,
false, user_intra_op_threadpool);
cpu_device_ = custom_cpu_device_.get();
for (auto* device : device_manager_->ListDevices()) {
custom_device_[device] = tensorflow::RenamedDevice::NewRenamedDevice(
device->name(), device, false,
false, user_intra_op_threadpool);
}
}
if (model_metadata.has_value()) {
session_metadata_ = *model_metadata;
}
}
KernelFallbackCompatRequestState::KernelFallbackCompatRequestState(
std::function<void(std::function<void()>)>* runner,
const tensorflow::DeviceMgr* device_manager, int64_t step_id,
OpKernelRunnerTable* runner_table, FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
const tensorflow::ProcessFunctionLibraryRuntime* pflr)
: KernelFallbackCompatRequestState(
runner, device_manager, step_id,
tfrt::OwnedOrUnownedPtr<ScopedStepContainer>{
std::make_unique<ScopedStepContainer>(
step_id,
[step_id, device_manager](const std::string& name) {
for (tensorflow::Device* device :
device_manager->ListDevices()) {
auto status = device->resource_manager()->Cleanup(name);
(void)status;
tensorflow::ScopedAllocatorMgr* sam =
device->GetScopedAllocatorMgr();
if (sam) sam->Cleanup(step_id);
}
})},
nullptr,
core::RefCountPtr<RefCountedIntraProcessRendezvous>(
new RefCountedIntraProcessRendezvous(device_manager)),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr) {}
static std::function<void(std::function<void()>)>* GetDefaultRunner() {
static auto* const default_runner =
new std::function<void(std::function<void()>)>(
[](const std::function<void()>& f) { f(); });
return default_runner;
}
Status SetUpKernelFallbackCompatRequestContext(
tfrt::RequestContextBuilder* builder,
const tensorflow::DeviceMgr* device_manager,
const tensorflow::ProcessFunctionLibraryRuntime* pflr,
tfrt_stub::OpKernelRunnerTable* runner_table,
FallbackResourceArray* resource_array,
tensorflow::thread::ThreadPoolInterface* user_intra_op_threadpool,
const absl::optional<SessionMetadata>& model_metadata,
std::function<void(std::function<void()>)>* runner,
tfrt_stub::CostRecorder* cost_recorder,
tfrt::ResourceContext* client_graph_resource_context,
tensorflow::CancellationManager* cancellation_manager,
const tensorflow::tfrt_stub::RuntimeConfig* runtime_config) {
DCHECK(builder);
DCHECK(device_manager);
DCHECK(pflr);
DCHECK(runner_table);
DCHECK(resource_array);
auto& fallback_request_state =
builder->context_data().emplace<KernelFallbackCompatRequestState>(
runner ? runner : GetDefaultRunner(), device_manager, builder->id(),
runner_table, resource_array, user_intra_op_threadpool,
model_metadata, pflr);
fallback_request_state.set_cost_recorder(cost_recorder);
fallback_request_state.set_client_graph_resource_context(
client_graph_resource_context);
fallback_request_state.set_cancellation_manager(cancellation_manager);
fallback_request_state.set_runtime_config(runtime_config);
return absl::OkStatus();
}
}
} | #include "tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.h"
#include <gtest/gtest.h>
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
namespace tfd {
namespace {
TEST(FallbackResourceArrayTest, SetAndGetResourceOk) {
Tensor tensor_1 =
test::AsTensor<float>({0.0, 1.0, 2.0, 3.0}, TensorShape({1, 4}));
tfrt_stub::ImmutableTensor imm_tensor_1 =
tfrt_stub::ImmutableTensor::Create(tensor_1);
tensorflow::Tensor tensor_2 =
test::AsTensor<float>({5.0, 6.0, 7.0}, tensorflow::TensorShape({1, 3}));
tfrt_stub::ImmutableTensor imm_tensor_2 =
tfrt_stub::ImmutableTensor::Create(tensor_2);
FallbackResourceArray resource_array;
resource_array.SetResource(0, imm_tensor_1);
resource_array.SetResource(1, imm_tensor_2);
test::ExpectTensorEqual<float>(resource_array.GetResource(0)->tensor(),
tensor_1);
test::ExpectTensorEqual<float>(resource_array.GetResource(1)->tensor(),
tensor_2);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(0).tensor(), tensor_1);
test::ExpectTensorEqual<float>(
resource_array.GetResourceAsFallbackTensor(1).tensor(), tensor_2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/kernel_fallback_compat_request_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/test/kernel_fallback_compat_request_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
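
FallbackResourceArray::SetResource above grows its storage vectors on demand and enforces set-once semantics via DCHECKs before wrapping the tensor in an already-available async value. Below is a minimal sketch of the same grow-on-demand, set-once pattern, with a plain string standing in for the tensor and no TFRT types; all names are hypothetical.

```cpp
#include <cassert>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Minimal stand-in for FallbackResourceArray's index-addressed storage;
// "resource" here is just a string, not a tensorflow::Tensor.
class ResourceArraySketch {
 public:
  void SetResource(size_t index, std::string value) {
    if (slots_.size() <= index) slots_.resize(index + 1);  // grow on demand
    assert(slots_[index] == nullptr);  // mirrors the DCHECKs: set-once
    slots_[index] = std::make_unique<std::string>(std::move(value));
  }
  const std::string& GetResource(size_t index) const {
    return *slots_.at(index);
  }

 private:
  std::vector<std::unique_ptr<std::string>> slots_;
};

int main() {
  ResourceArraySketch arr;
  arr.SetResource(0, "tensor_a");
  arr.SetResource(3, "tensor_b");  // resizes slots 1..3 on demand
  std::cout << arr.GetResource(0) << " " << arr.GetResource(3) << "\n";
}
```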
a967d760-b018-42d6-a3dc-cae0cbac31b8 | cpp | google/tensorstore | concurrent | tensorstore/internal/testing/concurrent.cc | tensorstore/internal/testing/concurrent_test.cc | #include "tensorstore/internal/testing/concurrent.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#endif
namespace tensorstore {
namespace internal_testing {
#ifdef _WIN32
TestConcurrentLock::TestConcurrentLock() {
handle_ = ::CreateMutexA(nullptr,
FALSE,
"TensorStoreTestConcurrentMutex");
ABSL_CHECK(handle_ != nullptr);
if (::WaitForSingleObject(handle_, 0) != WAIT_OBJECT_0) {
ABSL_LOG(INFO) << "Waiting on WIN32 Concurrent Lock";
ABSL_CHECK(::WaitForSingleObject(handle_, INFINITE) == WAIT_OBJECT_0);
}
}
TestConcurrentLock::~TestConcurrentLock() {
ABSL_CHECK(::ReleaseMutex(handle_));
::CloseHandle(handle_);
}
#endif
}
} | #include "tensorstore/internal/testing/concurrent.h"
#include <atomic>
#include <type_traits>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h"
namespace {
using ::tensorstore::internal_testing::TestConcurrent;
TEST(TestConcurrent, EnsureContentionHappens) {
static constexpr int kIterations = 100;
static constexpr int kN = 20;
absl::Mutex lock;
int uncontended{0};
TestConcurrent<kN>(
kIterations,
[&] {},
[&] {},
[&](auto) {
if (lock.TryLock()) {
uncontended++;
lock.Unlock();
}
});
int contended = (kIterations * kN) - uncontended;
ABSL_LOG(INFO) << "Contended in " << contended << " of 2000 iterations.";
}
TEST(TestConcurrent, Example1) {
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent(
kIterations,
[&] {},
[&] {},
[&]() { sum += 1; }, [&]() { sum += 2; }, [&]() { sum += 3; });
EXPECT_EQ(100 + 200 + 300, sum);
}
template <typename T>
struct TestConcurrentFixture : public ::testing::Test {};
using ConcurrentOpSizes = ::testing::Types<std::integral_constant<int, 1>,
std::integral_constant<int, 4>,
std::integral_constant<int, 16>>;
TYPED_TEST_SUITE(TestConcurrentFixture, ConcurrentOpSizes);
TYPED_TEST(TestConcurrentFixture, Example2) {
static constexpr int kN = TypeParam{}();
static constexpr int kIterations = 100;
std::atomic<int> sum{0};
TestConcurrent<kN>(
kIterations,
[&] {},
[&] {}, [&](auto i) { sum += (i + 1); });
EXPECT_EQ((kIterations / 2) * kN * (kN + 1), sum);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/testing/concurrent.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/testing/concurrent_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
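
The Example2 test above encodes a small identity: each of the kIterations iterations runs kN concurrent ops, op i adds (i + 1), so the total is kIterations * kN * (kN + 1) / 2, written as (kIterations / 2) * kN * (kN + 1) since kIterations is even. The rough stand-in below reproduces that observable arithmetic with std::thread; the real TestConcurrent implementation is not shown in this row, so this harness is a hypothetical sketch, not the tensorstore helper.

```cpp
#include <atomic>
#include <iostream>
#include <thread>
#include <vector>

// Sketch of a TestConcurrent-like harness: run kN concurrent ops per
// iteration; each op receives its index i.
template <int kN, typename Op>
void RunConcurrent(int iterations, Op op) {
  for (int it = 0; it < iterations; ++it) {
    std::vector<std::thread> threads;
    for (int i = 0; i < kN; ++i) threads.emplace_back([&, i] { op(i); });
    for (auto& t : threads) t.join();
  }
}

int main() {
  constexpr int kN = 4, kIterations = 100;
  std::atomic<int> sum{0};
  RunConcurrent<kN>(kIterations, [&](int i) { sum += i + 1; });
  // Each iteration contributes 1 + 2 + ... + kN = kN * (kN + 1) / 2.
  std::cout << sum << " == " << (kIterations / 2) * kN * (kN + 1) << "\n";
}
```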
31ca920e-06dc-41f7-8f63-bf1d9dda7801 | cpp | tensorflow/tensorflow | numerical_utils | tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc | tensorflow/compiler/mlir/lite/quantization/numerical_utils_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <optional>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
QuantizedMultiplier QuantizeMultiplier(double double_multiplier) {
if (double_multiplier < 1e-6) {
return {0, 0};
}
int32_t shift;
const double q = frexp(double_multiplier, &shift);
int64_t quantized_multiplier = round(q * (1LL << 31));
assert(quantized_multiplier <= (1LL << 31));
if (quantized_multiplier == (1LL << 31)) {
quantized_multiplier /= 2;
++shift;
}
assert(quantized_multiplier <= std::numeric_limits<int32_t>::max());
if (shift > 31 || shift < -31) {
return {0, 0};
}
return {static_cast<int32_t>(quantized_multiplier), shift};
}
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
std::optional<double> rmin,
std::optional<double> rmax, int32_t qmin,
int32_t qmax) {
auto quantize = [scale, zero_point](float f) {
return zero_point + static_cast<int32_t>(std::round(f / scale));
};
if (rmin.has_value() && rmax.has_value()) {
return {std::max(qmin, quantize(rmin.value())),
std::min(qmax, quantize(rmax.value()))};
} else if (rmin.has_value()) {
return {std::max(qmin, quantize(rmin.value())), qmax};
} else if (rmax.has_value()) {
return {qmin, std::min(qmax, quantize(rmax.value()))};
} else {
return {qmin, qmax};
}
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <cmath>
#include <optional>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
namespace {
double ComposeScale(const QuantizedMultiplier& input) {
return input.first * exp2(-31 + input.second);
}
TEST(NumericalUtils, QuantizeMultiplier) {
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e6)), 1.0e6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e3)), 1.0e3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(10.)), 10.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(5.)), 5.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(2.)), 2.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(0.0)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0)), 1.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-1)), 1.0e-1);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-2)), 1.0e-2);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-3)), 1.0e-3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-4)), 1.0e-4);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-5)), 1.0e-5);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-6)), 1.0e-6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-7)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-8)), 0.0);
}
TEST(NumericalUtils, ActivationRange) {
auto a =
CalculateQuantizedRange(1e-6, 0, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(a.first, -128);
ASSERT_EQ(a.second, 127);
auto b = CalculateQuantizedRange(1e-6, 0, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(b.first, 0);
ASSERT_EQ(b.second, 127);
auto c = CalculateQuantizedRange(1e-6, 0, -1.0, 1.0, -128, 127);
ASSERT_EQ(c.first, -128);
ASSERT_EQ(c.second, 127);
auto d = CalculateQuantizedRange(1e-6, 0, 0.0, 6.0, -128, 127);
ASSERT_EQ(d.first, 0);
ASSERT_EQ(d.second, 127);
auto e =
CalculateQuantizedRange(1e-6, 100, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(e.first, -128);
ASSERT_EQ(e.second, 127);
auto f = CalculateQuantizedRange(1e-6, 100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(f.first, 100);
ASSERT_EQ(f.second, 127);
auto g = CalculateQuantizedRange(1e-6, 100, -1.0, 1.0, -128, 127);
ASSERT_EQ(g.first, -128);
ASSERT_EQ(g.second, 127);
auto h = CalculateQuantizedRange(1e-6, 100, 0.0, 6.0, -128, 127);
ASSERT_EQ(h.first, 100);
ASSERT_EQ(h.second, 127);
auto i = CalculateQuantizedRange(1e-6, -100, std::nullopt, std::nullopt, -128,
127);
ASSERT_EQ(i.first, -128);
ASSERT_EQ(i.second, 127);
auto j = CalculateQuantizedRange(1e-6, -100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(j.first, -100);
ASSERT_EQ(j.second, 127);
auto k = CalculateQuantizedRange(1e-6, -100, -1.0, 1.0, -128, 127);
ASSERT_EQ(k.first, -128);
ASSERT_EQ(k.second, 127);
auto l = CalculateQuantizedRange(1e-6, -100, 0.0, 6.0, -128, 127);
ASSERT_EQ(l.first, -100);
ASSERT_EQ(l.second, 127);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/numerical_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
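
QuantizeMultiplier above decomposes a positive multiplier m as m = q * 2^shift via frexp, with q in [0.5, 1), then stores round(q * 2^31) as an int32 alongside the exponent; the test's ComposeScale inverts this as quantized_multiplier * 2^(shift - 31). A self-contained round-trip mirroring that math (the function name here is a stand-in, not the mlir::quant API):

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <utility>

// Mirrors QuantizeMultiplier above: m = q * 2^shift, q in [0.5, 1),
// stored as round(q * 2^31) with the exponent kept separately.
std::pair<int32_t, int32_t> Quantize(double m) {
  if (m < 1e-6) return {0, 0};
  int shift;
  double q = std::frexp(m, &shift);
  int64_t qm = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (qm == (1LL << 31)) {  // q rounded up to exactly 1.0
    qm /= 2;
    ++shift;
  }
  if (shift > 31 || shift < -31) return {0, 0};
  return {static_cast<int32_t>(qm), shift};
}

int main() {
  for (double m : {5.0, 0.001, 1.0e6}) {
    auto [qm, shift] = Quantize(m);
    double back = qm * std::exp2(shift - 31);  // ComposeScale from the test
    std::cout << m << " -> (" << qm << ", " << shift << ") -> " << back << "\n";
  }
}
```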
db30c870-4a02-4b6c-9779-ea72313933d7 | cpp | google/tsl | gcs_throttle | tsl/platform/cloud/gcs_throttle.cc | tsl/platform/cloud/gcs_throttle_test.cc | #include "tsl/platform/cloud/gcs_throttle.h"
#include <algorithm>
namespace tsl {
namespace {
EnvTime* get_default_env_time() {
static EnvTime* default_env_time = new EnvTime;
return default_env_time;
}
}
GcsThrottle::GcsThrottle(EnvTime* env_time)
: last_updated_secs_(env_time ? env_time->GetOverridableNowSeconds()
: EnvTime::NowSeconds()),
available_tokens_(0),
env_time_(env_time ? env_time : get_default_env_time()) {}
bool GcsThrottle::AdmitRequest() {
mutex_lock l(mu_);
UpdateState();
if (available_tokens_ < config_.tokens_per_request) {
return !config_.enabled;  // reject, unless throttling is disabled
}
available_tokens_ -= config_.tokens_per_request;
return true;
}
void GcsThrottle::RecordResponse(size_t num_bytes) {
mutex_lock l(mu_);
UpdateState();
available_tokens_ -= request_bytes_to_tokens(num_bytes);
}
void GcsThrottle::SetConfig(GcsThrottleConfig config) {
mutex_lock l(mu_);
config_ = config;
available_tokens_ = config.initial_tokens;
last_updated_secs_ = env_time_->GetOverridableNowSeconds();
}
void GcsThrottle::UpdateState() {
int64_t now = env_time_->GetOverridableNowSeconds();
uint64 delta_secs =
std::max(int64_t{0}, now - static_cast<int64_t>(last_updated_secs_));
available_tokens_ += delta_secs * config_.token_rate;
available_tokens_ = std::min(available_tokens_, config_.bucket_size);
last_updated_secs_ = now;
}
} | #include "tsl/platform/cloud/gcs_throttle.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
class TestTime : public EnvTime {
public:
uint64 GetOverridableNowNanos() const override {
return now_micros_ * kMicrosToNanos;
}
void SetTime(uint64 now_micros) { now_micros_ = now_micros; }
void AdvanceSeconds(int64_t secs) { now_micros_ += secs * kSecondsToMicros; }
private:
uint64 now_micros_ = 1234567890000000ULL;
};
class GcsThrottleTest : public ::testing::Test {
protected:
GcsThrottleTest() : throttle_(&time_) {
config_.enabled = true;
throttle_.SetConfig(config_);
}
GcsThrottleConfig config_;
TestTime time_;
GcsThrottle throttle_;
};
TEST_F(GcsThrottleTest, ReplenishTokens) {
EXPECT_EQ(0, throttle_.available_tokens());
time_.AdvanceSeconds(1);
EXPECT_EQ(100000, throttle_.available_tokens());
time_.AdvanceSeconds(2);
EXPECT_EQ(300000, throttle_.available_tokens());
}
TEST_F(GcsThrottleTest, RejectRequest) {
EXPECT_EQ(0, throttle_.available_tokens());
time_.AdvanceSeconds(1);
EXPECT_TRUE(throttle_.AdmitRequest());
EXPECT_EQ(99900, throttle_.available_tokens());
for (int i = 1; i < 1000; i++) {
EXPECT_TRUE(throttle_.AdmitRequest());
}
EXPECT_FALSE(throttle_.AdmitRequest());
}
TEST_F(GcsThrottleTest, MarkResponses) {
time_.AdvanceSeconds(1);
EXPECT_TRUE(throttle_.AdmitRequest());
throttle_.RecordResponse(128000000);
EXPECT_EQ(-25100, throttle_.available_tokens());
EXPECT_FALSE(throttle_.AdmitRequest());
time_.AdvanceSeconds(1);
EXPECT_TRUE(throttle_.AdmitRequest())
<< "Available tokens: " << throttle_.available_tokens();
}
TEST_F(GcsThrottleTest, Skippingtime_) {
EXPECT_EQ(0, throttle_.available_tokens());
time_.AdvanceSeconds(90);
EXPECT_EQ(9000000, throttle_.available_tokens());
}
TEST_F(GcsThrottleTest, BucketLimit) {
time_.AdvanceSeconds(120);
EXPECT_EQ(10000000, throttle_.available_tokens());
}
TEST_F(GcsThrottleTest, ReverseTime) {
time_.AdvanceSeconds(1);
EXPECT_EQ(100000, throttle_.available_tokens());
time_.AdvanceSeconds(-3600);
EXPECT_EQ(100000, throttle_.available_tokens());
time_.AdvanceSeconds(1);
EXPECT_EQ(200000, throttle_.available_tokens());
}
TEST(GcsThrottleDisabledTest, Disabled) {
TestTime time;
GcsThrottle throttle(&time);
ASSERT_FALSE(throttle.is_enabled());
EXPECT_EQ(0, throttle.available_tokens());
time.AdvanceSeconds(1);
EXPECT_EQ(100000, throttle.available_tokens());
EXPECT_TRUE(throttle.AdmitRequest());
EXPECT_EQ(99900, throttle.available_tokens());
time.AdvanceSeconds(1);
EXPECT_EQ(199900, throttle.available_tokens());
throttle.RecordResponse(128000000);
EXPECT_LT(0, throttle.available_tokens());
EXPECT_TRUE(throttle.AdmitRequest());
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/gcs_throttle.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/gcs_throttle_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
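
GcsThrottle is a token bucket with an injectable clock: tokens accrue at config_.token_rate per second up to config_.bucket_size, AdmitRequest spends tokens_per_request, and RecordResponse charges for the bytes actually transferred. The request_bytes_to_tokens conversion is not shown in this row, but the test's 99900 - 125000 = -25100 is consistent with one token per KiB of a 128 MB response, and the token_rate/bucket_size/tokens_per_request values below are likewise inferred from the test; treat all of them as assumptions in this standalone sketch.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Token-bucket sketch in the spirit of GcsThrottle; constants inferred
// from the unit test above, not taken from the real config defaults.
class ThrottleSketch {
 public:
  bool AdmitRequest(int64_t now_secs) {
    Refill(now_secs);
    if (tokens_ < kTokensPerRequest) return false;
    tokens_ -= kTokensPerRequest;
    return true;
  }
  void RecordResponse(int64_t now_secs, int64_t num_bytes) {
    Refill(now_secs);
    tokens_ -= num_bytes / 1024;  // assumed bytes-to-tokens conversion
  }
  int64_t tokens() const { return tokens_; }

 private:
  void Refill(int64_t now_secs) {
    // Clamp at zero so a clock that moves backwards cannot drain tokens.
    int64_t delta = std::max<int64_t>(0, now_secs - last_secs_);
    tokens_ = std::min(tokens_ + delta * kTokenRate, kBucketSize);
    last_secs_ = now_secs;
  }
  static constexpr int64_t kTokenRate = 100000, kBucketSize = 10000000,
                           kTokensPerRequest = 100;
  int64_t tokens_ = 0, last_secs_ = 0;
};

int main() {
  ThrottleSketch t;
  std::cout << t.AdmitRequest(0) << "\n";  // 0: no tokens accrued yet
  std::cout << t.AdmitRequest(1) << "\n";  // 1: 100000 tokens accrued
  t.RecordResponse(1, 128000000);          // large response overdraws the bucket
  std::cout << t.tokens() << "\n";         // -25100, matching the test
}
```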
829c19fc-b072-493f-add4-3d8563d80635 | cpp | google/libphonenumber | regexp_adapter | cpp/src/phonenumbers/regexp_adapter.h | cpp/test/phonenumbers/regexp_adapter_test.cc | #ifndef I18N_PHONENUMBERS_REGEXP_ADAPTER_H_
#define I18N_PHONENUMBERS_REGEXP_ADAPTER_H_
#include <cstddef>
#include <string>
namespace i18n {
namespace phonenumbers {
using std::string;
class RegExpInput {
public:
virtual ~RegExpInput() {}
virtual string ToString() const = 0;
};
class RegExp {
public:
virtual ~RegExp() {}
virtual bool Consume(RegExpInput* input_string,
bool anchor_at_start,
string* matched_string1,
string* matched_string2,
string* matched_string3,
string* matched_string4,
string* matched_string5,
string* matched_string6) const = 0;
inline bool Consume(RegExpInput* input_string, string* matched_string1,
string* matched_string2,
string* matched_string3,
string* matched_string4,
string* matched_string5,
string* matched_string6) const {
return Consume(input_string, true, matched_string1, matched_string2,
matched_string3, matched_string4, matched_string5,
matched_string6);
}
inline bool Consume(RegExpInput* input_string, string* matched_string1,
string* matched_string2,
string* matched_string3,
string* matched_string4,
string* matched_string5) const {
return Consume(input_string, true, matched_string1, matched_string2,
matched_string3, matched_string4, matched_string5, NULL);
}
inline bool Consume(RegExpInput* input_string, string* matched_string1,
string* matched_string2,
string* matched_string3,
string* matched_string4) const {
return Consume(input_string, true, matched_string1, matched_string2,
matched_string3, matched_string4, NULL, NULL);
}
inline bool Consume(RegExpInput* input_string,
string* matched_string1,
string* matched_string2,
string* matched_string3) const {
return Consume(input_string, true, matched_string1, matched_string2,
matched_string3, NULL, NULL, NULL);
}
inline bool Consume(RegExpInput* input_string,
string* matched_string1,
string* matched_string2) const {
return Consume(input_string, true, matched_string1, matched_string2, NULL,
NULL, NULL, NULL);
}
inline bool Consume(RegExpInput* input_string, string* matched_string) const {
return Consume(input_string, true, matched_string, NULL, NULL, NULL, NULL,
NULL);
}
inline bool Consume(RegExpInput* input_string) const {
return Consume(input_string, true, NULL, NULL, NULL, NULL, NULL, NULL);
}
inline bool FindAndConsume(RegExpInput* input_string,
string* matched_string) const {
return Consume(input_string, false, matched_string, NULL, NULL, NULL, NULL,
NULL);
}
virtual bool Match(const string& input_string,
bool full_match,
string* matched_string) const = 0;
inline bool PartialMatch(const string& input_string,
string* matched_string) const {
return Match(input_string, false, matched_string);
}
inline bool PartialMatch(const string& input_string) const {
return Match(input_string, false, NULL);
}
inline bool FullMatch(const string& input_string,
string* matched_string) const {
return Match(input_string, true, matched_string);
}
inline bool FullMatch(const string& input_string) const {
return Match(input_string, true, NULL);
}
virtual bool Replace(string* string_to_process,
bool global,
const string& replacement_string) const = 0;
inline bool Replace(string* string_to_process,
const string& replacement_string) const {
return Replace(string_to_process, false, replacement_string);
}
inline bool GlobalReplace(string* string_to_process,
const string& replacement_string) const {
return Replace(string_to_process, true, replacement_string);
}
};
class AbstractRegExpFactory {
public:
virtual ~AbstractRegExpFactory() {}
virtual RegExpInput* CreateInput(const string& utf8_input) const = 0;
virtual RegExp* CreateRegExp(const string& utf8_regexp) const = 0;
};
}
}
#endif | #include "phonenumbers/regexp_adapter.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "phonenumbers/base/memory/scoped_ptr.h"
#include "phonenumbers/stl_util.h"
#include "phonenumbers/stringutil.h"
#ifdef I18N_PHONENUMBERS_USE_RE2
#include "phonenumbers/regexp_adapter_re2.h"
#else
#include "phonenumbers/regexp_adapter_icu.h"
#endif
namespace i18n {
namespace phonenumbers {
using std::vector;
struct RegExpTestContext {
explicit RegExpTestContext(const string& name,
const AbstractRegExpFactory* factory)
: name(name),
factory(factory),
digits(factory->CreateRegExp("\\d+")),
parentheses_digits(factory->CreateRegExp("\\((\\d+)\\)")),
single_digit(factory->CreateRegExp("\\d")),
two_digit_groups(factory->CreateRegExp("(\\d+)-(\\d+)")),
six_digit_groups(factory->CreateRegExp(
"(\\d+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)-(\\d+)")) {}
const string name;
const scoped_ptr<const AbstractRegExpFactory> factory;
const scoped_ptr<const RegExp> digits;
const scoped_ptr<const RegExp> parentheses_digits;
const scoped_ptr<const RegExp> single_digit;
const scoped_ptr<const RegExp> two_digit_groups;
const scoped_ptr<const RegExp> six_digit_groups;
};
class RegExpAdapterTest : public testing::Test {
protected:
RegExpAdapterTest() {
#ifdef I18N_PHONENUMBERS_USE_RE2
contexts_.push_back(
new RegExpTestContext("RE2", new RE2RegExpFactory()));
#else
contexts_.push_back(
new RegExpTestContext("ICU Regex", new ICURegExpFactory()));
#endif
}
~RegExpAdapterTest() { gtl::STLDeleteElements(&contexts_); }
static string ErrorMessage(const RegExpTestContext& context) {
return StrCat("Test failed with ", context.name, " implementation.");
}
typedef vector<const RegExpTestContext*>::const_iterator TestContextIterator;
vector<const RegExpTestContext*> contexts_;
};
TEST_F(RegExpAdapterTest, TestConsumeNoMatch) {
for (vector<const RegExpTestContext*>::const_iterator it = contexts_.begin();
it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const scoped_ptr<RegExpInput> input(
context.factory->CreateInput("+1-123-456-789"));
ASSERT_FALSE(context.digits->Consume(
input.get(), true, NULL, NULL, NULL, NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("+1-123-456-789", input->ToString()) << ErrorMessage(context);
string res1;
ASSERT_FALSE(context.parentheses_digits->Consume(
input.get(), true, &res1, NULL, NULL, NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("+1-123-456-789", input->ToString()) << ErrorMessage(context);
ASSERT_EQ("", res1) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestConsumeWithNull) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const AbstractRegExpFactory& factory = *context.factory;
const scoped_ptr<RegExpInput> input(factory.CreateInput("+123"));
const scoped_ptr<const RegExp> plus_sign(factory.CreateRegExp("(\\+)"));
ASSERT_TRUE(plus_sign->Consume(input.get(), true, NULL, NULL, NULL, NULL,
NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("123", input->ToString()) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestConsumeRetainsMatches) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const scoped_ptr<RegExpInput> input(
context.factory->CreateInput("1-123-456-789"));
string res1, res2;
ASSERT_TRUE(context.two_digit_groups->Consume(
input.get(), true, &res1, &res2, NULL, NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("-456-789", input->ToString()) << ErrorMessage(context);
ASSERT_EQ("1", res1) << ErrorMessage(context);
ASSERT_EQ("123", res2) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestFindAndConsume) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const scoped_ptr<RegExpInput> input(
context.factory->CreateInput("+1-123-456-789"));
const scoped_ptr<RegExpInput> input_with_six_digit_groups(
context.factory->CreateInput("111-222-333-444-555-666"));
ASSERT_TRUE(context.digits->Consume(input.get(), false, NULL, NULL, NULL,
NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("-123-456-789", input->ToString()) << ErrorMessage(context);
ASSERT_TRUE(context.digits->Consume(input.get(), false, NULL, NULL, NULL,
NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("-456-789", input->ToString()) << ErrorMessage(context);
ASSERT_FALSE(context.parentheses_digits->Consume(
input.get(), false, NULL, NULL, NULL, NULL, NULL, NULL))
<< ErrorMessage(context);
ASSERT_EQ("-456-789", input->ToString()) << ErrorMessage(context);
string res1, res2;
ASSERT_TRUE(context.two_digit_groups->Consume(
input.get(), false, &res1, &res2, NULL, NULL, NULL, NULL))
<< ErrorMessage(context);
printf("previous input: %s", input.get()->ToString().c_str());
ASSERT_EQ("", input->ToString()) << ErrorMessage(context);
ASSERT_EQ("456", res1) << ErrorMessage(context);
ASSERT_EQ("789", res2) << ErrorMessage(context);
string mat1, mat2, res3, res4, res5, res6;
ASSERT_TRUE(context.six_digit_groups->Consume(
input_with_six_digit_groups.get(), false, &mat1, &mat2, &res3, &res4,
&res5, &res6))
<< ErrorMessage(context);
printf("Present input: %s",
input_with_six_digit_groups.get()->ToString().c_str());
ASSERT_EQ("", input_with_six_digit_groups->ToString())
<< ErrorMessage(context);
ASSERT_EQ("111", mat1) << ErrorMessage(context);
ASSERT_EQ("222", mat2) << ErrorMessage(context);
ASSERT_EQ("333", res3) << ErrorMessage(context);
ASSERT_EQ("444", res4) << ErrorMessage(context);
ASSERT_EQ("555", res5) << ErrorMessage(context);
ASSERT_EQ("666", res6) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestPartialMatch) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const AbstractRegExpFactory& factory = *context.factory;
const scoped_ptr<const RegExp> reg_exp(factory.CreateRegExp("([\\da-z]+)"));
string matched;
EXPECT_TRUE(reg_exp->PartialMatch("12345af", &matched))
<< ErrorMessage(context);
EXPECT_EQ("12345af", matched) << ErrorMessage(context);
EXPECT_TRUE(reg_exp->PartialMatch("12345af", NULL))
<< ErrorMessage(context);
EXPECT_TRUE(reg_exp->PartialMatch("[12]", &matched))
<< ErrorMessage(context);
EXPECT_EQ("12", matched) << ErrorMessage(context);
matched.clear();
EXPECT_FALSE(reg_exp->PartialMatch("[]", &matched))
<< ErrorMessage(context);
EXPECT_EQ("", matched) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestFullMatch) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const AbstractRegExpFactory& factory = *context.factory;
const scoped_ptr<const RegExp> reg_exp(factory.CreateRegExp("([\\da-z]+)"));
string matched;
EXPECT_TRUE(reg_exp->FullMatch("12345af", &matched))
<< ErrorMessage(context);
EXPECT_EQ("12345af", matched) << ErrorMessage(context);
EXPECT_TRUE(reg_exp->FullMatch("12345af", NULL)) << ErrorMessage(context);
matched.clear();
EXPECT_FALSE(reg_exp->FullMatch("[12]", &matched)) << ErrorMessage(context);
EXPECT_EQ("", matched) << ErrorMessage(context);
matched.clear();
EXPECT_FALSE(reg_exp->FullMatch("[]", &matched)) << ErrorMessage(context);
EXPECT_EQ("", matched) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestReplace) {
for (vector<const RegExpTestContext*>::const_iterator it = contexts_.begin();
it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
string input("123-4567 ");
ASSERT_TRUE(context.single_digit->Replace(&input, "+"))
<< ErrorMessage(context);
ASSERT_EQ("+23-4567 ", input) << ErrorMessage(context);
ASSERT_TRUE(context.single_digit->Replace(&input, "+"))
<< ErrorMessage(context);
ASSERT_EQ("++3-4567 ", input) << ErrorMessage(context);
const scoped_ptr<const RegExp> single_letter(
context.factory->CreateRegExp("[a-z]"));
ASSERT_FALSE(single_letter->Replace(&input, "+")) << ErrorMessage(context);
ASSERT_EQ("++3-4567 ", input) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestReplaceWithGroup) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
string input = "123-4567 abc";
ASSERT_TRUE(context.two_digit_groups->Replace(&input, "$2"))
<< ErrorMessage(context);
ASSERT_EQ("4567 abc", input) << ErrorMessage(context);
input = "123-4567";
ASSERT_TRUE(context.two_digit_groups->Replace(&input, "$1"))
<< ErrorMessage(context);
ASSERT_EQ("123", input) << ErrorMessage(context);
input = "123-4567";
ASSERT_TRUE(context.two_digit_groups->Replace(&input, "$2"))
<< ErrorMessage(context);
ASSERT_EQ("4567", input) << ErrorMessage(context);
input = "123-4567";
ASSERT_TRUE(context.two_digit_groups->Replace(&input, "$1 $2"))
<< ErrorMessage(context);
ASSERT_EQ("123 4567", input) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestReplaceWithDollarSign) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
string input = "123-4567";
ASSERT_TRUE(context.two_digit_groups->Replace(&input, "\\$1 \\$2"))
<< ErrorMessage(context);
ASSERT_EQ("$1 $2", input) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestGlobalReplace) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
string input("123-4567 ");
ASSERT_TRUE(context.single_digit->GlobalReplace(&input, "*"))
<< ErrorMessage(context);
ASSERT_EQ("***-**** ", input) << ErrorMessage(context);
ASSERT_FALSE(context.single_digit->GlobalReplace(&input, "*"))
<< ErrorMessage(context);
ASSERT_EQ("***-**** ", input) << ErrorMessage(context);
}
}
TEST_F(RegExpAdapterTest, TestUtf8) {
for (TestContextIterator it = contexts_.begin(); it != contexts_.end();
++it) {
const RegExpTestContext& context = **it;
const AbstractRegExpFactory& factory = *context.factory;
const scoped_ptr<const RegExp> reg_exp(factory.CreateRegExp(
"\xE2\x84\xA1\xE2\x8A\x8F([\xCE\xB1-\xCF\x89]*)\xE2\x8A\x90"
));
string matched;
EXPECT_FALSE(reg_exp->Match(
    "\xE2\x84\xA1\xE2\x8A\x8F" "123\xE2\x8A\x90", true,
    &matched)) << ErrorMessage(context);
EXPECT_TRUE(reg_exp->Match(
    "\xE2\x84\xA1\xE2\x8A\x8F\xCE\xB1\xCE\xB2\xE2\x8A\x90",
    true, &matched)) << ErrorMessage(context);
EXPECT_EQ("\xCE\xB1\xCE\xB2", matched) << ErrorMessage(context);
}
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/regexp_adapter.h | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/regexp_adapter_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
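
The RegExp class above is a backend-neutral interface: the many Consume overloads and PartialMatch/FullMatch helpers all funnel into two virtuals, Consume(..., anchor_at_start, ...) and Match(input, full_match, matched), with RE2 or ICU supplying the implementation behind AbstractRegExpFactory. A toy backend for the Match half using std::regex (not one of the real adapters; the class name is hypothetical) shows the funneling:

```cpp
#include <iostream>
#include <regex>
#include <string>

// Toy backend for the Match() half of the RegExp interface, built on
// std::regex instead of RE2/ICU; illustrative only.
class StdRegExp {
 public:
  explicit StdRegExp(const std::string& pattern) : re_(pattern) {}

  // Single virtual-style entry point: full vs. partial match, optional
  // capture of the first group (or the whole match if no groups).
  bool Match(const std::string& input, bool full_match,
             std::string* matched) const {
    std::smatch m;
    bool ok = full_match ? std::regex_match(input, m, re_)
                         : std::regex_search(input, m, re_);
    if (ok && matched) *matched = m.size() > 1 ? m[1].str() : m[0].str();
    return ok;
  }
  // The convenience wrappers just fix the full_match flag, as above.
  bool FullMatch(const std::string& in, std::string* out) const {
    return Match(in, true, out);
  }
  bool PartialMatch(const std::string& in, std::string* out) const {
    return Match(in, false, out);
  }

 private:
  std::regex re_;
};

int main() {
  StdRegExp re("([0-9a-z]+)");
  std::string matched;
  std::cout << re.FullMatch("12345af", &matched) << " " << matched << "\n";  // 1 12345af
  std::cout << re.FullMatch("[12]", &matched) << "\n";                       // 0
  std::cout << re.PartialMatch("[12]", &matched) << " " << matched << "\n";  // 1 12
}
```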
b171af87-ba8d-4846-b355-b1c99330705c | cpp | tensorflow/tensorflow | encapsulate_tpu_computations_pass | tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.cc | tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass_test.cc | #include "tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <set>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/tpu/tpu_compile_interface.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
const char* const kTPUReplicatedInput = "TPUReplicatedInput";
const char* const kTPUReplicatedOutput = "TPUReplicatedOutput";
const char* const kPivotForClusterAttr = "_pivot_for_cluster";
const char* const kTPUPartitionedInput = "TPUPartitionedInput";
const char* const kTPUPartitionedInputV2 = "TPUPartitionedInputV2";
Status GetIndexAttr(const Node& n, int num_args, int* index) {
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", index));
if (*index < 0 || *index >= num_args) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid ", n.type_string(), " number ", *index));
}
return absl::OkStatus();
}
Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
auto is_replicated_input = [&](const Node& n, bool* is_packed = nullptr) {
CHECK_EQ("_Arg", n.type_string());
int index;
TF_CHECK_OK(GetIndexAttr(n, arg_source_tensors.size(), &index));
bool ret =
arg_source_tensors.at(index).node->type_string() == kTPUReplicatedInput;
if (is_packed) {
if (!ret || !GetNodeAttr(arg_source_tensors.at(index).node->attrs(),
"is_packed", is_packed)
.ok()) {
*is_packed = false;
}
}
return ret;
};
auto is_guaranteed_constant = [&](const Node& n) {
bool guaranteed_constant = false;
if (!GetNodeAttr(n.attrs(), "_is_guaranteed_constant", &guaranteed_constant)
.ok()) {
return false;
}
return guaranteed_constant && !is_replicated_input(n);
};
Graph* graph = graph_ptr->get();
Node* metadata_node = nullptr;
const int num_args = input_permutation->size();
const int num_retvals = output_permutation->size();
std::vector<Node*> args;
std::vector<Node*> retvals;
args.reserve(num_args);
retvals.reserve(num_retvals);
for (Node* n : graph->nodes()) {
if (n->type_string() == "_Arg") {
args.push_back(n);
} else if (n->type_string() == "_Retval") {
retvals.push_back(n);
} else if (n->type_string() == "TPUReplicateMetadata") {
metadata_node = n;
} else if (!absl::StrContains(n->requested_device(),
DEVICE_TPU_REPLICATED_CORE)) {
n->set_assigned_device_name(
absl::StrCat("/device:", DEVICE_TPU_REPLICATED_CORE));
}
}
if (metadata_node == nullptr) {
return absl::InvalidArgumentError("Missing TPUReplicateMetadata node");
}
for (const auto& attr : metadata_node->attrs()) {
if (attr.first == "computation_shape") {
std::vector<int> shape;
TF_RETURN_IF_ERROR(
GetNodeAttr(metadata_node->attrs(), "computation_shape", &shape));
if (!shape.empty()) {
int64_t num_cores_per_replica = 1LL;
for (int dim : shape) {
num_cores_per_replica *= dim;
}
call_def->mutable_attr()->erase("num_cores_per_replica");
AddNodeAttr("num_cores_per_replica", num_cores_per_replica, call_def);
}
} else {
call_def->mutable_attr()->insert(attr);
}
}
MergeDebugInfo(NodeDebugInfo(metadata_node->def()), call_def);
graph->RemoveNode(metadata_node);
if (std::find(args.begin(), args.end(), nullptr) != args.end()) {
return absl::InvalidArgumentError("Missing or non-consecutive arguments");
}
std::sort(args.begin(), args.end(), [&](Node* a, Node* b) {
bool a_is_guaranteed_constant = is_guaranteed_constant(*a);
bool b_is_guaranteed_constant = is_guaranteed_constant(*b);
bool a_is_packed;
bool b_is_packed;
bool a_not_replicated = !is_replicated_input(*a, &a_is_packed);
bool b_not_replicated = !is_replicated_input(*b, &b_is_packed);
bool a_is_resource = (a->output_type(0) == DT_RESOURCE);
bool b_is_resource = (b->output_type(0) == DT_RESOURCE);
absl::string_view a_name(a->name());
absl::string_view b_name(b->name());
return std::tie(a_is_guaranteed_constant, a_not_replicated, a_is_packed,
a_is_resource, a_name) <
std::tie(b_is_guaranteed_constant, b_not_replicated, b_is_packed,
b_is_resource, b_name);
});
std::sort(retvals.begin(), retvals.end(),
[](Node* a, Node* b) { return a->name() < b->name(); });
int variable_start_index = num_args;
int guaranteed_const_start_index = num_args;
for (int i = 0; i < num_args; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*args[i], num_args, &index));
if (args[i]->output_type(0) == DT_RESOURCE &&
!is_replicated_input(*args[i]) && variable_start_index == num_args) {
variable_start_index = i;
} else if (is_guaranteed_constant(*args[i]) &&
guaranteed_const_start_index == num_args) {
guaranteed_const_start_index = i;
}
(*input_permutation)[index] = i;
args[i]->AddAttr("index", i);
}
VLOG(4) << "variable_start_index: " << variable_start_index
<< " guaranteed_const_start_index: " << guaranteed_const_start_index;
for (int i = 0; i < num_retvals; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*retvals[i], num_retvals, &index));
(*output_permutation)[index] = i;
retvals[i]->AddAttr("index", i);
}
AddNodeAttr(kTPUReplicateAttr, call_def->name(), call_def);
AddNodeAttr("_variable_start_index", variable_start_index, call_def);
AddNodeAttr("_guaranteed_const_start_index", guaranteed_const_start_index,
call_def);
TF_ASSIGN_OR_RETURN(std::string serialized,
SerializeGraphDeterministic(*graph));
uint64_t fingerprint =
TpuCompileInterface::Get()->FingerprintString(serialized);
LOG(INFO) << "Subgraph fingerprint:" << fingerprint;
call_def->set_op(absl::StrCat(call_def->op(), "_", fingerprint));
return absl::OkStatus();
}
DataType EdgeType(const Edge* edge) {
return edge->dst()->input_type(edge->dst_input());
}
void AddControlInputs(const Node& node, gtl::FlatSet<Node*>* deps) {
for (const Edge* edge : node.in_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->src());
}
}
}
void AddControlOutputs(const Node& node, gtl::FlatSet<Node*>* deps) {
for (const Edge* edge : node.out_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->dst());
}
}
}
Status RemoveIdentityNodesForArgRetval(Graph* g) {
std::vector<Node*> identity_nodes;
for (Node* n : g->nodes()) {
if (n->type_string() == "Identity" &&
(HasNodeAttr(n->def(), "_tpu_input_identity") ||
HasNodeAttr(n->def(), "_tpu_output_identity"))) {
identity_nodes.push_back(n);
}
}
for (Node* n : identity_nodes) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(n->input_edge(0, &input_edge));
std::vector<const Edge*> output_edges;
for (const Edge* e : n->out_edges()) {
output_edges.push_back(e);
}
for (const Edge* e : output_edges) {
if (e->IsControlEdge()) {
Node* dst = e->dst();
g->RemoveEdge(e);
g->AddControlEdge(input_edge->src(), dst);
} else {
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
}
g->RemoveNode(n);
}
return absl::OkStatus();
}
Status UpdateMirroredVariableIndices(int additional_per_replica_inputs,
Node* xla_node) {
std::vector<int> mirrored_variable_indices;
if (xla_node->attrs().Find(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR) !=
nullptr) {
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(),
TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
&mirrored_variable_indices));
}
if (!mirrored_variable_indices.empty()) {
for (int i = 0; i < mirrored_variable_indices.size(); ++i)
mirrored_variable_indices[i] += additional_per_replica_inputs;
xla_node->ClearAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR);
xla_node->AddAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
mirrored_variable_indices);
}
return absl::OkStatus();
}
Status MoveHeadOutsideCompilationToHost(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, const std::string& cluster_name, Graph* g,
Graph* xla_graph, Node* xla_node, Node* pivot_node) {
std::vector<Node*> oc_nodes_at_head;
const std::string kOnlyArgOrOcInputAttrName = "_xla_only_arg_or_oc_input";
ReverseDFS(
*xla_graph, nullptr,
[&](Node* n) {
bool has_non_arg_or_oc_input = false;
for (const Edge* e : n->in_edges()) {
if (e->src() == xla_graph->source_node()) {
continue;
}
if (!e->src()->IsArg() &&
(!HasNodeAttr(e->src()->def(), outside_compilation_attr_name) ||
!HasNodeAttr(e->src()->def(), kOnlyArgOrOcInputAttrName))) {
has_non_arg_or_oc_input = true;
break;
}
}
if (HasNodeAttr(n->def(), outside_compilation_attr_name) &&
!has_non_arg_or_oc_input &&
!HasNodeAttr(n->def(), kXlaIsPlaceholderForArg)) {
n->AddAttr(kOnlyArgOrOcInputAttrName, true);
oc_nodes_at_head.push_back(n);
}
},
NodeComparatorName());
std::vector<Node*> const_nodes_to_remove;
for (Node* n : oc_nodes_at_head) {
if (n->type_string() != "Const") {
continue;
}
std::vector<const Edge*> edges_to_replace;
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge() &&
HasNodeAttr(e->dst()->def(), outside_compilation_attr_name) &&
!HasNodeAttr(e->dst()->def(), kOnlyArgOrOcInputAttrName)) {
edges_to_replace.push_back(e);
}
}
if (edges_to_replace.empty()) {
continue;
}
Node* const_copy = xla_graph->CopyNode(n);
for (const Edge* e : edges_to_replace) {
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(const_copy, 0, dst, dst_input);
}
xla_graph->AddControlEdge(xla_graph->source_node(), const_copy);
bool has_output_edge = false;
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge()) {
has_output_edge = true;
break;
}
}
if (!has_output_edge) {
const_nodes_to_remove.push_back(n);
}
}
for (Node* n : const_nodes_to_remove) {
xla_graph->RemoveNode(n);
oc_nodes_at_head.erase(
std::remove(oc_nodes_at_head.begin(), oc_nodes_at_head.end(), n),
oc_nodes_at_head.end());
}
if (VLOG_IS_ON(5)) {
for (Node* n : oc_nodes_at_head) {
VLOG(5) << "oc_nodes_at_head: " << n->DebugString();
}
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int old_num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
VLOG(5) << "old_num_per_replica_inputs: " << old_num_per_replica_inputs;
absl::flat_hash_map<Node*, std::vector<Node*>> node_images;
for (Node* n : oc_nodes_at_head) {
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
NodeDef copy_def = n->def();
copy_def.set_name(absl::StrCat(n->name(), "_head_oc/R", replica_id));
copy_def.clear_device();
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
copy_node->AddAttr(kXlaReplicaIdAttrName, replica_id);
copy_node->AddAttr(kTPUReplicateAttr, cluster_name);
for (const Edge* e : n->in_edges()) {
if (e->src() == xla_graph->source_node()) {
continue;
}
if (e->src()->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->attrs(), "index", &index));
const int new_index =
(index < old_num_per_replica_inputs)
? (old_num_per_replica_inputs * replica_id + index)
: (old_num_per_replica_inputs * num_replicas +
(index - old_num_per_replica_inputs));
const Edge* original_edge = input_edges.at(new_index);
g->AddEdge(original_edge->src(), original_edge->src_output(),
copy_node, e->dst_input());
} else {
g->AddEdge(node_images[e->src()][replica_id], e->src_output(),
copy_node, e->dst_input());
}
}
g->AddControlEdge(copy_node, xla_node);
if (pivot_node) {
g->AddControlEdge(pivot_node, copy_node);
}
node_images[n].push_back(copy_node);
}
}
std::vector<const Edge*> oc_output_edges;
std::vector<DataType> new_arg_types;
for (Node* n : oc_nodes_at_head) {
for (const Edge* e : n->out_edges()) {
if (!e->IsControlEdge() &&
node_images.find(e->dst()) == node_images.end()) {
VLOG(5) << "oc_output_edges: " << e->DebugString();
oc_output_edges.push_back(e);
new_arg_types.push_back(e->src()->output_type(e->src_output()));
}
}
}
int new_num_per_replica_inputs =
old_num_per_replica_inputs + oc_output_edges.size();
VLOG(5) << "new_num_per_replica_inputs: " << new_num_per_replica_inputs;
int num_variables;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "NumVariables", &num_variables));
std::vector<DataType> broadcast_input_types, guaranteed_constant_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tbroadcast_inputs",
&broadcast_input_types));
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tguaranteed_constants",
&guaranteed_constant_types));
int num_other_inputs = num_distributed_vars + num_variables +
broadcast_input_types.size() +
guaranteed_constant_types.size();
VLOG(5) << "num_other_inputs: " << num_other_inputs;
std::vector<DataType> new_input_types;
new_input_types.reserve(num_replicas * new_num_per_replica_inputs +
num_distributed_vars);
for (int replica_id = 0; replica_id < num_replicas; ++replica_id) {
for (int i = 0; i < old_num_per_replica_inputs; ++i) {
new_input_types.push_back(input_types[i]);
}
for (int i = old_num_per_replica_inputs; i < new_num_per_replica_inputs;
++i) {
new_input_types.push_back(new_arg_types[i - old_num_per_replica_inputs]);
}
}
const int num_new_per_replica_input_types = new_input_types.size();
for (int i = input_types.size() - num_distributed_vars;
i < input_types.size(); i++) {
new_input_types.push_back(input_types[i]);
}
xla_node->ClearAttr("Tinputs");
xla_node->AddAttr("Tinputs", new_input_types);
TF_RETURN_IF_ERROR(UpdateMirroredVariableIndices(
oc_output_edges.size(), xla_node));
int new_variable_start_index =
num_new_per_replica_input_types / num_replicas + num_distributed_vars +
broadcast_input_types.size();
if (xla_node->attrs().Find("_variable_start_index") != nullptr) {
xla_node->ClearAttr("_variable_start_index");
xla_node->AddAttr("_variable_start_index", new_variable_start_index);
}
int new_guaranteed_const_start_index =
new_variable_start_index + num_variables;
if (xla_node->attrs().Find("_guaranteed_const_start_index") != nullptr) {
xla_node->ClearAttr("_guaranteed_const_start_index");
xla_node->AddAttr("_guaranteed_const_start_index",
new_guaranteed_const_start_index);
}
std::vector<const Edge*> new_input_edges(
num_replicas * new_num_per_replica_inputs + num_other_inputs);
int end_input_index =
num_replicas * new_num_per_replica_inputs + num_other_inputs - 1;
int start_input_index = end_input_index + 1 - num_other_inputs;
for (int input_index = end_input_index; input_index >= start_input_index;
input_index--) {
const Edge* e =
input_edges.at(input_index - num_replicas * new_arg_types.size());
Node* src = e->src();
int src_output = e->src_output();
g->RemoveEdge(e);
const Edge* new_input_edge =
g->AddEdge(src, src_output, xla_node, input_index);
new_input_edges[input_index] = new_input_edge;
}
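  // Detach the original per-replica input edges and re-add them at their new,
  // shifted positions, interleaved with the inputs coming from the moved head
  // nodes.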
std::vector<std::pair<Node*, int>> per_replica_inputs;
std::vector<const Edge*> old_per_replica_edges;
for (int i = 0; i < old_num_per_replica_inputs * num_replicas; i++) {
const Edge* e = input_edges.at(i);
per_replica_inputs.push_back(std::make_pair(e->src(), e->src_output()));
old_per_replica_edges.push_back(e);
}
for (const Edge* e : old_per_replica_edges) {
g->RemoveEdge(e);
}
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int input_index = 0; input_index < old_num_per_replica_inputs;
input_index++) {
Node* src = per_replica_inputs[replica_id * old_num_per_replica_inputs +
input_index]
.first;
int src_output =
per_replica_inputs[replica_id * old_num_per_replica_inputs +
input_index]
.second;
const Edge* new_input_edge =
g->AddEdge(src, src_output, xla_node,
replica_id * new_num_per_replica_inputs + input_index);
      new_input_edges[replica_id * new_num_per_replica_inputs + input_index] =
          new_input_edge;
}
for (int input_index = old_num_per_replica_inputs;
input_index < new_num_per_replica_inputs; input_index++) {
Node* original_src =
oc_output_edges[input_index - old_num_per_replica_inputs]->src();
int original_src_output =
oc_output_edges[input_index - old_num_per_replica_inputs]
->src_output();
Node* src = node_images[original_src][replica_id];
const Edge* new_input_edge =
g->AddEdge(src, original_src_output, xla_node,
replica_id * new_num_per_replica_inputs + input_index);
      new_input_edges[replica_id * new_num_per_replica_inputs + input_index] =
          new_input_edge;
}
}
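  // Shift the "index" attribute of existing _Arg nodes in `xla_graph` to make
  // room for the new per-replica arguments.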
for (Node* n : xla_graph->nodes()) {
if (n->IsArg()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
if (index >= old_num_per_replica_inputs) {
index += new_arg_types.size();
n->ClearAttr("index");
n->AddAttr("index", index);
}
}
}
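  // Add an _Arg node in `xla_graph` for each new per-replica input, splicing
  // it in where the moved head node used to feed the XLA computation.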
for (int i = old_num_per_replica_inputs; i < new_num_per_replica_inputs;
i++) {
NodeDefBuilder arg_builder(absl::StrCat("arg_", i),
FunctionLibraryDefinition::kArgOp);
arg_builder.Attr("T", new_arg_types[i - old_num_per_replica_inputs]);
arg_builder.Attr("index", i);
NodeDef arg_def;
TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def));
TF_ASSIGN_OR_RETURN(Node * arg_node, xla_graph->AddNode(arg_def));
const Edge* original_edge = oc_output_edges[i - old_num_per_replica_inputs];
Node* dst = original_edge->dst();
int dst_input = original_edge->dst_input();
xla_graph->RemoveEdge(original_edge);
xla_graph->AddEdge(arg_node, 0, dst, dst_input);
}
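  // For each lifted-arg Identity node among the moved head nodes, create a
  // tagged Placeholder in `xla_graph` and a replicated IdentityN node in the
  // host graph that carries the lifted value to every replica.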
for (Node* n : oc_nodes_at_head) {
bool is_lifted_arg;
std::string outside_compilation_attr;
if (!TryGetNodeAttr(n->def(), kXlaIsLiftedArgAttrName, &is_lifted_arg) ||
!TryGetNodeAttr(n->def(), kOutsideCompilationAttr,
&outside_compilation_attr)) {
continue;
}
TF_RET_CHECK(n->IsIdentity());
NodeDefBuilder ph_builder(absl::StrCat("placeholder_", n->name()),
"Placeholder");
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
ph_builder.Attr("dtype", dtype);
ph_builder.Attr(kXlaIsLiftedArgAttrName, true);
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_attr);
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
    // The placeholder only needs to exist in `xla_graph`; its Node* is unused.
    TF_RETURN_IF_ERROR(xla_graph->AddNode(ph_def).status());
Node* input_node;
TF_RETURN_IF_ERROR(n->input_node(0, &input_node));
TF_RET_CHECK(input_node->type_string() == "_Arg");
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(input_node->def(), "index", &index));
TF_RET_CHECK(index >= new_num_per_replica_inputs + num_distributed_vars);
const Edge* input_edge =
new_input_edges.at(num_replicas * new_num_per_replica_inputs + index -
new_num_per_replica_inputs);
NodeDefBuilder id_builder(absl::StrCat("lifted_arg_input_", index),
"IdentityN");
DataType input_dtype =
input_edge->src()->output_type(input_edge->src_output());
id_builder.Attr("T", std::vector<DataType>(num_replicas, input_dtype));
std::vector<NodeDefBuilder::NodeOut> inputs(
num_replicas,
NodeDefBuilder::NodeOut{input_edge->src()->name(),
input_edge->src_output(), input_dtype});
id_builder.Attr(kXlaOutsideCompilationInputsAttrName,
outside_compilation_attr);
id_builder.Input(inputs);
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
for (int i = 0; i < num_replicas; i++) {
g->AddEdge(input_edge->src(), input_edge->src_output(), id_node, i);
}
}
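  // The moved nodes now live in the host graph; delete the originals from
  // `xla_graph`.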
for (Node* n : oc_nodes_at_head) {
xla_graph->RemoveNode(n);
}
VLOG(4) << "MoveHeadOutsideCompilationToHost host graph: "
<< DumpGraphToFile(absl::StrCat("move_head_oc_host_", xla_func_name),
*g);
VLOG(4) << "MoveHeadOutsideCompilationToHost XLA graph: "
<< DumpGraphToFile(absl::StrCat("move_head_oc_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
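// Removes inputs of the "_TPUReplicate" node `xla_node` whose _Arg nodes in
// `xla_graph` have no consumers besides the sink node. The surviving argument
// indices are compacted, and the host-side input edges together with the
// "Tinputs", "num_distributed_variables", "Tbroadcast_inputs", "NumVariables",
// "Tguaranteed_constants" and start-index attributes are rewritten to match.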
Status RemoveUnusedXlaInput(const std::string& xla_func_name, Graph* g,
Graph* xla_graph, Node* xla_node) {
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tinputs", &input_types));
std::vector<int> mirrored_variable_indices;
if (xla_node->attrs().Find(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR) !=
nullptr) {
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(),
TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
&mirrored_variable_indices));
}
std::vector<DataType> broadcast_input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tbroadcast_inputs",
&broadcast_input_types));
std::vector<DataType> guaranteed_constant_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tguaranteed_constants",
&guaranteed_constant_types));
int num_variables;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "NumVariables", &num_variables));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
std::set<int> arg_indices_to_remove;
std::vector<Node*> arg_nodes_to_update, nodes_to_remove;
int num_args = 0, num_removed_per_replica_inputs = 0,
num_removed_distributed_vars = 0;
for (Node* n : xla_graph->nodes()) {
if (!n->IsArg()) {
continue;
}
bool has_output = false;
for (const Edge* e : n->out_edges()) {
if (e->dst() != xla_graph->sink_node()) {
has_output = true;
break;
}
}
num_args++;
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
if (has_output) {
arg_nodes_to_update.push_back(n);
continue;
}
arg_indices_to_remove.insert(index);
if (index < num_per_replica_inputs) {
num_removed_per_replica_inputs++;
} else if (index < num_per_replica_inputs + num_distributed_vars) {
num_removed_distributed_vars++;
}
nodes_to_remove.push_back(n);
}
for (Node* n : nodes_to_remove) {
xla_graph->RemoveNode(n);
}
std::map<int, int> arg_index_mapping;
int new_arg_index = 0;
for (int i = 0; i < num_args; i++) {
if (arg_indices_to_remove.find(i) != arg_indices_to_remove.end()) {
continue;
} else {
arg_index_mapping[i] = new_arg_index;
new_arg_index++;
}
}
for (Node* n : arg_nodes_to_update) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
n->ClearAttr("index");
n->AddAttr("index", arg_index_mapping[index]);
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
const int num_new_per_replica_inputs =
num_per_replica_inputs - num_removed_per_replica_inputs;
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_per_replica_inputs; j++) {
auto iter = arg_index_mapping.find(j);
if (iter != arg_index_mapping.end()) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + j);
Node* src = e->src();
int src_output = e->src_output();
int dst_input = i * num_new_per_replica_inputs + iter->second;
g->RemoveEdge(e);
g->AddEdge(src, src_output, xla_node, dst_input);
} else {
const Edge* e = input_edges.at(i * num_per_replica_inputs + j);
g->RemoveEdge(e);
}
}
}
for (int i = num_replicas * num_per_replica_inputs;
i < xla_node->num_inputs(); i++) {
int arg_index =
num_per_replica_inputs + i - num_replicas * num_per_replica_inputs;
auto iter = arg_index_mapping.find(arg_index);
if (iter != arg_index_mapping.end()) {
const Edge* e = input_edges.at(i);
Node* src = e->src();
int src_output = e->src_output();
int dst_input = num_replicas * num_new_per_replica_inputs + iter->second -
num_new_per_replica_inputs;
g->RemoveEdge(e);
g->AddEdge(src, src_output, xla_node, dst_input);
} else {
const Edge* e = input_edges.at(i);
g->RemoveEdge(e);
}
}
std::vector<DataType> new_input_types;
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_per_replica_inputs; j++) {
auto iter = arg_index_mapping.find(j);
if (iter != arg_index_mapping.end()) {
new_input_types.push_back(input_types[iter->first]);
}
}
}
for (int i = 0; i < num_distributed_vars; ++i) {
auto iter = arg_index_mapping.find(i + num_per_replica_inputs);
if (iter != arg_index_mapping.end()) {
new_input_types.push_back(
input_types[iter->first - num_per_replica_inputs +
num_per_replica_inputs * num_replicas]);
}
}
xla_node->ClearAttr("Tinputs");
xla_node->AddAttr("Tinputs", new_input_types);
const int num_new_distributed_vars =
num_distributed_vars - num_removed_distributed_vars;
xla_node->ClearAttr("num_distributed_variables");
xla_node->AddAttr("num_distributed_variables", num_new_distributed_vars);
if (!mirrored_variable_indices.empty()) {
std::vector<int> new_mirrored_variable_indices;
absl::flat_hash_set<int> old_mirrored_variable_indices_set;
for (int index : mirrored_variable_indices) {
old_mirrored_variable_indices_set.insert(index);
}
for (int i = 0; i < num_per_replica_inputs + num_distributed_vars; i++) {
auto iter = arg_index_mapping.find(i);
if (iter != arg_index_mapping.end() &&
old_mirrored_variable_indices_set.contains(iter->first)) {
new_mirrored_variable_indices.push_back(iter->second);
}
}
xla_node->ClearAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR);
xla_node->AddAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
new_mirrored_variable_indices);
}
int num_replicated_inputs = num_per_replica_inputs + num_distributed_vars;
std::vector<DataType> new_broadcast_input_types;
for (int i = 0; i < broadcast_input_types.size(); i++) {
int arg_index = num_replicated_inputs + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_broadcast_input_types.push_back(broadcast_input_types[i]);
}
}
xla_node->ClearAttr("Tbroadcast_inputs");
xla_node->AddAttr("Tbroadcast_inputs", new_broadcast_input_types);
int new_num_variables = 0;
for (int i = 0; i < num_variables; i++) {
int arg_index = num_replicated_inputs + broadcast_input_types.size() + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_num_variables++;
}
}
xla_node->ClearAttr("NumVariables");
xla_node->AddAttr("NumVariables", new_num_variables);
std::vector<DataType> new_guaranteed_constant_types;
for (int i = 0; i < guaranteed_constant_types.size(); i++) {
int arg_index = num_replicated_inputs + broadcast_input_types.size() +
num_variables + i;
if (arg_index_mapping.find(arg_index) != arg_index_mapping.end()) {
new_guaranteed_constant_types.push_back(guaranteed_constant_types[i]);
}
}
xla_node->ClearAttr("Tguaranteed_constants");
xla_node->AddAttr("Tguaranteed_constants", new_guaranteed_constant_types);
int new_variable_start_index = num_new_per_replica_inputs +
num_new_distributed_vars +
new_broadcast_input_types.size();
if (xla_node->attrs().Find("_variable_start_index") != nullptr) {
xla_node->ClearAttr("_variable_start_index");
xla_node->AddAttr("_variable_start_index", new_variable_start_index);
}
int new_guaranteed_const_start_index =
new_variable_start_index + new_num_variables;
if (xla_node->attrs().Find("_guaranteed_const_start_index") != nullptr) {
xla_node->ClearAttr("_guaranteed_const_start_index");
xla_node->AddAttr("_guaranteed_const_start_index",
new_guaranteed_const_start_index);
}
VLOG(4) << "RemoveUnusedXlaInput host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_input_host_", xla_func_name), *g);
VLOG(4) << "RemoveUnusedXlaInput XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_input_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
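// Moves outside compilation nodes at the tail of the XLA computation (nodes
// whose outputs reach only _Retval nodes or other tail outside compilation
// nodes) into the host graph `g`, replicating each moved node once per
// replica. New _Retval nodes are added for the values the moved nodes
// consumed, and _Retval nodes left without inputs are fed by placeholders so
// they can be pruned later.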
Status MoveTailOutsideCompilationToHost(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, const std::string& cluster_name, Graph* g,
Graph* xla_graph, Node* xla_node, Node* pivot_node) {
std::vector<Node*> oc_nodes_at_tail;
const std::string kOnlyRetOrOcOutputAttrName = "_xla_only_ret_or_oc_output";
DFS(
*xla_graph, nullptr,
[&](Node* n) {
bool has_non_ret_or_oc_output = false;
for (const Edge* e : n->out_edges()) {
if (e->dst() == xla_graph->sink_node()) {
continue;
}
if (!e->dst()->IsRetval() &&
(!HasNodeAttr(e->dst()->def(), outside_compilation_attr_name) ||
!HasNodeAttr(e->dst()->def(), kOnlyRetOrOcOutputAttrName))) {
has_non_ret_or_oc_output = true;
break;
}
}
if (HasNodeAttr(n->def(), outside_compilation_attr_name) &&
!has_non_ret_or_oc_output) {
n->AddAttr(kOnlyRetOrOcOutputAttrName, true);
oc_nodes_at_tail.push_back(n);
}
},
NodeComparatorName());
if (VLOG_IS_ON(5)) {
for (Node* n : oc_nodes_at_tail) {
VLOG(5) << "oc_nodes_at_tail: " << n->DebugString();
}
}
std::vector<const Edge*> oc_input_edges;
std::vector<DataType> new_ret_types;
for (Node* n : oc_nodes_at_tail) {
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() &&
!HasNodeAttr(e->src()->def(), kOnlyRetOrOcOutputAttrName)) {
VLOG(5) << "oc_input_edges: " << e->DebugString();
oc_input_edges.push_back(e);
new_ret_types.push_back(e->src()->output_type(e->src_output()));
}
}
}
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "output_types", &output_types));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int old_num_replicated_outputs = output_types.size() / num_replicas;
int new_num_replicated_outputs =
old_num_replicated_outputs + oc_input_edges.size();
VLOG(5) << "old_num_replicated_outputs: " << old_num_replicated_outputs;
VLOG(5) << "new_num_replicated_outputs: " << new_num_replicated_outputs;
std::vector<DataType> new_output_types;
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int i = 0; i < old_num_replicated_outputs; i++) {
new_output_types.push_back(output_types[i]);
}
for (int i = old_num_replicated_outputs; i < new_num_replicated_outputs;
i++) {
new_output_types.push_back(new_ret_types[i - old_num_replicated_outputs]);
}
}
xla_node->ClearAttr("output_types");
xla_node->AddAttr("output_types", new_output_types);
std::vector<std::vector<std::pair<Node*, int>>> replicated_outputs(
old_num_replicated_outputs * num_replicas);
std::vector<const Edge*> old_replicated_edges;
for (const Edge* e : xla_node->out_edges()) {
if (e->src_output() >= 0 &&
e->src_output() < old_num_replicated_outputs * num_replicas) {
replicated_outputs[e->src_output()].push_back(
std::make_pair(e->dst(), e->dst_input()));
old_replicated_edges.push_back(e);
}
}
for (const Edge* e : old_replicated_edges) {
g->RemoveEdge(e);
}
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
for (int output_index = 0; output_index < old_num_replicated_outputs;
output_index++) {
for (const auto& node_input_pair :
replicated_outputs[replica_id * old_num_replicated_outputs +
output_index]) {
Node* dst = node_input_pair.first;
int dst_input = node_input_pair.second;
g->AddEdge(xla_node,
replica_id * new_num_replicated_outputs + output_index, dst,
dst_input);
}
}
}
absl::flat_hash_map<Node*, std::vector<Node*>> node_images;
for (Node* n : oc_nodes_at_tail) {
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
NodeDef copy_def = n->def();
copy_def.set_name(absl::StrCat(n->name(), "_tail_oc/R", replica_id));
copy_def.clear_device();
TF_ASSIGN_OR_RETURN(Node * copy_node, g->AddNode(copy_def));
copy_node->AddAttr(kXlaReplicaIdAttrName, replica_id);
copy_node->AddAttr(kTPUReplicateAttr, cluster_name);
for (const Edge* e : n->out_edges()) {
if (e->dst() == xla_graph->sink_node()) {
continue;
}
if (e->dst()->IsRetval()) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->attrs(), "index", &index));
for (const auto& output :
replicated_outputs[replica_id * old_num_replicated_outputs +
index]) {
const Edge* original_edge;
Status s = output.first->input_edge(output.second, &original_edge);
if (s.ok()) {
g->RemoveEdge(original_edge);
}
g->AddEdge(copy_node, e->src_output(), output.first, output.second);
}
} else {
g->AddEdge(copy_node, e->src_output(),
node_images[e->dst()][replica_id], e->dst_input());
}
}
copy_node->AddAttr("_xla_tail_outside_compilation", true);
g->AddControlEdge(xla_node, copy_node);
if (pivot_node) {
g->AddControlEdge(pivot_node, copy_node);
}
node_images[n].push_back(copy_node);
}
}
for (int i = 0; i < new_ret_types.size(); i++) {
const Edge* original_edge = oc_input_edges[i];
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
int src_output = replica_id * new_num_replicated_outputs +
old_num_replicated_outputs + i;
Node* dst = node_images[original_edge->dst()][replica_id];
g->AddEdge(xla_node, src_output, dst, original_edge->dst_input());
}
}
for (int i = old_num_replicated_outputs; i < new_num_replicated_outputs;
i++) {
NodeDefBuilder ret_builder(absl::StrCat("ret_", i),
FunctionLibraryDefinition::kRetOp);
ret_builder.Attr("T", new_ret_types[i - old_num_replicated_outputs]);
ret_builder.Attr("index", i);
const Edge* original_edge = oc_input_edges[i - old_num_replicated_outputs];
Node* src = original_edge->src();
int src_output = original_edge->src_output();
ret_builder.Input(src->name(), src_output, src->output_type(src_output));
NodeDef ret_def;
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def));
TF_ASSIGN_OR_RETURN(Node * ret_node, xla_graph->AddNode(ret_def));
xla_graph->RemoveEdge(original_edge);
xla_graph->AddEdge(src, src_output, ret_node, 0);
}
for (Node* n : oc_nodes_at_tail) {
xla_graph->RemoveNode(n);
}
std::vector<Node*> unused_rets;
for (Node* n : xla_graph->nodes()) {
if (n->IsRetval() && n->in_edges().empty()) {
unused_rets.push_back(n);
}
}
for (Node* n : unused_rets) {
NodeDefBuilder builder(absl::StrCat("placeholder_", n->name()),
"Placeholder");
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
builder.Attr("dtype", dtype);
builder.Attr(kXlaIsPlaceholderForTailOcAttrName, true);
NodeDef def;
TF_RETURN_IF_ERROR(builder.Finalize(&def));
TF_ASSIGN_OR_RETURN(Node * placeholder, xla_graph->AddNode(def));
xla_graph->AddEdge(placeholder, 0, n, 0);
}
VLOG(4) << "MoveTailOutsideCompilationToHost host graph: "
<< DumpGraphToFile(absl::StrCat("move_tail_oc_host_", xla_func_name),
*g);
VLOG(4) << "MoveTaildOutsideCompilationToHost XLA graph: "
<< DumpGraphToFile(absl::StrCat("move_tail_oc_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
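// For every DT_RESOURCE _Arg node in `xla_graph` that is consumed by outside
// compilation nodes, builds a replicated IdentityN node in the host graph
// carrying the argument's source values and replaces each outside compilation
// use with a Placeholder tagged with a matching identifier, so the resource
// does not need to flow through the XLA computation.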
Status ReplaceArgUsedByOutsideCompilationWithPlaceholder(
const std::string& outside_compilation_attr_name,
const std::string& xla_func_name, Graph* g, Graph* xla_graph,
Node* xla_node) {
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->attrs(), "num_replicas", &num_replicas));
int num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
for (Node* n : xla_graph->op_nodes()) {
if (!n->IsArg()) {
continue;
}
DataType dtype;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
if (dtype != DT_RESOURCE) {
continue;
}
std::vector<const Edge*> oc_out_edges;
for (const Edge* e : n->out_edges()) {
if (e->IsControlEdge() ||
!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
continue;
}
oc_out_edges.push_back(e);
}
if (oc_out_edges.empty()) {
continue;
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
std::string oc_identifier = absl::StrCat("oc_only_arg_", index);
NodeDefBuilder id_builder(absl::StrCat(oc_identifier, "_inputs"),
"IdentityN");
std::vector<DataType> dtypes(num_replicas, dtype);
id_builder.Attr("T", dtypes);
id_builder.Attr(kXlaOutsideCompilationInputsAttrName, oc_identifier);
std::vector<NodeDefBuilder::NodeOut> inputs(num_replicas);
if (index >= num_per_replica_inputs) {
const Edge* e = input_edges.at(num_replicas * num_per_replica_inputs +
(index - num_per_replica_inputs));
for (int i = 0; i < num_replicas; i++) {
inputs[i] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
} else {
for (int i = 0; i < num_replicas; i++) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + index);
inputs[i] =
NodeDefBuilder::NodeOut{e->src()->name(), e->src_output(),
e->src()->output_type(e->src_output())};
}
}
id_builder.Input(inputs);
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
if (index >= num_per_replica_inputs) {
const Edge* e = input_edges.at(num_replicas * num_per_replica_inputs +
(index - num_per_replica_inputs));
for (int i = 0; i < num_replicas; i++) {
g->AddEdge(e->src(), e->src_output(), id_node, i);
}
} else {
for (int i = 0; i < num_replicas; i++) {
const Edge* e = input_edges.at(i * num_per_replica_inputs + index);
g->AddEdge(e->src(), e->src_output(), id_node, i);
}
}
for (const Edge* e : oc_out_edges) {
NodeDefBuilder ph_builder(xla_graph->NewName("ph_for_arg_in_oc_"),
"Placeholder");
ph_builder.Attr("dtype", dtype);
std::string outside_compilation_attr;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), kOutsideCompilationAttr,
&outside_compilation_attr));
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_attr);
ph_builder.Attr(kXlaOutsideCompilationInputsAttrName, oc_identifier);
ph_builder.Attr(kXlaIsPlaceholderForArg, true);
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
TF_ASSIGN_OR_RETURN(Node * ph_node, xla_graph->AddNode(ph_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(ph_node, 0, dst, dst_input);
xla_graph->AddControlEdge(xla_graph->source_node(), ph_node);
}
}
VLOG(4) << "ReplaceOutsideCompilationOnlyArgWithPlaceholder host graph: "
<< DumpGraphToFile(
absl::StrCat("replace_oc_only_arg_host_", xla_func_name), *g);
VLOG(4) << "ReplaceOutsideCompilationOnlyArgWithPlaceholder XLA graph: "
<< DumpGraphToFile(
absl::StrCat("replace_oc_only_arg_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
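// Removes outputs of the "_TPUReplicate" node `xla_node` whose _Retval nodes
// in `xla_graph` are fed only by placeholders inserted for tail outside
// compilation. The surviving return indices are compacted, and the
// "output_types" attribute and host-side output edges are rewritten to match.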
Status RemoveUnusedXlaOutput(const std::string& xla_func_name, Graph* g,
Graph* xla_graph, Node* xla_node) {
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "output_types", &output_types));
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
int num_replicated_outputs = output_types.size() / num_replicas;
std::set<int> ret_indices_to_remove;
std::vector<Node*> ret_nodes_to_update, nodes_to_remove;
int num_rets = 0;
for (Node* n : xla_graph->nodes()) {
if (!n->IsRetval()) {
continue;
}
num_rets++;
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(0, &e));
if (e->src()->type_string() != "Placeholder" ||
!HasNodeAttr(e->src()->def(), kXlaIsPlaceholderForTailOcAttrName)) {
ret_nodes_to_update.push_back(n);
continue;
}
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
ret_indices_to_remove.insert(index);
nodes_to_remove.push_back(e->src());
nodes_to_remove.push_back(n);
}
for (Node* n : nodes_to_remove) {
xla_graph->RemoveNode(n);
}
std::map<int, int> ret_index_mapping;
int new_ret_index = 0;
for (int i = 0; i < num_rets; i++) {
if (ret_indices_to_remove.find(i) != ret_indices_to_remove.end()) {
continue;
} else {
ret_index_mapping[i] = new_ret_index;
new_ret_index++;
}
}
for (Node* n : ret_nodes_to_update) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
n->ClearAttr("index");
n->AddAttr("index", ret_index_mapping[index]);
}
std::vector<DataType> new_output_types;
for (int i = 0; i < num_replicas; i++) {
for (const auto& e : ret_index_mapping) {
new_output_types.push_back(output_types[e.first]);
}
}
xla_node->ClearAttr("output_types");
xla_node->AddAttr("output_types", new_output_types);
std::vector<std::vector<const Edge*>> output_edges(num_replicas *
num_replicated_outputs);
for (const Edge* e : xla_node->out_edges()) {
if (e->src_output() >= 0 &&
e->src_output() < num_replicas * num_replicated_outputs) {
output_edges[e->src_output()].push_back(e);
}
}
for (int i = 0; i < num_replicas; i++) {
for (int j = 0; j < num_replicated_outputs; j++) {
auto iter = ret_index_mapping.find(j);
if (iter != ret_index_mapping.end()) {
for (const Edge* e : output_edges[i * num_replicated_outputs + j]) {
Node* dst = e->dst();
int dst_input = e->dst_input();
int src_output =
i * (num_replicated_outputs - ret_indices_to_remove.size()) +
iter->second;
g->RemoveEdge(e);
g->AddEdge(xla_node, src_output, dst, dst_input);
}
} else {
TF_RET_CHECK(output_edges[i * num_replicated_outputs + j].empty())
<< "Output edge not removed: "
<< output_edges[i * num_replicated_outputs + j][0]->DebugString();
}
}
}
VLOG(4) << "RemoveUnusedXlaOutput host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_output_host_", xla_func_name), *g);
VLOG(4) << "RemoveUnusedXlaOutput XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_output_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
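// For every direct _Arg -> _Retval edge in `xla_graph`, forwards the
// consumers of the corresponding `xla_node` output straight to the source of
// the corresponding input, then feeds the _Retval from a tagged placeholder
// so the now-dead pass-through pair can be pruned by the cleanups above.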
Status RemoveEdgesBetweenArgAndRetval(const std::string& xla_func_name,
Graph* g, Graph* xla_graph,
Node* xla_node) {
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "num_replicas", &num_replicas));
std::vector<DataType> input_types;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->def(), "Tinputs", &input_types));
int num_distributed_vars;
TF_RETURN_IF_ERROR(GetNodeAttr(xla_node->attrs(), "num_distributed_variables",
&num_distributed_vars));
int old_num_per_replica_inputs =
(input_types.size() - num_distributed_vars) / num_replicas;
std::vector<DataType> output_types;
TF_RETURN_IF_ERROR(
GetNodeAttr(xla_node->def(), "output_types", &output_types));
int old_num_outputs = output_types.size() / num_replicas;
std::vector<const Edge*> edges;
for (const Edge* e : xla_graph->edges()) {
if (!e->IsControlEdge() && e->src()->IsArg() && e->dst()->IsRetval()) {
edges.push_back(e);
}
}
std::vector<std::vector<const Edge*>> xla_node_out_edges(
xla_node->num_outputs());
for (const Edge* e : xla_node->out_edges()) {
if (!e->IsControlEdge()) {
xla_node_out_edges[e->src_output()].push_back(e);
}
}
std::vector<const Edge*> input_edges;
TF_RETURN_IF_ERROR(xla_node->input_edges(&input_edges));
for (const Edge* e : edges) {
int arg_index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->def(), "index", &arg_index));
int ret_index;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), "index", &ret_index));
for (int replica_id = 0; replica_id < num_replicas; replica_id++) {
int input_index;
if (arg_index < old_num_per_replica_inputs) {
input_index = replica_id * old_num_per_replica_inputs + arg_index;
} else {
input_index = num_replicas * old_num_per_replica_inputs +
(arg_index - old_num_per_replica_inputs);
}
const Edge* input_edge = input_edges.at(input_index);
int output_index = replica_id * old_num_outputs + ret_index;
for (const Edge* output_edge : xla_node_out_edges[output_index]) {
Node* dst = output_edge->dst();
int dst_input = output_edge->dst_input();
g->RemoveEdge(output_edge);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
}
}
for (const Edge* e : edges) {
NodeDefBuilder placeholder_builder(
absl::StrCat("placeholder_", e->dst()->name()), "Placeholder");
placeholder_builder.Attr("dtype", e->src()->output_type(e->src_output()));
placeholder_builder.Attr(kXlaIsPlaceholderForTailOcAttrName, true);
NodeDef placeholder_def;
TF_RETURN_IF_ERROR(placeholder_builder.Finalize(&placeholder_def));
TF_ASSIGN_OR_RETURN(Node * placeholder_node,
xla_graph->AddNode(placeholder_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
xla_graph->RemoveEdge(e);
xla_graph->AddEdge(placeholder_node, 0, dst, dst_input);
}
VLOG(4) << "RemoveUnusedArgRetvalPair host graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_arg_ret_host_", xla_func_name),
*g);
VLOG(4) << "RemoveUnusedArgRetvalPair XLA graph: "
<< DumpGraphToFile(
absl::StrCat("remove_unused_arg_ret_xla_", xla_func_name),
*xla_graph);
return absl::OkStatus();
}
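// Removes TPUReplicatedInput nodes that have no non-sink outputs, along with
// any TPUPartitionedInput(V2) nodes that fed them.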
void RemoveUnusedTPUReplicatedInputs(Graph* graph) {
for (Node* n : graph->nodes()) {
if (n->type_string() == kTPUReplicatedInput) {
bool has_output = false;
for (const Edge* e : n->out_edges()) {
if (!e->dst()->IsSink()) {
has_output = true;
break;
}
}
if (!has_output) {
std::vector<Node*> to_be_removed_src_nodes;
        for (const Edge* e_in : n->in_edges()) {
          if (!e_in->IsControlEdge() &&
              (e_in->src()->type_string() == kTPUPartitionedInput ||
               e_in->src()->type_string() == kTPUPartitionedInputV2)) {
            to_be_removed_src_nodes.push_back(e_in->src());
          }
        }
graph->RemoveNode(n);
for (Node* node : to_be_removed_src_nodes) {
graph->RemoveNode(node);
}
}
}
}
}
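// If several TPUReplicateMetadata nodes share one cluster name, renames all
// but the first cluster by appending a numeric suffix, propagating the new
// name through the cluster's nodes and any attached TPUCompilationResult
// node.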
Status RenameClustersWithDuplicatedNames(Graph* g) {
std::unordered_map<std::string, std::vector<Node*>>
cluster_name_to_metadata_nodes;
std::unordered_set<std::string> cluster_names;
for (Node* n : g->nodes()) {
if (n->type_string() != "TPUReplicateMetadata") {
continue;
}
std::string cluster_name;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), kTPUReplicateAttr, &cluster_name));
cluster_name_to_metadata_nodes[cluster_name].push_back(n);
cluster_names.insert(cluster_name);
}
for (const auto& iter : cluster_name_to_metadata_nodes) {
if (iter.second.size() == 1) {
continue;
}
for (int i = 1; i < iter.second.size(); i++) {
std::string new_cluster_name;
int cluster_name_suffix = 1;
while (true) {
new_cluster_name = absl::StrCat(iter.first, "_", cluster_name_suffix);
if (cluster_names.find(new_cluster_name) == cluster_names.end()) {
break;
}
cluster_name_suffix++;
}
cluster_names.insert(new_cluster_name);
std::queue<Node*> queue;
queue.push(iter.second.at(i));
absl::flat_hash_set<Node*> visited;
while (!queue.empty()) {
Node* n = queue.front();
queue.pop();
visited.insert(n);
n->ClearAttr(kTPUReplicateAttr);
n->AddAttr(kTPUReplicateAttr, new_cluster_name);
std::string cluster_name;
for (const Edge* e : n->out_edges()) {
if (GetNodeAttr(e->dst()->def(), kTPUReplicateAttr, &cluster_name)
.ok() &&
cluster_name == iter.first &&
visited.find(e->dst()) == visited.end()) {
queue.push(e->dst());
}
}
}
for (const Edge* e : iter.second.at(i)->out_edges()) {
if (e->dst()->type_string() == "TPUCompilationResult") {
e->dst()->ClearAttr("_tpu_compilation_status");
e->dst()->AddAttr("_tpu_compilation_status", new_cluster_name);
}
}
}
}
return absl::OkStatus();
}
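// Instantiates the function named by the `function_name_attr` attribute of
// node `n` (e.g. "cond", "body", "then_branch") into a FunctionBody.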
absl::StatusOr<std::unique_ptr<FunctionBody>> InstantiateAssociatedFunction(
const Node& n, absl::string_view function_name_attr,
FunctionLibraryDefinition* fld) {
std::unique_ptr<FunctionBody> fbody;
NameAttrList func_attr_list;
TF_RETURN_IF_ERROR(GetNodeAttr(n.def(), function_name_attr, &func_attr_list));
const FunctionDef* fdef = fld->Find(func_attr_list.name());
if (fdef == nullptr) {
return absl::InternalError(absl::StrCat("Cannot find ", function_name_attr,
" function", "for node ",
n.DebugString()));
}
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*fdef, AttrSlice(&func_attr_list.attr()), fld, &fbody));
return fbody;
}
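// Returns the indices of DT_RESOURCE arguments of If node `if_node` that are
// used in at least one branch and whose only data uses in both branches are
// outside compilation nodes; such arguments can be lifted out of the If.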
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForIfNode(
const Node& if_node, FunctionLibraryDefinition* fld) {
absl::flat_hash_set<int> args_to_lift_indices;
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(if_node.def(), "Tin", &dtypes));
int num_args = dtypes.size();
for (int i = 0; i < num_args; i++) {
if (dtypes[i] == DT_RESOURCE) {
args_to_lift_indices.insert(i);
}
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(if_node, "then_branch", fld));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(if_node, "else_branch", fld));
for (int i = 0; i < num_args; ++i) {
bool used = false;
const Node* then_arg_node = then_branch_fbody->arg_nodes[i];
for (const Edge* e : then_arg_node->out_edges()) {
used = true;
if (e->IsControlEdge() ||
HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr))
continue;
args_to_lift_indices.erase(i);
break;
}
const Node* else_arg_node = else_branch_fbody->arg_nodes[i];
for (const Edge* e : else_arg_node->out_edges()) {
used = true;
if (e->IsControlEdge() ||
HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr))
continue;
args_to_lift_indices.erase(i);
break;
}
if (!used) args_to_lift_indices.erase(i);
}
return args_to_lift_indices;
}
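// Returns the indices of DT_RESOURCE loop variables of `while_node` that can
// be lifted: the cond function must not use the argument, the body must pass
// it through (possibly via Identity nodes) to the matching return value, and
// all of its other uses in the body must be outside compilation nodes.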
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForWhileNode(
Node* while_node, FunctionLibraryDefinition* fld) {
absl::flat_hash_set<int> result;
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "T", &dtypes));
for (int i = 0; i < dtypes.size(); i++) {
if (dtypes[i] == DT_RESOURCE) {
result.insert(i);
}
}
NameAttrList cond_func;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "cond", &cond_func));
const FunctionDef* cond_fdef = fld->Find(cond_func.name());
if (cond_fdef == nullptr) {
return absl::InternalError(
absl::StrCat("Cannot find cond function ", cond_func.name(),
" for while node ", while_node->DebugString()));
}
std::unique_ptr<FunctionBody> cond_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*cond_fdef, AttrSlice(&cond_func.attr()), fld, &cond_fbody));
for (int i = 0; i < cond_fbody->arg_nodes.size(); i++) {
const Node* arg_node = cond_fbody->arg_nodes[i];
for (const Edge* e : arg_node->out_edges()) {
if (!e->IsControlEdge()) {
result.erase(i);
}
}
}
NameAttrList body_func;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "body", &body_func));
const FunctionDef* body_fdef = fld->Find(body_func.name());
if (body_fdef == nullptr) {
return absl::InternalError(
absl::StrCat("Cannot find body function ", body_func.name(),
" for while node ", while_node->DebugString()));
}
std::unique_ptr<FunctionBody> body_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*body_fdef, AttrSlice(&body_func.attr()), fld, &body_fbody));
for (int i = 0; i < body_fbody->ret_nodes.size(); i++) {
const Node* node = body_fbody->ret_nodes[i];
do {
TF_RETURN_IF_ERROR(node->input_node(0, &node));
} while (node->IsIdentity());
if (node != body_fbody->arg_nodes[i]) {
result.erase(i);
}
}
for (int i = 0; i < body_fbody->arg_nodes.size(); i++) {
const Node* arg_node = body_fbody->arg_nodes[i];
int data_edge_count = std::count_if(
arg_node->out_edges().begin(), arg_node->out_edges().end(),
[](const Edge* e) { return !e->IsControlEdge(); });
if (data_edge_count == 1) {
result.erase(i);
}
}
for (int i = 0; i < body_fbody->arg_nodes.size(); i++) {
const Node* arg_node = body_fbody->arg_nodes[i];
for (const Edge* e : arg_node->out_edges()) {
if (!e->dst()->IsRetval() &&
!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
result.erase(i);
break;
}
}
}
return result;
}
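// Returns the indices of DT_RESOURCE inputs of function call node `call_node`
// whose arguments in `fbody` are used, and used only by outside compilation
// nodes.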
absl::StatusOr<absl::flat_hash_set<int>> FindArgsToLiftForCallNode(
Node* call_node, const FunctionBody& fbody) {
absl::flat_hash_set<int> result;
std::vector<DataType> dtypes(call_node->input_types().begin(),
call_node->input_types().end());
for (int i = 0; i < dtypes.size(); i++) {
if (dtypes[i] == DT_RESOURCE) {
result.insert(i);
}
}
for (int i = 0; i < fbody.arg_nodes.size(); i++) {
const Node* arg_node = fbody.arg_nodes[i];
if (arg_node->out_edges().empty()) {
result.erase(i);
continue;
}
for (const Edge* e : arg_node->out_edges()) {
if (!HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
result.erase(i);
break;
}
}
}
return result;
}
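// Lifts outside-compilation-only arguments out of While/If/function call
// nodes in `g`. Declared here because it is mutually recursive with
// LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef below.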
Status LiftOutsideCompilationOnlyArgs(Graph* g, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten);
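// Runs LiftOutsideCompilationOnlyArgs on the graph of `fbody` and, if
// anything was rewritten, writes the result back to `fld`, either replacing
// the original FunctionDef or adding it under `new_func_name`.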
Status LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
const FunctionBody& fbody, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, int* lifted_arg_count,
std::optional<std::string> new_func_name, bool* rewritten) {
*rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgs(
fbody.graph, flr, fld, lifted_arg_count, rewritten));
if (*rewritten) {
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(fbody.graph), fbody.record->fdef().signature().name(),
&rewritten_fdef));
if (new_func_name) {
rewritten_fdef.mutable_signature()->set_name(*new_func_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
} else {
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
fbody.record->fdef().signature().name(), rewritten_fdef));
}
}
return absl::OkStatus();
}
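// For every argument being lifted, creates an outside compilation Identity
// node in `g` fed by the argument's source, records the generated node name
// in `lifted_arg_index_to_oc_cluster_name`, and orders the Identity node
// before `n` with a control edge.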
Status MakeIdentityNodesForArgsToLift(
const absl::flat_hash_set<int>& args_to_lift,
const int arg_to_input_edge_offset, Graph* g, Node* n,
absl::flat_hash_map<int, std::string>* lifted_arg_index_to_oc_cluster_name,
int* lifted_arg_count) {
int num_input = n->num_inputs();
for (int arg_index = 0; arg_index < num_input; ++arg_index) {
if (!args_to_lift.contains(arg_index)) continue;
int input_edge_index = arg_index + arg_to_input_edge_offset;
const Edge* arg_edge;
TF_RETURN_IF_ERROR(n->input_edge(input_edge_index, &arg_edge));
std::string node_name =
g->NewName(absl::StrCat("lifted_arg", *lifted_arg_count));
(*lifted_arg_count)++;
(*lifted_arg_index_to_oc_cluster_name)[arg_index] = node_name;
NodeDefBuilder id_builder(node_name, "Identity");
id_builder.Attr("T", n->input_type(input_edge_index));
id_builder.Attr(kOutsideCompilationAttr, id_builder.node_name());
id_builder.Attr(kXlaIsLiftedArgAttrName, true);
id_builder.Input(arg_edge->src()->name(), arg_edge->src_output(),
n->input_type(input_edge_index));
NodeDef id_def;
TF_RETURN_IF_ERROR(id_builder.Finalize(&id_def));
TF_ASSIGN_OR_RETURN(Node * id_node, g->AddNode(id_def));
g->AddEdge(arg_edge->src(), arg_edge->src_output(), id_node, 0);
g->AddControlEdge(id_node, n);
}
return absl::OkStatus();
}
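// Removes the lifted arguments from `fbody`: surviving _Arg nodes get their
// compacted index and dtype, while every outside compilation use of a lifted
// argument is rewired to a Placeholder tagged with the lifted argument's
// cluster name.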
Status RemoveArgsToLiftFromFunctionBody(
const absl::flat_hash_set<int>& args_to_lift,
const std::vector<DataType>& arg_dtypes,
const absl::flat_hash_map<int, std::string>&
lifted_arg_index_to_oc_cluster_name,
const absl::flat_hash_map<int, int>& index_mapping,
const FunctionBody* fbody) {
for (int i = 0; i < fbody->arg_nodes.size(); ++i) {
Node* arg_node = fbody->arg_nodes[i];
if (!args_to_lift.contains(i)) {
int new_index = index_mapping.at(i);
arg_node->ClearAttr("index");
arg_node->AddAttr("index", new_index);
arg_node->ClearAttr("T");
arg_node->AddAttr("T", arg_dtypes[i]);
continue;
}
std::vector<const Edge*> out_edges_to_oc;
for (const Edge* e : arg_node->out_edges()) {
if (HasNodeAttr(e->dst()->def(), kOutsideCompilationAttr)) {
out_edges_to_oc.push_back(e);
}
}
for (const Edge* e : out_edges_to_oc) {
std::string outside_compilation_cluster;
TF_RETURN_IF_ERROR(GetNodeAttr(e->dst()->def(), kOutsideCompilationAttr,
&outside_compilation_cluster));
NodeDefBuilder ph_builder(fbody->graph->NewName("lifted_arg"),
"Placeholder");
ph_builder.Attr("dtype", arg_dtypes[i]);
ph_builder.Attr(kOutsideCompilationAttr, outside_compilation_cluster);
TF_RET_CHECK(lifted_arg_index_to_oc_cluster_name.contains(i));
ph_builder.Attr(kXlaLiftedArgOutsideCompilationAttrName,
lifted_arg_index_to_oc_cluster_name.at(i));
NodeDef ph_def;
TF_RETURN_IF_ERROR(ph_builder.Finalize(&ph_def));
TF_ASSIGN_OR_RETURN(Node * ph_node, fbody->graph->AddNode(ph_def));
Node* dst = e->dst();
int dst_input = e->dst_input();
fbody->graph->RemoveEdge(e);
fbody->graph->AddEdge(ph_node, 0, dst, dst_input);
}
fbody->graph->RemoveNode(arg_node);
}
return absl::OkStatus();
}
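// Rewires the input edges of `n` after argument removal: edges of dropped
// arguments are deleted and the remaining edges are shifted to their
// compacted slots. `arg_to_input_edge_offset` accounts for leading
// non-argument inputs, e.g. an If node's predicate.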
Status CleanUpInEdges(const absl::flat_hash_map<int, int>& index_mapping,
const int arg_to_input_edge_offset, Graph* g, Node* n) {
int num_inputs = n->num_inputs();
for (int i = 0; i < num_inputs; ++i) {
if (i < arg_to_input_edge_offset) continue;
int arg_idx = i - arg_to_input_edge_offset;
const Edge* e;
TF_RETURN_IF_ERROR(n->input_edge(i, &e));
if (!index_mapping.contains(arg_idx)) {
g->RemoveEdge(e);
continue;
}
if (index_mapping.at(arg_idx) == arg_idx) continue;
g->AddEdge(e->src(), e->src_output(), n,
index_mapping.at(arg_idx) + arg_to_input_edge_offset);
g->RemoveEdge(e);
}
return absl::OkStatus();
}
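// Bypasses and removes the Identity nodes hanging off `while_node`'s outputs
// (as added by While v2), reconnecting their consumers directly to the While
// node.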
void RemoveOutputIdentityNodesForWhileV2(Graph* g, Node* while_node) {
std::vector<const Edge*> edges_to_identity_node;
for (const Edge* e : while_node->out_edges()) {
if (!e->IsControlEdge() && e->dst()->IsIdentity()) {
edges_to_identity_node.push_back(e);
}
}
for (const Edge* e : edges_to_identity_node) {
Node* identity = e->dst();
std::vector<const Edge*> out_edges(identity->out_edges().begin(),
identity->out_edges().end());
for (const Edge* out_edge : out_edges) {
if (out_edge->IsControlEdge()) {
g->AddControlEdge(while_node, out_edge->dst());
} else {
Node* dst = out_edge->dst();
int dst_input = out_edge->dst_input();
g->RemoveEdge(out_edge);
g->AddEdge(while_node, e->src_output(), dst, dst_input);
}
}
g->RemoveNode(identity);
}
}
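// For loop variables being lifted, forwards the consumers of the While node's
// outputs to the source of the corresponding input edge, which is valid
// because lifted variables pass through the loop unchanged.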
Status ReplaceOutputEdgesWithInputEdgeSourceForWhile(
const absl::flat_hash_set<int>& args_to_lift, Graph* g, Node* while_node) {
std::vector<const Edge*> edges_to_replace;
for (const Edge* e : while_node->out_edges()) {
if (args_to_lift.contains(e->src_output())) {
edges_to_replace.push_back(e);
}
}
for (const Edge* e : edges_to_replace) {
const Edge* input_edge;
TF_RETURN_IF_ERROR(while_node->input_edge(e->src_output(), &input_edge));
Node* dst = e->dst();
int dst_input = e->dst_input();
g->RemoveEdge(e);
g->AddEdge(input_edge->src(), input_edge->src_output(), dst, dst_input);
}
return absl::OkStatus();
}
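// Builds a map from old argument index to compacted new index, skipping the
// arguments being lifted.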
absl::flat_hash_map<int, int> ArgIndexMapping(
const int num_args, const absl::flat_hash_set<int>& args_to_lift) {
absl::flat_hash_map<int, int> index_mapping;
int new_index = 0;
for (int i = 0; i < num_args; i++) {
if (!args_to_lift.contains(i)) {
index_mapping[i] = new_index;
++new_index;
}
}
return index_mapping;
}
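// Renumbers the _Retval nodes of a While body after lifting: surviving
// retvals get their compacted index and dtype; lifted ones are removed.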
void CleanUpRetvalsForWhileBody(
const absl::flat_hash_map<int, int>& index_mapping,
const std::vector<DataType>& dtypes, FunctionBody* fbody) {
for (int i = 0; i < fbody->ret_nodes.size(); i++) {
Node* ret_node = fbody->ret_nodes[i];
if (index_mapping.contains(i)) {
int new_index = index_mapping.at(i);
ret_node->ClearAttr("index");
ret_node->AddAttr("index", new_index);
ret_node->ClearAttr("T");
ret_node->AddAttr("T", dtypes[i]);
} else {
fbody->graph->RemoveNode(ret_node);
}
}
}
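// Lifts DT_RESOURCE loop variables that are only used by outside compilation
// out of `while_node`: host-side Identity nodes take over the resource, the
// cond/body functions are rewritten without the lifted arguments, and the
// node's input edges and "T" attribute are compacted accordingly.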
Status LiftOutsideCompilationOnlyArgsFromWhileNode(
Graph* g, Node* while_node, FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForWhileNode(while_node, fld));
if (args_to_lift.empty()) return absl::OkStatus();
RemoveOutputIdentityNodesForWhileV2(g, while_node);
TF_RETURN_IF_ERROR(ReplaceOutputEdgesWithInputEdgeSourceForWhile(
args_to_lift, g, while_node));
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "T", &dtypes));
absl::flat_hash_map<int, int> index_mapping =
ArgIndexMapping(dtypes.size(), args_to_lift);
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
  TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
      args_to_lift, /*arg_to_input_edge_offset=*/0, g, while_node,
      &lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> cond_fbody,
InstantiateAssociatedFunction(*while_node, "cond", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
cond_fbody.get()));
FunctionDef rewritten_cond_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(cond_fbody->graph), cond_fbody->record->fdef().signature().name(),
&rewritten_cond_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
cond_fbody->record->fdef().signature().name(), rewritten_cond_fdef));
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> body_fbody,
InstantiateAssociatedFunction(*while_node, "body", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
body_fbody.get()));
CleanUpRetvalsForWhileBody(index_mapping, dtypes, body_fbody.get());
FunctionDef rewritten_body_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(body_fbody->graph), body_fbody->record->fdef().signature().name(),
&rewritten_body_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(
body_fbody->record->fdef().signature().name(), rewritten_body_fdef));
  TF_RETURN_IF_ERROR(CleanUpInEdges(
      index_mapping, /*arg_to_input_edge_offset=*/0, g, while_node));
  TF_RETURN_IF_ERROR(while_node->ShrinkTypeInfo(index_mapping, "T", true));
*rewritten = true;
return absl::OkStatus();
}
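// Same as above, but for an If node: the lifted arguments are removed from
// both branch functions and from the node's "Tin" attribute. Note the input
// edge offset of 1 to skip the predicate input.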
Status LiftOutsideCompilationOnlyArgsFromIfNode(Graph* g, Node* if_node,
FunctionLibraryDefinition* fld,
int* lifted_arg_count,
bool* rewritten) {
*rewritten = false;
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForIfNode(*if_node, fld));
if (args_to_lift.empty()) return absl::OkStatus();
std::vector<DataType> dtypes;
TF_RETURN_IF_ERROR(GetNodeAttr(if_node->def(), "Tin", &dtypes));
absl::flat_hash_map<int, int> index_mapping;
int new_index = 0;
for (int i = 0; i < dtypes.size(); i++) {
if (!args_to_lift.contains(i)) {
index_mapping[i] = new_index;
++new_index;
}
}
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
  TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
      args_to_lift, /*arg_to_input_edge_offset=*/1, g, if_node,
      &lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(*if_node, "then_branch", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
then_branch_fbody.get()));
FunctionDef rewritten_then_branch_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*(then_branch_fbody->graph),
then_branch_fbody->record->fdef().signature().name(),
&rewritten_then_branch_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(then_branch_fbody->record->fdef().signature().name(),
rewritten_then_branch_fdef));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(*if_node, "else_branch", fld));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
else_branch_fbody.get()));
FunctionDef rewritten_else_branch_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*(else_branch_fbody->graph),
else_branch_fbody->record->fdef().signature().name(),
&rewritten_else_branch_fdef));
TF_RETURN_IF_ERROR(
fld->ReplaceFunction(else_branch_fbody->record->fdef().signature().name(),
rewritten_else_branch_fdef));
  TF_RETURN_IF_ERROR(CleanUpInEdges(
      index_mapping, /*arg_to_input_edge_offset=*/1, g, if_node));
  TF_RETURN_IF_ERROR(if_node->ShrinkTypeInfo(index_mapping, "Tin", false));
*rewritten = true;
return absl::OkStatus();
}
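// Lifts outside-compilation-only DT_RESOURCE inputs out of a function call
// node. The rewritten function is added to `fld` under a fresh name and
// `call_node` is replaced by a node that invokes it.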
Status LiftOutsideCompilationOnlyArgsFromCallNode(
Graph* g, Node* call_node, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld, int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
NameAttrList func;
if (fld->Contains(call_node->type_string())) {
func.set_name(call_node->type_string());
*func.mutable_attr() = call_node->def().attr();
} else if (call_node->IsPartitionedCall()) {
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &func));
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
func.set_name(FunctionLibraryDefinition::kGradientOp);
*func.mutable_attr() = call_node->def().attr();
}
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(
flr->Instantiate(func.name(), AttrSlice(&func.attr()), &handle));
auto cleanup_handle = gtl::MakeCleanup(
[&flr, &handle]() { flr->ReleaseHandle(handle).IgnoreError(); });
const FunctionBody* fbody = flr->GetFunctionBody(handle);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<int> args_to_lift,
FindArgsToLiftForCallNode(call_node, *fbody));
if (args_to_lift.empty()) return absl::OkStatus();
  std::vector<DataType> dtypes(call_node->input_types().begin(),
                               call_node->input_types().end());
absl::flat_hash_map<int, int> index_mapping =
ArgIndexMapping(dtypes.size(), args_to_lift);
absl::flat_hash_map<int, std::string> lifted_arg_index_to_oc_cluster_name;
  TF_RETURN_IF_ERROR(MakeIdentityNodesForArgsToLift(
      args_to_lift, /*arg_to_input_edge_offset=*/0, g, call_node,
      &lifted_arg_index_to_oc_cluster_name, lifted_arg_count));
TF_RETURN_IF_ERROR(RemoveArgsToLiftFromFunctionBody(
args_to_lift, dtypes, lifted_arg_index_to_oc_cluster_name, index_mapping,
fbody));
FunctionDef rewritten_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*(fbody->graph), fbody->record->fdef().signature().name(),
&rewritten_fdef));
std::string new_func_name =
fld->UniqueFunctionName(fbody->record->fdef().signature().name());
rewritten_fdef.mutable_signature()->set_name(new_func_name);
TF_RETURN_IF_ERROR(fld->AddFunctionDef(rewritten_fdef));
  TF_RETURN_IF_ERROR(CleanUpInEdges(
      index_mapping, /*arg_to_input_edge_offset=*/0, g, call_node));
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
if (call_node->IsPartitionedCall()) {
NameAttrList f;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &f));
*node_def.mutable_attr() = f.attr();
} else if (fld->Contains(call_node->type_string())) {
*node_def.mutable_attr() = call_node->def().attr();
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
*node_def.mutable_attr() = call_node->def().attr();
node_def.mutable_attr()->erase(FunctionLibraryDefinition::kFuncAttr);
}
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
*rewritten = true;
return absl::OkStatus();
}
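// Top-level driver: first rewrites the functions attached to While/If/call
// nodes (recursively, via the *AndReplaceFunctionDef helper), then lifts
// arguments from the nodes themselves. Sets `*rewritten` if anything changed.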
Status LiftOutsideCompilationOnlyArgs(Graph* g, FunctionLibraryRuntime* flr,
FunctionLibraryDefinition* fld,
int* lifted_arg_count, bool* rewritten) {
*rewritten = false;
std::vector<Node*> while_nodes, if_nodes, call_nodes;
for (Node* n : g->op_nodes()) {
if (HasNodeAttr(n->def(), kOutsideCompilationAttr)) {
continue;
}
if (n->IsWhileNode()) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<FunctionBody> body_fbody,
InstantiateAssociatedFunction(*n, "body", fld));
bool func_rewritten = false;
      TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
          *body_fbody, flr, fld, lifted_arg_count,
          /*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten = *rewritten || func_rewritten;
while_nodes.push_back(n);
} else if (n->IsIfNode()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> then_branch_fbody,
InstantiateAssociatedFunction(*n, "then_branch", fld));
bool func_rewritten = false;
      TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
          *then_branch_fbody, flr, fld, lifted_arg_count,
          /*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten |= func_rewritten;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<FunctionBody> else_branch_fbody,
InstantiateAssociatedFunction(*n, "else_branch", fld));
func_rewritten = false;
      TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
          *else_branch_fbody, flr, fld, lifted_arg_count,
          /*new_func_name=*/std::nullopt, &func_rewritten));
*rewritten |= func_rewritten;
if_nodes.push_back(n);
} else if (IsFunctionCall(*fld, *n)) {
call_nodes.push_back(n);
}
}
std::vector<Node*> rewritten_call_nodes;
for (Node* call_node : call_nodes) {
if (call_node->IsPartitionedCall()) {
std::unique_ptr<FunctionBody> function_fbody;
TF_ASSIGN_OR_RETURN(function_fbody,
InstantiateAssociatedFunction(*call_node, "f", fld));
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
function_fbody->record->fdef().signature().name());
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NameAttrList f;
TF_RETURN_IF_ERROR(GetNodeAttr(call_node->def(), "f", &f));
f.set_name(new_func_name);
call_node->ClearAttr("f");
call_node->AddAttr("f", f);
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
} else if (fld->Contains(call_node->type_string())) {
std::unique_ptr<FunctionBody> function_fbody;
const FunctionDef* fdef = fld->Find(call_node->type_string());
TF_RET_CHECK(fdef);
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*fdef, call_node->attrs(), fld,
&function_fbody));
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
function_fbody->record->fdef().signature().name());
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
*node_def.mutable_attr() = call_node->def().attr();
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
} else {
TF_RET_CHECK(call_node->type_string() ==
FunctionLibraryDefinition::kGradientOp);
FunctionLibraryRuntime::Handle handle;
TF_RETURN_IF_ERROR(flr->Instantiate(call_node->type_string(),
call_node->attrs(), &handle));
auto cleanup_handle = gtl::MakeCleanup(
[&flr, &handle]() { flr->ReleaseHandle(handle).IgnoreError(); });
bool func_rewritten = false;
std::string new_func_name = fld->UniqueFunctionName(
absl::StrCat(call_node->name(), "_lift_args"));
const FunctionBody* function_fbody = flr->GetFunctionBody(handle);
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsAndReplaceFunctionDef(
*function_fbody, flr, fld, lifted_arg_count, new_func_name,
&func_rewritten));
if (func_rewritten) {
NodeDef node_def;
node_def.set_name(g->NewName(call_node->name()));
node_def.set_op(new_func_name);
*node_def.mutable_attr() = call_node->def().attr();
node_def.mutable_attr()->erase(FunctionLibraryDefinition::kFuncAttr);
TF_ASSIGN_OR_RETURN(call_node, ReplaceNode(g, call_node, node_def));
}
*rewritten |= func_rewritten;
rewritten_call_nodes.push_back(call_node);
}
}
for (Node* n : while_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromWhileNode(
g, n, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
for (Node* n : if_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromIfNode(
g, n, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
for (Node* n : rewritten_call_nodes) {
bool node_rewritten = false;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgsFromCallNode(
g, n, flr, fld, lifted_arg_count, &node_rewritten));
*rewritten = *rewritten || node_rewritten;
}
if (*rewritten) {
VLOG(4) << DumpGraphToFile("after_lifting_args", *g, fld);
}
return absl::OkStatus();
}
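// Returns true if the global flag requests skipping encapsulation for graphs
// that contain no TPU computation.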
bool ShouldSkipEncapsulationForNonTPUGraph() {
return flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.value();
}
}  // namespace
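// Validates that no data edge leaves a TPU cluster undeclared, runs the
// cleanup passes above, performs static shape inference, and encapsulates
// each TPU replicate cluster into a function call node.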
Status EncapsulateTPUComputationsPass::Encapsulate(
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def) {
if (ShouldSkipEncapsulationForNonTPUGraph()) {
bool found_tpu_replicate = false;
for (const Node* n : (*graph)->nodes()) {
if (n->attrs().Find(kTPUReplicateAttr) != nullptr) {
found_tpu_replicate = true;
break;
}
}
if (!found_tpu_replicate) {
VLOG(1) << "No TPU replicate found, skipping encapsulation";
return absl::OkStatus();
}
}
for (const Edge* e : (*graph)->edges()) {
if (!e->IsControlEdge() &&
e->src()->attrs().Find(kTPUReplicateAttr) != nullptr &&
e->src()->attrs().Find(kOutsideCompilationAttr) == nullptr &&
e->dst()->attrs().Find(kTPUReplicateAttr) == nullptr &&
e->dst()->type_string() != kTPUReplicatedOutput) {
return absl::InvalidArgumentError(absl::StrCat(
"Undeclared output of TPU computation. A common cause of this error "
"is variable initializers that depend on the TPU computation. Edge: ",
FormatNodeForError(*e->src()), ":", e->src_output(), " -> ",
FormatNodeForError(*e->dst()), ":", e->dst_input()));
}
}
RemoveUnusedTPUReplicatedInputs(graph->get());
TF_RETURN_IF_ERROR(RenameClustersWithDuplicatedNames(graph->get()));
TF_RETURN_IF_ERROR(
PerformStaticShapeInferenceBeforeEncapsulation(graph->get()));
auto output = std::make_unique<Graph>((*graph)->op_registry());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
EncapsulateSubgraphsInFunctions(
          kTPUReplicateAttr, **graph, RewriteSubgraph,
          /*reuse_existing_functions=*/true, &output, flib_def),
"EncapsulateTPUComputationsPass failed");
graph->swap(output);
return absl::OkStatus();
}
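// Rebuilds the encapsulated cluster nodes as TPU replicate ops: GuaranteeConst
// nodes are rewritten to Identity, and each replicate node's inputs are
// validated and partitioned into per-replica inputs, distributed variables,
// broadcast inputs, variables and guaranteed constants.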
Status EncapsulateTPUComputationsPass::BuildTPUReplicateOps(
Graph* graph) {
std::vector<Node*> replicate_nodes;
std::vector<Node*> guarantee_const_nodes;
for (Node* n : graph->nodes()) {
std::string name;
if (TryGetNodeAttr(n->attrs(), kTPUReplicateAttr, &name) &&
!TryGetNodeAttr(n->attrs(), kOutsideCompilationAttr, &name)) {
replicate_nodes.push_back(n);
} else if (n->type_string() == "GuaranteeConst") {
guarantee_const_nodes.push_back(n);
}
}
for (Node* n : guarantee_const_nodes) {
std::vector<std::pair<Node*, int>> predecessors;
for (const Edge* e : n->in_edges()) {
predecessors.emplace_back(e->src(), e->src_output());
}
std::vector<std::pair<Node*, int>> successors;
for (const Edge* e : n->out_edges()) {
successors.emplace_back(e->dst(), e->dst_input());
}
NodeDef ndef;
ndef.set_name(n->name());
ndef.set_op("Identity");
ndef.set_device(n->requested_device());
MergeDebugInfo(NodeDebugInfo(n->def()), &ndef);
AddNodeAttr("T", n->output_type(0), &ndef);
graph->RemoveNode(n);
TF_ASSIGN_OR_RETURN(Node * id_node, graph->AddNode(ndef));
for (const auto& pred : predecessors) {
if (pred.second < 0) {
graph->AddControlEdge(pred.first, id_node);
} else {
graph->AddEdge(pred.first, pred.second, id_node, 0);
}
}
for (const auto& succ : successors) {
if (succ.second < 0) {
graph->AddControlEdge(id_node, succ.first);
} else {
graph->AddEdge(id_node, 0, succ.first, succ.second);
}
}
}
for (Node* replicate : replicate_nodes) {
int num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(replicate->attrs(), "num_replicas", &num_replicas));
int variable_start_index;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(), "_variable_start_index",
&variable_start_index));
int guaranteed_const_start_index;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(),
"_guaranteed_const_start_index",
&guaranteed_const_start_index));
if (HasNodeAttr(replicate->def(), "use_tpu")) {
bool use_tpu;
TF_RETURN_IF_ERROR(GetNodeAttr(replicate->attrs(), "use_tpu", &use_tpu));
if (!use_tpu) {
LOG(WARNING) << "use_tpu=false attr on a TPUReplicate node is ignored.";
}
}
std::vector<const Edge*> in_edges;
TF_RETURN_IF_ERROR(replicate->input_edges(&in_edges));
int pos = 0;
std::vector<int> mirrored_variable_indices;
int distributed_var_start_index = 0;
while (pos < in_edges.size() &&
in_edges[pos]->src()->type_string() == kTPUReplicatedInput) {
int input_num_replicas;
TF_RETURN_IF_ERROR(
GetNodeAttr(in_edges[pos]->src()->attrs(), "N", &input_num_replicas));
bool is_mirrored_variable;
TF_CHECK_OK(GetNodeAttr(in_edges[pos]->src()->attrs(),
"is_mirrored_variable", &is_mirrored_variable));
if (is_mirrored_variable) {
mirrored_variable_indices.push_back(pos);
}
bool is_packed = false;
GetNodeAttr(in_edges[pos]->src()->attrs(), "is_packed", &is_packed)
.IgnoreError();
bool is_distributed_variable =
is_packed && (in_edges[pos]->src()->output_type(
in_edges[pos]->src_output()) == DT_RESOURCE);
if (!is_distributed_variable && input_num_replicas != num_replicas) {
return absl::InvalidArgumentError(absl::StrCat(
"Mismatched number of replicas. Computation has ", num_replicas,
" replicas, input '", FormatNodeForError(*in_edges[pos]->src()),
"' has ", input_num_replicas, " replicas."));
}
if (!is_distributed_variable) {
if (distributed_var_start_index < pos) {
return absl::InvalidArgumentError(
absl::StrCat("Expect a distributed resource after index ",
distributed_var_start_index,
", but got a replicated resource at index ", pos));
} else {
++distributed_var_start_index;
}
}
++pos;
}
const int num_replicated_inputs = distributed_var_start_index;
const int num_distributed_vars = pos - num_replicated_inputs;
const int num_variables =
std::max(0, guaranteed_const_start_index - variable_start_index);
const int num_guaranteed_constants =
in_edges.size() - guaranteed_const_start_index;
TF_RET_CHECK(num_guaranteed_constants >= 0);
VLOG(1) << "Replicate node '" << replicate->name() << "'"
<< " input edges: " << in_edges.size()
<< " num_replicated_inputs: " << num_replicated_inputs
<< " num_distributed_vars: " << num_distributed_vars
<< " num_variables: " << num_variables
<< " num_guaranteed_constants: " << num_guaranteed_constants
<< " num_mirrored_variables: " << mirrored_variable_indices.size();
const int num_broadcast_inputs =
in_edges.size() - (num_replicated_inputs + num_distributed_vars +
num_variables + num_guaranteed_constants);
TF_RET_CHECK(num_broadcast_inputs >= 0);
const int num_inputs = num_replicated_inputs * num_replicas +
num_distributed_vars + num_broadcast_inputs +
num_guaranteed_constants + num_variables;
std::vector<Node*> nodes_to_remove = {replicate};
std::vector<std::pair<Node*, int>> data_inputs(num_inputs);
gtl::FlatSet<Node*> control_inputs;
AddControlInputs(*replicate, &control_inputs);
DataTypeVector replicated_input_types(num_replicated_inputs * num_replicas +
num_distributed_vars);
for (int i = 0; i < num_replicated_inputs; ++i) {
std::vector<const Edge*> replica_in_edges;
TF_RETURN_IF_ERROR(in_edges[i]->src()->input_edges(&replica_in_edges));
for (int replica = 0; replica < num_replicas; ++replica) {
int pos = replica * num_replicated_inputs + i;
const Edge* edge = replica_in_edges[replica];
data_inputs[pos] = {edge->src(), edge->src_output()};
replicated_input_types[pos] = EdgeType(edge);
}
AddControlInputs(*in_edges[i]->src(), &control_inputs);
nodes_to_remove.push_back(in_edges[i]->src());
}
for (int i = 0; i < num_distributed_vars; ++i) {
int pos = num_replicas * num_replicated_inputs + i;
std::vector<const Edge*> replica_in_edges;
TF_RETURN_IF_ERROR(
in_edges[num_replicated_inputs + i]->src()->input_edges(
&replica_in_edges));
TF_RET_CHECK(replica_in_edges.size() == 1);
const Edge* edge = replica_in_edges[0];
data_inputs[pos] = {edge->src(), edge->src_output()};
replicated_input_types[pos] = EdgeType(edge);
AddControlInputs(*in_edges[num_replicated_inputs + i]->src(),
&control_inputs);
nodes_to_remove.push_back(in_edges[num_replicated_inputs + i]->src());
}
DataTypeVector broadcast_input_types(num_broadcast_inputs);
for (int i = 0; i < num_broadcast_inputs; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars + i;
const Edge* edge =
in_edges[num_replicated_inputs + num_distributed_vars + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
broadcast_input_types[i] = EdgeType(edge);
}
for (int i = 0; i < num_variables; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + i;
const Edge* edge = in_edges[num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
}
DataTypeVector guaranteed_constant_types(num_guaranteed_constants);
for (int i = 0; i < num_guaranteed_constants; ++i) {
int pos = num_replicas * num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + num_variables + i;
const Edge* edge = in_edges[num_replicated_inputs + num_distributed_vars +
num_broadcast_inputs + num_variables + i];
data_inputs[pos] = {edge->src(), edge->src_output()};
guaranteed_constant_types[i] = EdgeType(edge);
}
const int num_outputs = replicate->output_types().size();
gtl::FlatSet<Node*> control_outputs;
std::vector<Node*> replicated_outputs(num_outputs);
for (const Edge* e : replicate->out_edges()) {
if (e->IsControlEdge()) {
control_outputs.insert(e->dst());
} else {
TF_RET_CHECK(e->src_output() < num_outputs);
TF_RET_CHECK(e->dst()->type_string() == kTPUReplicatedOutput)
<< e->DebugString();
TF_RET_CHECK(e->dst()->output_types().size() == num_replicas);
replicated_outputs[e->src_output()] = e->dst();
nodes_to_remove.push_back(e->dst());
AddControlOutputs(*e->dst(), &control_outputs);
}
}
std::vector<std::vector<std::pair<Node*, int>>> data_outputs(num_replicas *
num_outputs);
DataTypeVector output_types(num_replicas * num_outputs);
for (int i = 0; i < num_outputs; ++i) {
std::vector<std::vector<const Edge*>> replica_out_edges(num_replicas);
TF_RET_CHECK(replicated_outputs[i] != nullptr);
for (const Edge* e : replicated_outputs[i]->out_edges()) {
TF_RET_CHECK(!e->IsControlEdge());
replica_out_edges[e->src_output()].push_back(e);
}
for (int replica = 0; replica < num_replicas; ++replica) {
const int pos = replica * num_outputs + i;
for (const Edge* edge : replica_out_edges[replica]) {
data_outputs[pos].push_back({edge->dst(), edge->dst_input()});
}
output_types[pos] = replicated_outputs[i]->input_type(0);
}
}
NodeDef def;
def.set_name(replicate->name());
def.set_op("_TPUReplicate");
MergeDebugInfo(NodeDebugInfo(replicate->def()), &def);
NameAttrList computation;
computation.set_name(replicate->type_string());
AddNodeAttr("computation", computation, &def);
for (const auto& attr : replicate->attrs()) {
def.mutable_attr()->insert(attr);
}
AddNodeAttr("Tinputs", replicated_input_types, &def);
AddNodeAttr("Tbroadcast_inputs", broadcast_input_types, &def);
AddNodeAttr("NumVariables", num_variables, &def);
AddNodeAttr("Tguaranteed_constants", guaranteed_constant_types, &def);
AddNodeAttr("output_types", output_types, &def);
AddNodeAttr(TPUREPLICATE_MIRRORED_VAR_INDICES_ATTR,
mirrored_variable_indices, &def);
AddNodeAttr("num_distributed_variables", num_distributed_vars, &def);
for (Node* node : nodes_to_remove) {
VLOG(2) << "Deleting node " << node->DebugString();
control_inputs.erase(node);
control_outputs.erase(node);
graph->RemoveNode(node);
}
TF_ASSIGN_OR_RETURN(Node * tpu_replicate, graph->AddNode(def));
for (int i = 0; i < data_inputs.size(); ++i) {
graph->AddEdge(data_inputs[i].first, data_inputs[i].second, tpu_replicate,
i);
}
for (Node* n : control_inputs) {
graph->AddControlEdge(n, tpu_replicate);
}
for (int i = 0; i < data_outputs.size(); ++i) {
for (const auto& successor : data_outputs[i]) {
graph->AddEdge(tpu_replicate, i, successor.first, successor.second);
}
}
for (Node* n : control_outputs) {
graph->AddControlEdge(tpu_replicate, n);
}
}
return absl::OkStatus();
}
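// Pass entry point: encapsulates TPU clusters into functions and then lowers
// them to _TPUReplicate ops, dumping the graph before, between, and after.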
Status EncapsulateTPUComputationsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateTPUComputations(): "
<< DumpGraphToFile("encapsulate_tpu_computations_before",
**options.graph, options.flib_def);
TF_RETURN_IF_ERROR(Encapsulate(options.graph, options.flib_def));
VLOG(1) << "EncapsulateTPUComputations() half-way: "
<< DumpGraphToFile("encapsulate_tpu_computations_halfway",
**options.graph, options.flib_def);
TF_RETURN_IF_ERROR(BuildTPUReplicateOps(options.graph->get()));
VLOG(1) << "EncapsulateTPUComputations() finished: "
<< DumpGraphToFile("encapsulate_tpu_computations_after",
**options.graph, options.flib_def);
return absl::OkStatus();
}
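// For each cluster: lifts outside-compilation-only args, moves head/tail
// outside-compilation nodes back to the host graph, and rewrites the XLA
// function to drop now-unused inputs and outputs.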
Status ExtractOutsideCompilationPass::ProcessHeadTailOutsideCompilation(
const std::string& outside_compilation_attr_name, int* lifted_arg_count,
std::unordered_map<std::string, XlaClusterInfo>* clusters, Graph* g,
FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld) {
absl::node_hash_map<std::string, Node*> pivots;
std::string cluster_name;
for (Node* node : g->nodes()) {
if (TryGetNodeAttr(node->attrs(), kPivotForClusterAttr, &cluster_name)) {
pivots[cluster_name] = node;
}
}
for (auto& iter : *clusters) {
Node* pivot_node = pivots[iter.first];
std::string xla_func_name = iter.second.func_name_attrs.name();
std::unique_ptr<FunctionBody> xla_fbody;
TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(
*fld->Find(xla_func_name),
AttrSlice(&iter.second.func_name_attrs.attr()), fld, &xla_fbody));
Graph* xla_graph = xla_fbody->graph;
FixupSourceAndSinkEdges(xla_graph);
TF_RETURN_IF_ERROR(RemoveIdentityNodesForArgRetval(xla_graph));
bool rewritten;
TF_RETURN_IF_ERROR(LiftOutsideCompilationOnlyArgs(
xla_graph, flr, fld, lifted_arg_count, &rewritten));
TF_RETURN_IF_ERROR(MoveHeadOutsideCompilationToHost(
outside_compilation_attr_name, iter.second.func_name_attrs.name(),
iter.second.cluster_name, g, xla_graph, iter.second.node, pivot_node));
TF_RETURN_IF_ERROR(MoveTailOutsideCompilationToHost(
outside_compilation_attr_name, iter.second.func_name_attrs.name(),
iter.second.cluster_name, g, xla_graph, iter.second.node, pivot_node));
TF_RETURN_IF_ERROR(ReplaceArgUsedByOutsideCompilationWithPlaceholder(
outside_compilation_attr_name, xla_func_name, g, xla_graph,
iter.second.node));
TF_RETURN_IF_ERROR(RemoveEdgesBetweenArgAndRetval(
iter.second.func_name_attrs.name(), g, xla_graph, iter.second.node));
TF_RETURN_IF_ERROR(RemoveUnusedXlaInput(iter.second.func_name_attrs.name(),
g, xla_graph, iter.second.node));
TF_RETURN_IF_ERROR(RemoveUnusedXlaOutput(iter.second.func_name_attrs.name(),
g, xla_graph, iter.second.node));
FunctionDef replace_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*xla_graph, xla_func_name, &replace_fdef));
TF_RETURN_IF_ERROR(fld->ReplaceFunction(xla_func_name, replace_fdef));
FixupSourceAndSinkEdges(g);
}
return absl::OkStatus();
}
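// Pass entry point: collects every _TPUReplicate cluster, processes head/tail
// outside compilation, then extracts the remaining outside-compilation nodes
// into host-side computations, pruning unreachable functions if modified.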
Status ExtractOutsideCompilationPass::Run(
const GraphOptimizationPassOptions& options) {
const auto* config =
(options.session_options ? &options.session_options->config : nullptr);
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
nullptr, options.session_options->env,
config, TF_GRAPH_DEF_VERSION, options.flib_def,
config ? config->graph_options().optimizer_options()
: OptimizerOptions()));
FunctionLibraryRuntime* flr =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
static std::map<std::string, std::string>* kNodeTypeToFunctionAttrMapping =
new std::map<std::string, std::string>{
{"_TPUReplicate", "computation"},
};
std::unordered_map<std::string, XlaClusterInfo> clusters;
int lifted_arg_count = 0;
for (Node* n : (*options.graph)->nodes()) {
auto iter = kNodeTypeToFunctionAttrMapping->find(n->type_string());
if (iter == kNodeTypeToFunctionAttrMapping->end()) {
continue;
}
std::string xla_cluster_name = n->name();
std::string func_attr = iter->second;
NameAttrList func;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), func_attr, &func));
std::vector<std::string> core_list;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), "host_compute_core", &core_list));
std::map<std::string, int> host_compute_core;
TF_RETURN_IF_ERROR(ParseHostComputeCoreList(core_list, &host_compute_core));
clusters.emplace(xla_cluster_name, XlaClusterInfo{xla_cluster_name, func, n,
host_compute_core});
}
TF_RETURN_IF_ERROR(ProcessHeadTailOutsideCompilation(
kOutsideCompilationAttr, &lifted_arg_count, &clusters,
options.graph->get(), flr, options.flib_def));
bool modified;
TF_RETURN_IF_ERROR(ExtractOutsideCompilation(
kTPUReplicateAttr, kOutsideCompilationAttr, clusters,
options.graph->get(), flr, options.flib_def, &modified));
if (modified) {
TF_RETURN_IF_ERROR(
PruneUnreachableFunctionsFromGraph(**options.graph, options.flib_def));
}
return absl::OkStatus();
}
} | #include "tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.h"
#include <memory>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/config/flag_defs.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
std::unique_ptr<Graph> CreateGraph() {
auto g = std::make_unique<Graph>(OpRegistry::Global());
auto in0 = test::graph::Arg(g.get(), 0, DT_FLOAT);
auto in1 = test::graph::Arg(g.get(), 1, DT_FLOAT);
auto tmp = test::graph::Add(g.get(), in0, in1);
auto ret = test::graph::Retval(g.get(), 0, tmp);
g->AddControlEdge(in1, ret);
FixupSourceAndSinkEdges(g.get());
return g;
}
TEST(EncapsulateTPUComputationsPassTest, NonTPUGraph) {
auto g = CreateGraph();
GraphOptimizationPassOptions options;
options.graph = &g;
options.flib_def = g->mutable_flib_def();
EncapsulateTPUComputationsPass pass;
TF_ASSERT_OK(pass.Run(options));
int nodes_meeting_expectations = 0;
for (const auto* node : g->nodes()) {
if (!IsSource(node) && !IsSink(node)) {
ASSERT_TRUE(node->attrs().Find("_xla_inferred_shapes"));
++nodes_meeting_expectations;
}
}
EXPECT_EQ(nodes_meeting_expectations, 4);
}
TEST(EncapsulateTPUComputationsPassTest, SkipEncapsulationForNonTPUGraph) {
flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.reset(true);
auto g = CreateGraph();
GraphOptimizationPassOptions options;
options.graph = &g;
options.flib_def = g->mutable_flib_def();
EncapsulateTPUComputationsPass pass;
TF_ASSERT_OK(pass.Run(options));
int nodes_meeting_expectations = 0;
for (const auto* node : g->nodes()) {
if (!IsSource(node) && !IsSink(node)) {
ASSERT_FALSE(node->attrs().Find("_xla_inferred_shapes"));
++nodes_meeting_expectations;
}
}
EXPECT_EQ(nodes_meeting_expectations, 4);
flags::Global().enable_skip_encapsulation_for_non_tpu_graphs.reset(false);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fbc0f9e3-a934-4f25-b2af-dbb74096c5a7 | cpp | google/tsl | numa | tsl/platform/numa.h | tsl/platform/numa_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_NUMA_H_
#define TENSORFLOW_TSL_PLATFORM_NUMA_H_
#include "tsl/platform/platform.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace port {
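// Returns true iff NUMA support is available on this platform.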
bool NUMAEnabled();
int NUMANumNodes();
static const int kNUMANoAffinity = -1;
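// Binds the calling thread to the given NUMA node; NUMAGetThreadNodeAffinity
// returns that node, or kNUMANoAffinity if no affinity has been set.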
void NUMASetThreadNodeAffinity(int node);
int NUMAGetThreadNodeAffinity();
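// Allocates (and frees) memory bound to a specific NUMA node;
// NUMAGetMemAffinity reports the node on which `ptr` was allocated.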
void* NUMAMalloc(int node, size_t size, int minimum_alignment);
void NUMAFree(void* ptr, size_t size);
int NUMAGetMemAffinity(const void* ptr);
}
}
#endif | #include "tsl/platform/numa.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Numa, NumNodes) {
if (port::NUMAEnabled()) {
EXPECT_GE(port::NUMANumNodes(), 1);
}
}
TEST(Numa, Malloc) {
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
void* ptr = port::NUMAMalloc(request_node, 8, 0);
EXPECT_NE(ptr, nullptr);
*(reinterpret_cast<int*>(ptr)) = 0;
int affinity_node = port::NUMAGetMemAffinity(ptr);
EXPECT_EQ(affinity_node, request_node);
port::NUMAFree(ptr, 8);
}
}
}
TEST(Numa, SetNodeAffinity) {
EXPECT_EQ(-1, port::NUMAGetThreadNodeAffinity());
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
port::NUMASetThreadNodeAffinity(request_node);
int affinity_node = port::NUMAGetThreadNodeAffinity();
EXPECT_EQ(affinity_node, request_node);
}
}
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/numa.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/numa_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
f71fad68-e03b-400c-b9ab-ee61e52d3905 | cpp | tensorflow/tensorflow | buffer | tensorflow/lite/delegates/gpu/cl/buffer.cc | tensorflow/lite/delegates/gpu/cl/buffer_test.cc | #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include <string>
#include "absl/status/status.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
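// Allocates a cl_mem of `size_in_bytes` (optionally read-only and optionally
// initialized from `data`) and wraps it in an owning Buffer.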
absl::Status CreateBuffer(size_t size_in_bytes, bool gpu_read_only,
const void* data, CLContext* context,
Buffer* result) {
cl_mem buffer;
RETURN_IF_ERROR(CreateCLBuffer(context->context(), size_in_bytes,
gpu_read_only, const_cast<void*>(data),
&buffer));
*result = Buffer(buffer, size_in_bytes);
return absl::OkStatus();
}
absl::Status CreateSubBuffer(const Buffer& parent, size_t origin_in_bytes,
size_t size_in_bytes, bool gpu_read_only,
CLContext* context, Buffer* result) {
cl_mem buffer;
if (parent.IsSubBuffer()) {
return absl::InvalidArgumentError(
"Cannot create a sub-buffer from a sub-buffer!");
}
RETURN_IF_ERROR(CreateCLSubBuffer(context->context(), parent.GetMemoryPtr(),
origin_in_bytes, size_in_bytes,
gpu_read_only, &buffer));
*result = Buffer(buffer, size_in_bytes, true);
return absl::OkStatus();
}
}
Buffer::Buffer(cl_mem buffer, size_t size_in_bytes, bool is_sub_buffer)
: buffer_(buffer), size_(size_in_bytes), is_sub_buffer_(is_sub_buffer) {}
Buffer::Buffer(cl_mem buffer)
: buffer_(buffer), size_(0), is_sub_buffer_(false), owner_(false) {}
Buffer::Buffer(Buffer&& buffer)
: buffer_(buffer.buffer_),
size_(buffer.size_),
is_sub_buffer_(buffer.is_sub_buffer_),
owner_(buffer.owner_) {
buffer.buffer_ = nullptr;
buffer.size_ = 0;
buffer.is_sub_buffer_ = false;
}
Buffer& Buffer::operator=(Buffer&& buffer) {
if (this != &buffer) {
Release();
std::swap(size_, buffer.size_);
std::swap(buffer_, buffer.buffer_);
std::swap(is_sub_buffer_, buffer.is_sub_buffer_);
std::swap(owner_, buffer.owner_);
}
return *this;
}
void Buffer::Release() {
if (owner_ && buffer_) {
clReleaseMemObject(buffer_);
buffer_ = nullptr;
size_ = 0;
is_sub_buffer_ = false;
}
}
absl::Status Buffer::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
if (!buffer_desc) {
return absl::InvalidArgumentError("Expected BufferDescriptor on input.");
}
resources->buffers.push_back({"buffer", buffer_});
return absl::OkStatus();
}
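// Creates the underlying cl_mem from a BufferDescriptor; MemoryType::CONSTANT
// yields a read-only buffer, initialized from desc.data when non-empty.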
absl::Status Buffer::CreateFromBufferDescriptor(const BufferDescriptor& desc,
CLContext* context) {
bool read_only = desc.memory_type == MemoryType::CONSTANT;
uint8_t* data_ptr = desc.data.empty()
? nullptr
: const_cast<unsigned char*>(desc.data.data());
size_ = desc.size;
return CreateCLBuffer(context->context(), desc.size, read_only, data_ptr,
&buffer_);
}
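// Wraps an externally owned cl_mem; the returned Buffer does not take
// ownership and will never release it.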
Buffer CreateBufferShared(cl_mem buffer) { return Buffer(buffer); }
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, CLContext* context,
Buffer* result) {
  return CreateBuffer(size_in_bytes, /*gpu_read_only=*/true, /*data=*/nullptr,
                      context, result);
}
absl::Status CreateReadOnlyBuffer(size_t size_in_bytes, const void* data,
CLContext* context, Buffer* result) {
  return CreateBuffer(size_in_bytes, /*gpu_read_only=*/true, data, context,
                      result);
}
absl::Status CreateReadWriteBuffer(size_t size_in_bytes, CLContext* context,
Buffer* result) {
  return CreateBuffer(size_in_bytes, /*gpu_read_only=*/false, /*data=*/nullptr,
                      context, result);
}
absl::Status CreateReadWriteSubBuffer(const Buffer& parent,
size_t origin_in_bytes,
size_t size_in_bytes, CLContext* context,
Buffer* result) {
return CreateSubBuffer(parent, origin_in_bytes, size_in_bytes,
                         /*gpu_read_only=*/false, context, result);
}
}
}
} | #include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLTest, BufferTestFloat) {
const std::vector<float> data = {1.0, 2.0, 3.0, -4.0, 5.1};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(float) * 5, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<float> gpu_data;
ASSERT_OK(buffer.ReadData<float>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
TEST_F(OpenCLTest, BufferTestHalf) {
const std::vector<half> data = {half(1.4), half(2.1), half(2.2)};
Buffer buffer;
ASSERT_OK(CreateReadWriteBuffer(sizeof(half) * 3, &env_.context(), &buffer));
ASSERT_OK(buffer.WriteData(env_.queue(),
absl::MakeConstSpan(data.data(), data.size())));
std::vector<half> gpu_data;
ASSERT_OK(buffer.ReadData<half>(env_.queue(), &gpu_data));
EXPECT_THAT(gpu_data, Pointwise(FloatNear(0.0f), data));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ed05a639-8611-4b50-bf09-f303ce4b0779 | cpp | google/tensorstore | index_transform | tensorstore/index_space/index_transform.cc | tensorstore/index_space/index_transform_test.cc | #include "tensorstore/index_space/index_transform.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <limits>
#include <numeric>
#include <string>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_identifier.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/json.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
std::string DescribeTransformForCast(DimensionIndex input_rank,
DimensionIndex output_rank) {
return tensorstore::StrCat(
"index transform with input ",
StaticCastTraits<DimensionIndex>::Describe(input_rank), " and output ",
StaticCastTraits<DimensionIndex>::Describe(output_rank));
}
std::string DescribeDomainForCast(DimensionIndex rank) {
return tensorstore::StrCat("index domain with ",
StaticCastTraits<DimensionIndex>::Describe(rank));
}
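// Restricts `transform` to the bounds of `domain`.  Dimensions are matched by
// label when both sides are labeled and positionally otherwise; every new
// bound must be contained in the corresponding existing effective bound.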
Result<IndexTransform<>> SliceByIndexDomain(IndexTransform<> transform,
IndexDomainView<> domain) {
using internal_index_space::TransformAccess;
assert(transform.valid());
assert(domain.valid());
TransformRep::Ptr<> rep =
MutableRep(TransformAccess::rep_ptr<container>(std::move(transform)));
const DimensionIndex slice_rank = domain.rank();
const DimensionIndex input_rank = rep->input_rank;
const span<const std::string> domain_labels = domain.labels();
const span<std::string> transform_labels =
rep->input_labels().first(input_rank);
DimensionIndex transform_dims[kMaxRank];
const bool domain_unlabeled =
internal_index_space::IsUnlabeled(domain_labels);
if (domain_unlabeled || internal_index_space::IsUnlabeled(transform_labels)) {
if (slice_rank != input_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank of index domain (", slice_rank,
") must match rank of slice target (", input_rank,
") when the index domain or slice target is unlabeled"));
}
std::iota(&transform_dims[0], &transform_dims[slice_rank],
DimensionIndex(0));
if (!domain_unlabeled) {
std::copy_n(domain_labels.begin(), slice_rank, transform_labels.begin());
}
} else {
DimensionIndex next_potentially_unlabeled_dim = 0;
for (DimensionIndex i = 0; i < slice_rank; ++i) {
std::string_view label = domain_labels[i];
DimensionIndex j;
if (!label.empty()) {
TENSORSTORE_ASSIGN_OR_RETURN(
j, NormalizeDimensionLabel(label, transform_labels));
} else {
while (true) {
if (next_potentially_unlabeled_dim == input_rank) {
return absl::InvalidArgumentError(
"Number of unlabeled dimensions in index domain exceeds number "
"of unlabeled dimensions in slice target");
}
if (transform_labels[next_potentially_unlabeled_dim].empty()) {
j = next_potentially_unlabeled_dim++;
break;
}
++next_potentially_unlabeled_dim;
}
}
transform_dims[i] = j;
}
if (next_potentially_unlabeled_dim != 0 && input_rank != slice_rank) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Rank (", slice_rank,
") of index domain containing unlabeled dimensions must "
"equal slice target rank (",
input_rank, ")"));
}
}
bool domain_is_empty = false;
for (DimensionIndex i = 0; i < slice_rank; ++i) {
const DimensionIndex j = transform_dims[i];
const internal_index_space::InputDimensionRef d = rep->input_dimension(j);
const IndexInterval orig_domain =
d.optionally_implicit_domain().effective_interval();
const IndexInterval new_domain = domain[i];
if (!Contains(orig_domain, new_domain)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Cannot slice target dimension ", j, " {",
d.index_domain_dimension<view>(), "} with index domain dimension ", i,
" {", domain[i], "}"));
}
if (new_domain.empty()) domain_is_empty = true;
d.domain() = new_domain;
d.implicit_lower_bound() = false;
d.implicit_upper_bound() = false;
}
if (domain_is_empty) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
Result<IndexTransform<>> SliceByBox(IndexTransform<> transform,
BoxView<> domain) {
using internal_index_space::TransformAccess;
assert(transform.valid());
if (transform.input_rank() != domain.rank()) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Rank of index domain (", transform.input_rank(),
") must match rank of box (", domain.rank(), ")"));
}
TransformRep::Ptr<> rep =
MutableRep(TransformAccess::rep_ptr<container>(std::move(transform)));
bool domain_is_empty = false;
for (DimensionIndex i = 0; i < domain.rank(); ++i) {
const internal_index_space::InputDimensionRef d = rep->input_dimension(i);
const IndexInterval orig_domain =
d.optionally_implicit_domain().effective_interval();
const IndexInterval new_domain = domain[i];
if (new_domain.empty()) domain_is_empty = true;
if (!Contains(orig_domain, new_domain)) {
return absl::OutOfRangeError(tensorstore::StrCat(
"Cannot slice dimension ", i, " {", d.index_domain_dimension<view>(),
"} with interval {", domain[i], "}"));
}
d.domain() = new_domain;
d.implicit_lower_bound() = false;
d.implicit_upper_bound() = false;
}
if (domain_is_empty) {
ReplaceAllIndexArrayMapsWithConstantMaps(rep.get());
}
internal_index_space::DebugCheckInvariants(rep.get());
return TransformAccess::Make<IndexTransform<>>(std::move(rep));
}
Result<IndexDomain<>> SliceByBox(IndexDomain<> domain, BoxView<> box) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto transform, internal_index_space::SliceByBox(
TransformAccess::transform(std::move(domain)), box));
return std::move(transform).domain();
}
}
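// Computes a bounding box of the transform's output range into `output_range`;
// the returned bool is true iff the box is exact rather than a superset.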
Result<bool> GetOutputRange(IndexTransformView<> transform,
MutableBoxView<> output_range) {
assert(output_range.rank() == transform.output_rank());
DimensionSet input_dim_used;
bool exact = true;
for (DimensionIndex output_dim = 0, output_rank = transform.output_rank();
output_dim < output_rank; ++output_dim) {
const auto output_index_map = transform.output_index_map(output_dim);
const OutputIndexMethod method = output_index_map.stride() == 0
? OutputIndexMethod::constant
: output_index_map.method();
switch (method) {
case OutputIndexMethod::constant: {
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
IndexInterval::Sized(output_index_map.offset(), 1));
break;
}
case OutputIndexMethod::single_input_dimension: {
const Index stride = output_index_map.stride();
if (stride < -1 || stride > 1) exact = false;
const DimensionIndex input_dim = output_index_map.input_dimension();
if (input_dim_used[input_dim]) {
exact = false;
} else {
input_dim_used[input_dim] = true;
}
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
GetAffineTransformRange(transform.input_domain()[input_dim],
output_index_map.offset(), stride));
break;
}
case OutputIndexMethod::array: {
exact = false;
const auto index_array_ref = output_index_map.index_array();
TENSORSTORE_ASSIGN_OR_RETURN(
output_range[output_dim],
GetAffineTransformRange(index_array_ref.index_range(),
output_index_map.offset(),
output_index_map.stride()));
break;
}
}
}
return exact;
}
namespace internal_index_space {
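// Validates that a requested [inclusive_min, exclusive_max) resize is finite
// and well-ordered, and that it only changes bounds marked implicit.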
absl::Status ValidateInputDimensionResize(
OptionallyImplicitIndexInterval input_domain, Index requested_inclusive_min,
Index requested_exclusive_max) {
if (requested_inclusive_min != kImplicit &&
requested_inclusive_min != -kInfIndex &&
!IsFiniteIndex(requested_inclusive_min)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested inclusive min value ", requested_inclusive_min));
}
if (requested_exclusive_max != kImplicit &&
requested_exclusive_max != kInfIndex + 1 &&
!IsFiniteIndex(requested_exclusive_max - 1)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested exclusive max value ", requested_exclusive_max));
}
if (requested_inclusive_min != kImplicit &&
requested_exclusive_max != kImplicit &&
requested_inclusive_min > requested_exclusive_max) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid requested bounds [", requested_inclusive_min, ", ",
requested_exclusive_max, ")"));
}
if (!input_domain.implicit_lower() && requested_inclusive_min != kImplicit) {
return absl::InvalidArgumentError("Cannot change explicit lower bound");
}
if (!input_domain.implicit_upper() && requested_exclusive_max != kImplicit) {
return absl::InvalidArgumentError("Cannot change explicit upper bound");
}
return absl::OkStatus();
}
}
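// Propagates a requested resize of the input domain through the transform,
// producing new output bounds plus constraints that must continue to hold.
// Unless `can_resize_tied_bounds` is set, diagonal, non-unit-stride, and
// index-array output maps are rejected outright.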
absl::Status PropagateInputDomainResizeToOutput(
IndexTransformView<> transform,
span<const Index> requested_input_inclusive_min,
span<const Index> requested_input_exclusive_max,
bool can_resize_tied_bounds, span<Index> output_inclusive_min_constraint,
span<Index> output_exclusive_max_constraint,
span<Index> new_output_inclusive_min, span<Index> new_output_exclusive_max,
bool* is_noop) {
assert(transform.valid());
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
assert(requested_input_inclusive_min.size() == transform.input_rank());
assert(requested_input_exclusive_max.size() == transform.input_rank());
assert(output_inclusive_min_constraint.size() == transform.output_rank());
assert(output_exclusive_max_constraint.size() == transform.output_rank());
assert(new_output_inclusive_min.size() == transform.output_rank());
assert(new_output_exclusive_max.size() == transform.output_rank());
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::ValidateInputDimensionResize(
transform.input_domain()[input_dim],
requested_input_inclusive_min[input_dim],
requested_input_exclusive_max[input_dim]),
MaybeAnnotateStatus(
_, tensorstore::StrCat(
"Invalid resize request for input dimension ", input_dim)));
}
bool is_noop_value = true;
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
output_inclusive_min_constraint[output_dim] = kImplicit;
output_exclusive_max_constraint[output_dim] = kImplicit;
new_output_inclusive_min[output_dim] = kImplicit;
new_output_exclusive_max[output_dim] = kImplicit;
const auto map = transform.output_index_map(output_dim);
if (map.method() != OutputIndexMethod::single_input_dimension) continue;
const DimensionIndex input_dim = map.input_dimension();
const Index requested_min = requested_input_inclusive_min[input_dim];
const Index requested_max = requested_input_exclusive_max[input_dim];
if (requested_min != kImplicit || requested_max != kImplicit) {
is_noop_value = false;
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" depends on resized input dimension ", input_dim,
" with non-unit stride of ", map.stride()));
}
Result<OptionallyImplicitIndexInterval> output_bounds =
GetAffineTransformRange(
{IndexInterval::UncheckedHalfOpen(
requested_min == kImplicit ? -kInfIndex : requested_min,
requested_max == kImplicit ? kInfIndex + 1 : requested_max),
requested_min == kImplicit, requested_max == kImplicit},
map.offset(), map.stride());
if (!output_bounds) {
return MaybeAnnotateStatus(
output_bounds.status(),
tensorstore::StrCat(
"Error propagating bounds for output dimension ", output_dim,
" from requested bounds for input dimension ", input_dim));
}
if (!output_bounds->implicit_lower()) {
new_output_inclusive_min[output_dim] = output_bounds->inclusive_min();
}
if (!output_bounds->implicit_upper()) {
new_output_exclusive_max[output_dim] = output_bounds->exclusive_max();
}
}
}
*is_noop = is_noop_value;
if (is_noop_value) return absl::OkStatus();
DimensionIndex num_input_dim_deps[kMaxRank];
std::fill_n(num_input_dim_deps, input_rank, static_cast<DimensionIndex>(0));
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto map = transform.output_index_map(output_dim);
switch (map.method()) {
case OutputIndexMethod::constant:
if (!IsFiniteIndex(map.offset())) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" has constant map with invalid offset ", map.offset()));
}
if (!can_resize_tied_bounds) {
output_inclusive_min_constraint[output_dim] = map.offset();
output_exclusive_max_constraint[output_dim] = map.offset() + 1;
}
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
if (!can_resize_tied_bounds) {
if (num_input_dim_deps[input_dim]++ != 0) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Input dimension ", input_dim,
" corresponds to a diagonal but "
"`resize_tied_bounds` was not specified"));
}
if (std::abs(map.stride()) != 1) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim, " depends on input dimension ",
input_dim, " with non-unit stride of ", map.stride(),
" but `resize_tied_bounds` was not specified"));
}
Result<OptionallyImplicitIndexInterval> output_bounds =
GetAffineTransformRange(transform.input_domain()[input_dim],
map.offset(), map.stride());
if (!output_bounds) {
return MaybeAnnotateStatus(
output_bounds.status(),
tensorstore::StrCat(
"Error propagating bounds for output dimension ",
output_dim, " from existing bounds for input dimension ",
input_dim));
}
if (!output_bounds->implicit_lower()) {
output_inclusive_min_constraint[output_dim] =
output_bounds->inclusive_min();
}
if (!output_bounds->implicit_upper()) {
output_exclusive_max_constraint[output_dim] =
output_bounds->exclusive_max();
}
}
break;
}
case OutputIndexMethod::array:
if (!can_resize_tied_bounds) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Output dimension ", output_dim,
" has index array map but `resize_tied_bounds` was "
"not specified"));
}
break;
}
}
return absl::OkStatus();
}
namespace {
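// Dimension-wise merge of two index domains: labels are combined with
// MergeDimensionLabels and bounds with the supplied `merge` functor; an
// invalid domain on either side yields the other unchanged.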
template <typename MergeFn>
inline Result<IndexDomain<>> MergeIndexDomainsImpl(IndexDomainView<> a,
IndexDomainView<> b,
MergeFn merge) {
if (!a.valid()) return b;
if (!b.valid()) return a;
if (a.rank() != b.rank()) {
return absl::InvalidArgumentError("Ranks do not match");
}
const DimensionIndex rank = a.rank();
auto new_rep = internal_index_space::TransformRep::Allocate(rank, 0);
new_rep->input_rank = rank;
new_rep->output_rank = 0;
const auto a_labels = a.labels();
const auto b_labels = b.labels();
for (DimensionIndex i = 0; i < rank; ++i) {
auto status = [&] {
TENSORSTORE_ASSIGN_OR_RETURN(
auto new_label, MergeDimensionLabels(a_labels[i], b_labels[i]));
TENSORSTORE_ASSIGN_OR_RETURN(auto new_bounds, merge(a[i], b[i]));
new_rep->input_dimension(i) =
IndexDomainDimension<view>(new_bounds, new_label);
return absl::OkStatus();
}();
if (!status.ok()) {
return tensorstore::MaybeAnnotateStatus(
status, tensorstore::StrCat("Mismatch in dimension ", i));
}
}
internal_index_space::DebugCheckInvariants(new_rep.get());
return internal_index_space::TransformAccess::Make<IndexDomain<>>(
std::move(new_rep));
}
}
Result<IndexDomain<>> MergeIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result =
MergeIndexDomainsImpl(a, b, MergeOptionallyImplicitIndexIntervals);
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot merge index domain ", a,
" with index domain ", b));
}
return result;
}
Result<IndexDomain<>> HullIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b)
-> Result<OptionallyImplicitIndexInterval> { return Hull(a, b); });
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot hull index domain ", a,
" with index domain ", b));
}
return result;
}
Result<IndexDomain<>> IntersectIndexDomains(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval a, OptionallyImplicitIndexInterval b)
-> Result<OptionallyImplicitIndexInterval> {
return Intersect(a, b);
});
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot intersect index domain ",
a, " with index domain ", b));
}
return result;
}
Result<IndexDomain<>> ConstrainIndexDomain(IndexDomainView<> a,
IndexDomainView<> b) {
auto result = MergeIndexDomainsImpl(
a, b,
[](OptionallyImplicitIndexInterval ai, OptionallyImplicitIndexInterval bi)
-> Result<OptionallyImplicitIndexInterval> {
const bool constrain_lower =
ai.implicit_lower() && ai.inclusive_min() == -kInfIndex;
const bool constrain_upper =
ai.implicit_upper() && ai.inclusive_max() == kInfIndex;
return OptionallyImplicitIndexInterval{
IndexInterval::UncheckedClosed(
constrain_lower ? bi.inclusive_min() : ai.inclusive_min(),
constrain_upper ? bi.inclusive_max() : ai.inclusive_max()),
constrain_lower ? bi.implicit_lower() : ai.implicit_lower(),
constrain_upper ? bi.implicit_upper() : ai.implicit_upper()};
});
if (!result.ok()) {
return tensorstore::MaybeAnnotateStatus(
result.status(), tensorstore::StrCat("Cannot constrain index domain ",
a, " with index domain ", b));
}
return result;
}
namespace internal {
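// Partitions the input dimensions into those that map one-to-one onto an
// output dimension (optionally requiring stride +/-1) and those that are
// referenced in a non-invertible way.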
OneToOneInputDimensions GetOneToOneInputDimensions(
IndexTransformView<> transform, bool require_unit_stride) {
DimensionSet non_one_to_one_input_dims;
DimensionSet seen_input_dims;
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
const Index stride = map.stride();
if (require_unit_stride ? (stride != 1 && stride != -1)
: stride == std::numeric_limits<Index>::min()) {
non_one_to_one_input_dims[input_dim] = true;
break;
}
if (seen_input_dims[input_dim]) {
non_one_to_one_input_dims[input_dim] = true;
break;
}
seen_input_dims[input_dim] = true;
break;
}
case OutputIndexMethod::array: {
const auto index_array = map.index_array();
for (DimensionIndex input_dim = 0; input_dim < input_rank;
++input_dim) {
if (index_array.byte_strides()[input_dim] != 0) {
non_one_to_one_input_dims[input_dim] = true;
}
}
break;
}
}
}
return {seen_input_dims & ~non_one_to_one_input_dims,
non_one_to_one_input_dims};
}
void ComputeInputDimensionReferenceCounts(
IndexTransformView<> transform,
span<DimensionIndex> input_dimension_reference_counts) {
using internal_index_space::TransformAccess;
assert(transform.valid());
const DimensionIndex output_rank = transform.output_rank();
const DimensionIndex input_rank = transform.input_rank();
assert(input_dimension_reference_counts.size() == input_rank);
std::fill_n(input_dimension_reference_counts.begin(), input_rank,
DimensionIndex(0));
auto transform_rep = TransformAccess::rep(transform);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
const auto& output_map = transform_rep->output_index_maps()[output_dim];
switch (output_map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension:
++input_dimension_reference_counts[output_map.input_dimension()];
break;
case OutputIndexMethod::array: {
const auto& index_array_data = output_map.index_array_data();
for (DimensionIndex input_dim = 0; input_dim < input_rank;
++input_dim) {
if (index_array_data.byte_strides[input_dim] != 0) {
++input_dimension_reference_counts[input_dim];
}
}
break;
}
}
}
}
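// Returns the set of input dimensions on which `output_dim` depends, along
// with whether any of those dependencies go through an index array map.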
std::pair<DimensionSet, bool> GetInputDimensionsForOutputDimension(
IndexTransformView<> transform, DimensionIndex output_dim) {
DimensionSet input_dims;
bool has_array_dependence = false;
const auto map = transform.output_index_maps()[output_dim];
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
input_dims[map.input_dimension()] = true;
break;
}
case OutputIndexMethod::array: {
const auto index_array = map.index_array();
const DimensionIndex input_rank = transform.input_rank();
for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
if (index_array.byte_strides()[input_dim] != 0) {
input_dims[input_dim] = true;
has_array_dependence = true;
}
}
break;
}
}
return {input_dims, has_array_dependence};
}
}
Result<IndexTransform<>> ComposeOptionalTransforms(IndexTransform<> b_to_c,
IndexTransform<> a_to_b) {
if (!b_to_c.valid()) return a_to_b;
if (!a_to_b.valid()) return b_to_c;
return ComposeTransforms(std::move(b_to_c), std::move(a_to_b));
}
namespace internal_index_space {
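// Serialization round-trips index transforms and domains through their JSON
// representation; the MaybeNullSerializer wrappers additionally handle
// default-constructed (invalid) values.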
bool IndexTransformNonNullSerializer::Encode(serialization::EncodeSink& sink,
IndexTransformView<> value) {
return serialization::Encode(sink, ::nlohmann::json(value));
}
bool IndexTransformNonNullSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
::nlohmann::json json;
if (!serialization::Decode(source, json)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
value,
internal_index_space::ParseIndexTransformFromJson(
json, input_rank_constraint, output_rank_constraint),
(source.Fail(_), false));
return true;
}
bool IndexTransformSerializer::Encode(serialization::EncodeSink& sink,
IndexTransformView<> value) {
return serialization::MaybeNullSerializer<IndexTransformView<>,
IndexTransformNonNullSerializer,
serialization::IsValid>()
.Encode(sink, value);
}
bool IndexTransformSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
return serialization::MaybeNullSerializer<
internal_index_space::TransformRep::Ptr<>,
IndexTransformNonNullSerializer>{
IndexTransformNonNullSerializer{input_rank_constraint,
output_rank_constraint}}
.Decode(source, value);
}
bool IndexDomainNonNullSerializer::Encode(serialization::EncodeSink& sink,
IndexDomainView<> value) {
return serialization::Encode(sink, ::nlohmann::json(value));
}
bool IndexDomainNonNullSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
::nlohmann::json json;
if (!serialization::Decode(source, json)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
value,
internal_index_space::ParseIndexDomainFromJson(json, rank_constraint),
(source.Fail(_), false));
return true;
}
bool IndexDomainSerializer::Encode(serialization::EncodeSink& sink,
IndexDomainView<> value) {
return serialization::MaybeNullSerializer<IndexDomainView<>,
IndexDomainNonNullSerializer,
serialization::IsValid>()
.Encode(sink, value);
}
bool IndexDomainSerializer::Decode(
serialization::DecodeSource& source,
internal_index_space::TransformRep::Ptr<>& value) const {
return serialization::MaybeNullSerializer<
internal_index_space::TransformRep::Ptr<>,
IndexDomainNonNullSerializer>{
IndexDomainNonNullSerializer{rank_constraint}}
.Decode(source, value);
}
}
} | #include "tensorstore/index_space/index_transform.h"
#include <array>
#include <string_view>
#include <type_traits>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::HullIndexDomains;
using ::tensorstore::IdentityTransform;
using ::tensorstore::Index;
using ::tensorstore::IndexDomain;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexDomainDimension;
using ::tensorstore::IndexDomainView;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransform;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::IntersectIndexDomains;
using ::tensorstore::IsIndexDomain;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::MergeIndexDomains;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::StaticCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::StrCat;
using ::tensorstore::unchecked;
using ::tensorstore::view;
using ::tensorstore::internal::ComputeInputDimensionReferenceCounts;
using ::tensorstore::internal::GetInputDimensionsForOutputDimension;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(IndexTransformTest, Equality) {
EXPECT_EQ(IndexTransform<>(), IndexTransform<>());
EXPECT_EQ(IndexTransformBuilder<>(2, 3).Finalize().value(),
IndexTransformBuilder<>(2, 3).Finalize().value());
EXPECT_NE(IndexTransformBuilder<>(2, 3).Finalize().value(),
IndexTransformBuilder<>(2, 2).Finalize().value());
EXPECT_NE(IndexTransformBuilder<>(3, 2).Finalize().value(),
IndexTransformBuilder<>(2, 2).Finalize().value());
EXPECT_EQ(
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value(),
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value());
EXPECT_NE(
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 4}).Finalize().value(),
IndexTransformBuilder<>(3, 2).input_shape({2, 3, 3}).Finalize().value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({3, 4, 5})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 2})
.input_shape({3, 4, 5})
.Finalize()
.value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.input_labels({"x", "y", "z"})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_labels({"x", "y", "z"})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_labels({"a", "b", "c"})
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_labels({"a", "b", "d"})
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2).Finalize().value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 0)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(0, 0)
.Finalize()
.value());
EXPECT_EQ(
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value(),
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value());
EXPECT_NE(
IndexTransformBuilder<>(3, 2).output_constant(0, 1).Finalize().value(),
IndexTransformBuilder<>(3, 2).output_constant(0, 2).Finalize().value());
EXPECT_EQ(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 0, 2, 1)
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.output_single_input_dimension(1, 1)
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 1}}}))
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}))
.Finalize()
.value());
EXPECT_NE(IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.Finalize()
.value(),
IndexTransformBuilder<>(3, 2)
.input_origin({1, 2, 3})
.input_shape({2, 2, 3})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 5))
.Finalize()
.value());
}
TEST(IndexTransformTest, ImplicitConversion) {
IndexTransform<2, 2> t = IdentityTransform<2>();
IndexTransform<> t_labeled = t;
EXPECT_EQ(IdentityTransform(2), t_labeled);
}
TEST(IndexTransformTest, Assign) {
auto make_labeled_transform = [] {
return IndexTransformBuilder<3, 3>()
.input_origin({0, 1, 2})
.input_shape({2, 2, 3})
.input_labels({"x", "y", "z"})
.output_index_array(0, 1, 4, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.output_single_input_dimension(1, 2, 5, 1)
.output_constant(2, 3)
.Finalize()
.value();
};
auto make_transform = [] {
return IndexTransformBuilder<3, 3>()
.input_origin({0, 1, 2})
.input_shape({2, 2, 3})
.output_index_array(0, 1, 4, MakeArray<Index>({{{1, 1, 2}}}),
IndexInterval::Closed(1, 4))
.output_single_input_dimension(1, 2, 5, 1)
.output_constant(2, 3)
.Finalize()
.value();
};
auto make_identity = [] { return IdentityTransform(2); };
auto make_labeled_identity = [] {
return IdentityTransform(span<const std::string_view>({"x", "y"}));
};
auto unlabeled_t = make_identity();
{
auto unlabeled_t2 = make_identity();
unlabeled_t2 = make_transform();
auto* rep_t2 = TransformAccess::rep(unlabeled_t2);
unlabeled_t = std::move(unlabeled_t2);
EXPECT_EQ(rep_t2, TransformAccess::rep(unlabeled_t));
EXPECT_EQ(nullptr, TransformAccess::rep(unlabeled_t2));
}
unlabeled_t = make_transform();
EXPECT_EQ(make_transform(), unlabeled_t);
unlabeled_t = IndexTransform<2, 2>();
EXPECT_FALSE(unlabeled_t.valid());
auto labeled_t = make_labeled_identity();
labeled_t = make_labeled_transform();
EXPECT_EQ(make_labeled_transform(), labeled_t);
{
auto labeled_t2 = make_labeled_transform();
labeled_t = labeled_t2;
EXPECT_EQ(labeled_t, make_labeled_transform());
labeled_t = make_labeled_identity();
}
{
auto labeled_t3 = make_labeled_identity();
labeled_t3 = make_labeled_transform();
labeled_t = labeled_t3;
EXPECT_EQ(make_labeled_transform(), labeled_t);
}
{
IndexTransform<2, 2> invalid_t;
labeled_t = invalid_t;
EXPECT_FALSE(labeled_t.valid());
}
}
TEST(IndexTransformTest, ToString) {
EXPECT_EQ("<Invalid index space transform>",
StrCat(IndexTransformView<1, 1>()));
EXPECT_EQ(
R"s(Rank 3 -> 4 index space transform:
Input domain:
0: [1*, 3) "x"
1: [2, 4*) "y"
2: [3, 7) "z"
Output index maps:
out[0] = 4
out[1] = 5 + 7 * in[2]
out[2] = 6
out[3] = 7 + 9 * bounded([0, 4), array(in)), where array =
{{{1, 0, 2, 2}}}
)s",
StrCat(IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.implicit_lower_bounds({1, 0, 0})
.implicit_upper_bounds({0, 1, 0})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9,
MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize()
.value()));
}
TEST(IndexTransformTest, GTestToString) {
EXPECT_EQ(
R"s(Rank 3 -> 4 index space transform:
Input domain:
0: [1, 3) "x"
1: [2, 4) "y"
2: [3, 7) "z"
Output index maps:
out[0] = 4
out[1] = 5 + 7 * in[2]
out[2] = 6
out[3] = 7 + 9 * bounded([0, 4), array(in)), where array =
{{{1, 0, 2, 2}}}
)s",
::testing::PrintToString(
IndexTransformBuilder<>(3, 4)
.input_origin({1, 2, 3})
.input_shape({2, 2, 4})
.input_labels({"x", "y", "z"})
.output_constant(0, 4)
.output_single_input_dimension(1, 5, 7, 2)
.output_constant(2, 6)
.output_index_array(3, 7, 9, MakeArray<Index>({{{1, 0, 2, 2}}}),
IndexInterval::Closed(0, 3))
.Finalize()
.value()));
}
TEST(IndexTransformTest, Constant) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({4})
.output_constant(0, 10)
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(10));
}
TEST(IndexTransformTest, SingleInputDimension) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({20})
.output_single_input_dimension(0, 5, 2, 0)
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({6}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 6));
}
TEST(IndexTransformTest, IndexArray) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 5, 2, MakeArray<Index>({4, 5, 6}))
.Finalize()
.value();
std::array<Index, 1> output_indices;
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({1}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 4));
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({2}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 5));
ASSERT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(5 + 2 * 6));
}
TEST(TransformIndicesTest, ConstantAndSingleInputDimensionAndIndexArray) {
auto t = IndexTransformBuilder<>(3, 3)
.input_origin({1, 2, 3})
.input_shape({4, 4, 3})
.output_constant(0, 10)
.output_single_input_dimension(1, 20, 2, 2)
.output_index_array(2, 30, 3,
MakeArray<Index>({{{5}, {6}, {7}, {8}}}))
.Finalize()
.value();
std::array<Index, 3> output_indices;
ASSERT_EQ(
absl::OkStatus(),
t.TransformIndices(span<const Index, 3>({2, 4, 5}), output_indices));
EXPECT_THAT(output_indices,
::testing::ElementsAre(10, 20 + 2 * 5, 30 + 3 * 7));
}
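// Implicit bounds are not enforced by TransformIndices: -3 lies below the
// implicit lower bound 1* and still succeeds, whereas 10 violates the
// explicit upper bound and is rejected with kOutOfRange.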
TEST(TransformIndicesTest, Implicit) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.implicit_lower_bounds({1})
.input_shape({3})
.output_single_input_dimension(0, 0)
.Finalize()
.value();
std::array<Index, 1> output_indices;
EXPECT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({-3}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(-3));
EXPECT_THAT(t.TransformIndices(span<const Index, 1>({10}), output_indices),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Index 10 is not contained in the domain "
"\\[1\\*, 4\\) for input dimension 0"));
}
TEST(TransformIndicesTest, IndexRangeError) {
auto t = IndexTransformBuilder<>(1, 1)
.input_origin({1})
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({5, 6, 7}),
IndexInterval::Closed(6, 7))
.Finalize()
.value();
std::array<Index, 1> output_indices;
EXPECT_EQ(absl::OkStatus(),
t.TransformIndices(span<const Index, 1>({2}), output_indices));
EXPECT_THAT(output_indices, ::testing::ElementsAre(6));
EXPECT_THAT(t.TransformIndices(span<const Index, 1>({1}), output_indices),
MatchesStatus(absl::StatusCode::kOutOfRange,
"Computing index for output dimension 0: "
"Checking result of index array output index map: "
"Index 5 is outside valid range \\[6, 8\\)"));
}
TEST(IndexTransformTest, ConstructMove) {
auto t = IdentityTransform(2);
auto* data = TransformAccess::rep(t);
IndexTransform<> t2(std::move(t));
EXPECT_EQ(data, TransformAccess::rep(t2));
}
TEST(IndexTransformTest, AssignMove) {
auto t = IdentityTransform(2);
auto* data = TransformAccess::rep(t);
IndexTransform<> t2;
t2 = std::move(t);
EXPECT_EQ(data, TransformAccess::rep(t2));
}
TEST(IndexDomainTest, DefaultConstruct) {
IndexDomainView<> d;
EXPECT_FALSE(d.valid());
}
TEST(IndexDomainTest, ConstructFromTransform) {
auto d = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
ASSERT_TRUE(d.valid());
EXPECT_EQ(2, d.rank());
EXPECT_THAT(d.origin(), ::testing::ElementsAre(1, 2));
EXPECT_THAT(d.shape(), ::testing::ElementsAre(3, 4));
EXPECT_THAT(d.implicit_lower_bounds(), DimensionSet::FromBools({1, 0}));
EXPECT_THAT(d.implicit_upper_bounds(), DimensionSet::FromBools({0, 1}));
EXPECT_THAT(d.labels(), ::testing::ElementsAre("x", "y"));
EXPECT_EQ(IndexDomainDimension<view>(
{IndexInterval::UncheckedSized(1, 3), true, false}, "x"),
d[0]);
EXPECT_EQ(IndexDomainDimension<view>(
{IndexInterval::UncheckedSized(2, 4), false, true}, "y"),
d[1]);
EXPECT_EQ(12, d.num_elements());
}
TEST(IndexDomainTest, CompareEqual) {
IndexDomain<2> d1;
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
IndexDomain<2> d3(IndexTransformBuilder<2, 1>()
.input_origin({1, 2})
.input_shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.input_labels({"x", "y"})
.output_constant(0, 1)
.Finalize()
.value()
.domain());
auto d4 = IndexDomainBuilder<2>()
.origin({1, 3})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d5 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 5})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d6 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d7 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({1, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d8 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"z", "y"})
.Finalize()
.value();
EXPECT_EQ(d1, d1);
EXPECT_EQ(d2, d2);
EXPECT_EQ(d3, d3);
EXPECT_EQ(d4, d4);
EXPECT_EQ(d5, d5);
EXPECT_EQ(d6, d6);
EXPECT_EQ(d7, d7);
EXPECT_EQ(d8, d8);
EXPECT_NE(d1, d2);
EXPECT_EQ(d2, d3);
EXPECT_NE(d2, d4);
EXPECT_NE(d2, d5);
EXPECT_NE(d2, d6);
EXPECT_NE(d2, d7);
EXPECT_NE(d2, d8);
}
TEST(IndexDomainTest, ConvertRank) {
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
IndexDomain<> d_dynamic = d2;
EXPECT_EQ(d_dynamic, d2);
IndexDomain<> d_dynamic_from_rvalue = IndexDomain<2>(d2);
EXPECT_EQ(d_dynamic_from_rvalue, d2);
auto d2_cast = StaticRankCast<2>(d_dynamic);
static_assert(std::is_same_v<decltype(d2_cast), Result<IndexDomain<2>>>);
EXPECT_EQ(d2_cast, d2);
auto d2_cast_rvalue = StaticRankCast<2>(IndexDomain<>(d_dynamic));
static_assert(
std::is_same_v<decltype(d2_cast_rvalue), Result<IndexDomain<2>>>);
EXPECT_EQ(d2_cast_rvalue, d2);
EXPECT_THAT(StaticRankCast<3>(d_dynamic),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot cast index domain with rank of 2 "
"to index domain with rank of 3"));
}
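// Indexing a domain with a span of dimension indices selects (here: swaps)
// those dimensions, carrying labels and implicit-bound flags along with them.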
TEST(IndexDomainTest, SubDomain) {
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
auto d3 = IndexDomainBuilder<2>()
.origin({2, 1})
.shape({4, 3})
.implicit_lower_bounds({0, 1})
.implicit_upper_bounds({1, 0})
.labels({"y", "x"})
.Finalize()
.value();
EXPECT_EQ(d3, (d2[span<const DimensionIndex, 2>({1, 0})]));
}
TEST(IndexDomainTest, PrintToOstream) {
EXPECT_EQ("<invalid index domain>", StrCat(IndexDomain<2>()));
auto d2 = IndexDomainBuilder<2>()
.origin({1, 2})
.shape({3, 4})
.implicit_lower_bounds({1, 0})
.implicit_upper_bounds({0, 1})
.labels({"x", "y"})
.Finalize()
.value();
EXPECT_EQ(R"({ "x": [1*, 4), "y": [2, 6*) })", StrCat(d2));
}
static_assert(IsIndexDomain<bool> == false);
static_assert(IsIndexDomain<IndexDomain<3>> == true);
static_assert(IsIndexDomain<IndexDomainView<3>> == true);
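// StaticCast/StaticRankCast between dynamic and static ranks is checked at
// runtime: matching ranks succeed, mismatches return kInvalidArgument, and
// the `unchecked` tag bypasses the check, returning the value directly.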
TEST(CastTest, IndexTransform) {
auto t = IdentityTransform(span<const Index>({2, 3}));
auto t2 = StaticCast<IndexTransform<2, 2>, unchecked>(t);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t2);
EXPECT_THAT((StaticCast<IndexTransformView<2, 2>>(t)),
::testing::Optional(t));
EXPECT_THAT(
(StaticCast<IndexTransform<2, 3>>(t)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 2 and output rank of 3"));
EXPECT_THAT(
(tensorstore::StaticRankCast<3>(t)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 3 and output dynamic rank"));
}
TEST(CastTest, IndexTransformView) {
auto t = IdentityTransform(span<const Index>({2, 3}));
IndexTransformView<> t_ref = t;
auto t2 = StaticCast<IndexTransformView<2, 2>>(t_ref);
EXPECT_EQ(IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({2, 3})
.output_single_input_dimension(0, 0)
.output_single_input_dimension(1, 1)
.Finalize()
.value(),
t2);
EXPECT_THAT((StaticCast<IndexTransform<2, 2>>(t_ref)),
::testing::Optional(t));
EXPECT_THAT(
(StaticCast<IndexTransformView<2, 3>>(t_ref)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 2 and output rank of 3"));
EXPECT_THAT(
(tensorstore::StaticRankCast<3>(t_ref)),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast "
"index transform with input rank of 2 and output rank of 2 to "
"index transform with input rank of 3 and output dynamic rank"));
}
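// Merge semantics as exercised below: a null domain merges with anything,
// ranks must match, an empty label is compatible with any label, and less
// specific bounds (implicit / infinite) defer to more specific ones;
// conflicting explicit labels or bounds are errors.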
TEST(MergeIndexDomainsTest, Basic) {
EXPECT_THAT(MergeIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain1,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(MergeIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(MergeIndexDomains(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(
MergeIndexDomains(domain1, domain2),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot merge index domain \\{ .* \\} with index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain3,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, 5, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain4,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, 12})
.labels({"", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain4_merged,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 0})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, 12})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(domain1, domain3),
::testing::Optional(domain3));
EXPECT_THAT(MergeIndexDomains(domain1, domain4),
::testing::Optional(domain4_merged));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain5,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"z", "", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain6,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({2, -kInfIndex, 2})
.inclusive_max({10, 11, kInfIndex})
.labels({"x", "", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain7,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, 2})
.inclusive_max({10, 12, kInfIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(MergeIndexDomains(domain1, domain5),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 0: "
"Dimension labels do not match"));
EXPECT_THAT(MergeIndexDomains(domain1, domain6),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 0: "
"Lower bounds do not match"));
EXPECT_THAT(MergeIndexDomains(domain1, domain7),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot merge .*: "
"Mismatch in dimension 1: "
"Upper bounds do not match"));
}
TEST(HullIndexDomains, Basic) {
EXPECT_THAT(HullIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(HullIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(HullIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(HullIndexDomains(domain1, domain1), ::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(
HullIndexDomains(domain1, domain2),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot hull index domain \\{ .* \\} with index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({0, -kInfIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(HullIndexDomains(domain1, domain3), ::testing::Optional(domain4));
}
TEST(IntersectIndexDomains, Basic) {
EXPECT_THAT(IntersectIndexDomains(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(IntersectIndexDomains(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(IntersectIndexDomains(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot intersect index domain \\{ .* \\} with "
"index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({1, kMinFiniteIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain3),
::testing::Optional(domain4));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain5, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain6, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({1, 1, 1})
.origin({1, kMinFiniteIndex, kMinFiniteIndex})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(IntersectIndexDomains(domain1, domain5),
::testing::Optional(domain6));
}
TEST(ConstrainIndexDomain, Basic) {
using ::tensorstore::ConstrainIndexDomain;
EXPECT_THAT(ConstrainIndexDomain(IndexDomain<>(), IndexDomain<>()),
::testing::Optional(IndexDomain<>()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain1, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 0})
.implicit_upper_bounds({0, 0, 1})
.origin({1, kMinFiniteIndex, -kInfIndex})
.inclusive_max({10, kInfIndex, kMaxFiniteIndex})
.labels({"x", "", ""})
.Finalize());
EXPECT_THAT(ConstrainIndexDomain(IndexDomain<>(), domain1),
::testing::Optional(domain1));
EXPECT_THAT(ConstrainIndexDomain(domain1, IndexDomain<>()),
::testing::Optional(domain1));
EXPECT_THAT(ConstrainIndexDomain(domain1, domain1),
::testing::Optional(domain1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto domain2,
IndexDomainBuilder(4).Finalize());
EXPECT_THAT(ConstrainIndexDomain(domain1, domain2),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot constrain index domain \\{ .* \\} with "
"index domain \\{ .* \\}: "
"Ranks do not match"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain3, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, -kInfIndex, -100})
.inclusive_max({9, kMaxFiniteIndex, kInfIndex})
.labels({"x", "y", ""})
.Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain4, IndexDomainBuilder(3)
.implicit_lower_bounds({0, 0, 1})
.implicit_upper_bounds({1, 1, 1})
.origin({0, kMinFiniteIndex, -100})
.inclusive_max({9, kMaxFiniteIndex, kMaxFiniteIndex})
.labels({"x", "y", ""})
.Finalize());
EXPECT_THAT(ConstrainIndexDomain(domain3, domain1),
::testing::Optional(domain4));
}
TEST(IndexTransformTest, WithImplicitDimensions) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_transform,
IndexTransformBuilder(3, 3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.output_identity_transform()
.Finalize());
EXPECT_EQ(expected_transform,
WithImplicitDimensions(IdentityTransform(3),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexTransformTest, WithImplicitDimensionsIndexArray) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform,
IndexTransformBuilder(1, 1)
.input_shape({3})
.output_index_array(0, 0, 1, MakeArray<Index>({0, 1, 2}))
.Finalize());
EXPECT_EQ(
expected_transform,
WithImplicitDimensions(expected_transform, DimensionSet::FromBools({1}),
DimensionSet::FromBools({1})));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder(1)
.shape({3})
.implicit_lower_bounds({1})
.implicit_upper_bounds({1})
.Finalize());
EXPECT_EQ(expected_domain,
WithImplicitDimensions(expected_transform.domain(),
DimensionSet::FromBools({1}),
DimensionSet::FromBools({1})));
}
TEST(IndexTransformTest, WithImplicitDimensionsStaticRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_transform,
(IndexTransformBuilder<3, 3>()
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.output_identity_transform()
.Finalize()));
EXPECT_EQ(expected_transform,
WithImplicitDimensions(IdentityTransform<3>(),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, WithImplicitDimensions) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder(3)
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.Finalize());
EXPECT_EQ(
expected_domain,
WithImplicitDimensions(IndexDomain(3), DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, WithImplicitDimensionsStaticRank) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder<3>()
.implicit_lower_bounds({0, 1, 1})
.implicit_upper_bounds({1, 0, 1})
.Finalize());
EXPECT_EQ(expected_domain,
WithImplicitDimensions(IndexDomain<3>(tensorstore::StaticRank<3>{}),
DimensionSet::FromBools({0, 1, 1}),
DimensionSet::FromBools({1, 0, 1})));
}
TEST(IndexDomainTest, ApplyIndexTransform) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto domain,
IndexDomainBuilder<3>().origin({1, 2, 3}).shape({5, 5, 5}).Finalize());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform, (IndexTransformBuilder<4, 3>()
.output_single_input_dimension(0, 5, 1, 3)
.output_single_input_dimension(1, -7, 1, 0)
.output_single_input_dimension(2, 3, 1, 1)
.Finalize()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_domain,
IndexDomainBuilder<4>()
.origin({9, 0, -kInfIndex, -4})
.shape({5, 5, tensorstore::kInfSize, 5})
.implicit_lower_bounds({0, 0, 1, 0})
.implicit_upper_bounds({0, 0, 1, 0})
.Finalize());
EXPECT_THAT(domain | transform, ::testing::Optional(expected_domain));
}
TEST(IndexTransformSerializationTest, Basic) {
TestSerializationRoundTrip(tensorstore::IndexTransform<>());
TestSerializationRoundTrip(tensorstore::IdentityTransform(5));
}
TEST(IndexDomainSerializationTest, Basic) {
TestSerializationRoundTrip(tensorstore::IndexDomain<>());
TestSerializationRoundTrip(
tensorstore::IndexDomain<>(tensorstore::IdentityTransform(5).domain()));
}
TEST(ComputeInputDimensionReferenceCountsTest, Identity) {
DimensionIndex reference_counts[3];
ComputeInputDimensionReferenceCounts(IdentityTransform(3), reference_counts);
EXPECT_THAT(reference_counts, ::testing::ElementsAre(1, 1, 1));
}
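// Per the expectations below, input dimensions over which an index array is
// broadcast (array extent 1) are not counted as referenced: the 1x2x2 array
// leaves dimension 0 with a reference count of 0.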
TEST(ComputeInputDimensionReferenceCountsTest, IndexArray) {
DimensionIndex reference_counts[3];
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(3, 1)
.input_shape({2, 2, 2})
.output_index_array(0, 0, 1, MakeArray<Index>({{{1, 2}, {3, 4}}}))
.Finalize());
ComputeInputDimensionReferenceCounts(transform, reference_counts);
EXPECT_THAT(reference_counts, ::testing::ElementsAre(0, 1, 1));
}
TEST(GetInputDimensionsForOutputDimensionTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto transform,
IndexTransformBuilder(3, 3)
.input_shape({2, 2, 2})
.output_constant(0, 42)
.output_single_input_dimension(1, 0, 1, 1)
.output_index_array(2, 0, 1, MakeArray<Index>({{{1, 2}, {3, 4}}}))
.Finalize());
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 0),
::testing::Pair(DimensionSet(), false));
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 1),
::testing::Pair(DimensionSet::FromBools({0, 1, 0}), false));
EXPECT_THAT(GetInputDimensionsForOutputDimension(transform, 2),
::testing::Pair(DimensionSet::FromBools({0, 1, 1}), true));
}
TEST(TranslateOutputDimensionsByTest, Basic) {
auto orig_transform = IdentityTransform(3);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform, IndexTransformBuilder(3, 3)
.output_single_input_dimension(0, 1, 1, 0)
.output_single_input_dimension(1, 2, 1, 1)
.output_single_input_dimension(2, 3, 1, 2)
.Finalize());
EXPECT_THAT(TranslateOutputDimensionsBy(orig_transform, {{1, 2, 3}}),
::testing::Optional(expected_transform));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/index_transform_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d5aa5d03-9969-4c67-b9de-22138d23dbba | cpp | google/cel-cpp | value_factory | common/value_factory.cc | common/value_factory_test.cc | #include "common/value_factory.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/casting.h"
#include "common/internal/arena_string.h"
#include "common/internal/reference_count.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/native_type.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/status_macros.h"
#include "internal/time.h"
#include "internal/utf8.h"
namespace cel {
namespace {
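// Converts a Json variant into the corresponding cel::Value. Scalars map
// directly (null/bool/double/string); arrays and objects are wrapped by the
// value factory so their elements are converted lazily on access.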
void JsonToValue(const Json& json, ValueFactory& value_factory, Value& result) {
absl::visit(
absl::Overload(
[&result](JsonNull) { result = NullValue(); },
[&result](JsonBool value) { result = BoolValue(value); },
[&result](JsonNumber value) { result = DoubleValue(value); },
[&result](const JsonString& value) { result = StringValue(value); },
[&value_factory, &result](const JsonArray& value) {
result = value_factory.CreateListValueFromJsonArray(value);
},
[&value_factory, &result](const JsonObject& value) {
result = value_factory.CreateMapValueFromJsonObject(value);
}),
json);
}
void JsonDebugString(const Json& json, std::string& out);
void JsonArrayDebugString(const JsonArray& json, std::string& out) {
out.push_back('[');
auto element = json.begin();
if (element != json.end()) {
JsonDebugString(*element, out);
++element;
for (; element != json.end(); ++element) {
out.append(", ");
JsonDebugString(*element, out);
}
}
out.push_back(']');
}
void JsonObjectEntryDebugString(const JsonString& key, const Json& value,
std::string& out) {
out.append(StringValue(key).DebugString());
out.append(": ");
JsonDebugString(value, out);
}
void JsonObjectDebugString(const JsonObject& json, std::string& out) {
std::vector<JsonString> keys;
keys.reserve(json.size());
for (const auto& entry : json) {
keys.push_back(entry.first);
}
std::stable_sort(keys.begin(), keys.end());
out.push_back('{');
auto key = keys.begin();
if (key != keys.end()) {
JsonObjectEntryDebugString(*key, json.find(*key)->second, out);
++key;
for (; key != keys.end(); ++key) {
out.append(", ");
JsonObjectEntryDebugString(*key, json.find(*key)->second, out);
}
}
out.push_back('}');
}
void JsonDebugString(const Json& json, std::string& out) {
absl::visit(
absl::Overload(
[&out](JsonNull) -> void { out.append(NullValue().DebugString()); },
[&out](JsonBool value) -> void {
out.append(BoolValue(value).DebugString());
},
[&out](JsonNumber value) -> void {
out.append(DoubleValue(value).DebugString());
},
[&out](const JsonString& value) -> void {
out.append(StringValue(value).DebugString());
},
[&out](const JsonArray& value) -> void {
JsonArrayDebugString(value, out);
},
[&out](const JsonObject& value) -> void {
JsonObjectDebugString(value, out);
}),
json);
}
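// ParsedListValueInterface backed directly by a JsonArray; elements are
// converted to cel::Value on demand in GetImpl, and ConvertToJsonArray simply
// returns the stored array.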
class JsonListValue final : public ParsedListValueInterface {
public:
explicit JsonListValue(JsonArray array) : array_(std::move(array)) {}
std::string DebugString() const override {
std::string out;
JsonArrayDebugString(array_, out);
return out;
}
bool IsEmpty() const override { return array_.empty(); }
size_t Size() const override { return array_.size(); }
absl::StatusOr<JsonArray> ConvertToJsonArray(
AnyToJsonConverter&) const override {
return array_;
}
private:
absl::Status GetImpl(ValueManager& value_manager, size_t index,
Value& result) const override {
JsonToValue(array_[index], value_manager, result);
return absl::OkStatus();
}
NativeTypeId GetNativeTypeId() const noexcept override {
return NativeTypeId::For<JsonListValue>();
}
const JsonArray array_;
};
class JsonMapValueKeyIterator final : public ValueIterator {
public:
explicit JsonMapValueKeyIterator(
const JsonObject& object ABSL_ATTRIBUTE_LIFETIME_BOUND)
: begin_(object.begin()), end_(object.end()) {}
bool HasNext() override { return begin_ != end_; }
absl::Status Next(ValueManager&, Value& result) override {
if (ABSL_PREDICT_FALSE(begin_ == end_)) {
return absl::FailedPreconditionError(
"ValueIterator::Next() called when "
"ValueIterator::HasNext() returns false");
}
const auto& key = begin_->first;
++begin_;
result = StringValue(key);
return absl::OkStatus();
}
private:
typename JsonObject::const_iterator begin_;
typename JsonObject::const_iterator end_;
};
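// ParsedMapValueInterface backed by a JsonObject. JSON keys are always
// strings, so FindImpl/HasImpl accept both string_view and Cord key
// representations; ListKeys materializes the keys into a JsonListValue.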
class JsonMapValue final : public ParsedMapValueInterface {
public:
explicit JsonMapValue(JsonObject object) : object_(std::move(object)) {}
std::string DebugString() const override {
std::string out;
JsonObjectDebugString(object_, out);
return out;
}
bool IsEmpty() const override { return object_.empty(); }
size_t Size() const override { return object_.size(); }
absl::Status ListKeys(ValueManager& value_manager,
ListValue& result) const override {
JsonArrayBuilder keys;
keys.reserve(object_.size());
for (const auto& entry : object_) {
keys.push_back(entry.first);
}
result = ParsedListValue(
value_manager.GetMemoryManager().MakeShared<JsonListValue>(
std::move(keys).Build()));
return absl::OkStatus();
}
absl::StatusOr<absl::Nonnull<ValueIteratorPtr>> NewIterator(
ValueManager&) const override {
return std::make_unique<JsonMapValueKeyIterator>(object_);
}
absl::StatusOr<JsonObject> ConvertToJsonObject(
AnyToJsonConverter&) const override {
return object_;
}
private:
absl::StatusOr<bool> FindImpl(ValueManager& value_manager, const Value& key,
Value& result) const override {
return Cast<StringValue>(key).NativeValue(absl::Overload(
[this, &value_manager, &result](absl::string_view value) -> bool {
if (auto entry = object_.find(value); entry != object_.end()) {
JsonToValue(entry->second, value_manager, result);
return true;
}
return false;
},
[this, &value_manager, &result](const absl::Cord& value) -> bool {
if (auto entry = object_.find(value); entry != object_.end()) {
JsonToValue(entry->second, value_manager, result);
return true;
}
return false;
}));
}
absl::StatusOr<bool> HasImpl(ValueManager&, const Value& key) const override {
return Cast<StringValue>(key).NativeValue(absl::Overload(
[this](absl::string_view value) -> bool {
return object_.contains(value);
},
[this](const absl::Cord& value) -> bool {
return object_.contains(value);
}));
}
NativeTypeId GetNativeTypeId() const noexcept override {
return NativeTypeId::For<JsonMapValue>();
}
const JsonObject object_;
};
}
Value ValueFactory::CreateValueFromJson(Json json) {
return absl::visit(
absl::Overload(
[](JsonNull) -> Value { return NullValue(); },
[](JsonBool value) -> Value { return BoolValue(value); },
[](JsonNumber value) -> Value { return DoubleValue(value); },
[](const JsonString& value) -> Value { return StringValue(value); },
[this](JsonArray value) -> Value {
return CreateListValueFromJsonArray(std::move(value));
},
[this](JsonObject value) -> Value {
return CreateMapValueFromJsonObject(std::move(value));
}),
std::move(json));
}
ListValue ValueFactory::CreateListValueFromJsonArray(JsonArray json) {
if (json.empty()) {
return ListValue(GetZeroDynListValue());
}
return ParsedListValue(
GetMemoryManager().MakeShared<JsonListValue>(std::move(json)));
}
MapValue ValueFactory::CreateMapValueFromJsonObject(JsonObject json) {
if (json.empty()) {
return MapValue(GetZeroStringDynMapValue());
}
return ParsedMapValue(
GetMemoryManager().MakeShared<JsonMapValue>(std::move(json)));
}
ListValue ValueFactory::GetZeroDynListValue() { return ListValue(); }
MapValue ValueFactory::GetZeroDynDynMapValue() { return MapValue(); }
MapValue ValueFactory::GetZeroStringDynMapValue() { return MapValue(); }
OptionalValue ValueFactory::GetZeroDynOptionalValue() {
return OptionalValue();
}
namespace {
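// Stores a std::string inline, placement-new'd into an aligned byte buffer,
// so the string object shares the reference count's allocation; accesses go
// through std::launder, and Finalize runs the string's destructor when the
// last reference is dropped.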
class ReferenceCountedString final : public common_internal::ReferenceCounted {
public:
static const ReferenceCountedString* New(std::string&& string) {
return new ReferenceCountedString(std::move(string));
}
const char* data() const {
return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->data();
}
size_t size() const {
return std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->size();
}
private:
explicit ReferenceCountedString(std::string&& robbed) : ReferenceCounted() {
::new (static_cast<void*>(&string_[0])) std::string(std::move(robbed));
}
void Finalize() noexcept override {
std::launder(reinterpret_cast<const std::string*>(&string_[0]))
->~basic_string();
}
alignas(std::string) char string_[sizeof(std::string)];
};
}
static void StringDestructor(void* string) {
static_cast<std::string*>(string)->~basic_string();
}
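// With pooling memory management the string is moved into an arena-allocated
// std::string whose destructor is registered via OwnCustomDestructor; with
// reference counting it is moved into a ReferenceCountedString that the
// returned value keeps alive.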
absl::StatusOr<BytesValue> ValueFactory::CreateBytesValue(std::string value) {
auto memory_manager = GetMemoryManager();
switch (memory_manager.memory_management()) {
case MemoryManagement::kPooling: {
auto* string = ::new (
memory_manager.Allocate(sizeof(std::string), alignof(std::string)))
std::string(std::move(value));
memory_manager.OwnCustomDestructor(string, &StringDestructor);
return BytesValue{common_internal::ArenaString(*string)};
}
case MemoryManagement::kReferenceCounting: {
auto* refcount = ReferenceCountedString::New(std::move(value));
auto bytes_value = BytesValue{common_internal::SharedByteString(
refcount, absl::string_view(refcount->data(), refcount->size()))};
common_internal::StrongUnref(*refcount);
return bytes_value;
}
}
}
StringValue ValueFactory::CreateUncheckedStringValue(std::string value) {
auto memory_manager = GetMemoryManager();
switch (memory_manager.memory_management()) {
case MemoryManagement::kPooling: {
auto* string = ::new (
memory_manager.Allocate(sizeof(std::string), alignof(std::string)))
std::string(std::move(value));
memory_manager.OwnCustomDestructor(string, &StringDestructor);
return StringValue{common_internal::ArenaString(*string)};
}
case MemoryManagement::kReferenceCounting: {
auto* refcount = ReferenceCountedString::New(std::move(value));
auto string_value = StringValue{common_internal::SharedByteString(
refcount, absl::string_view(refcount->data(), refcount->size()))};
common_internal::StrongUnref(*refcount);
return string_value;
}
}
}
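// Unlike CreateUncheckedStringValue above, the checked variants validate that
// the input is well-formed UTF-8 before constructing the value.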
absl::StatusOr<StringValue> ValueFactory::CreateStringValue(std::string value) {
auto [count, ok] = internal::Utf8Validate(value);
if (ABSL_PREDICT_FALSE(!ok)) {
return absl::InvalidArgumentError(
"Illegal byte sequence in UTF-8 encoded string");
}
return CreateUncheckedStringValue(std::move(value));
}
absl::StatusOr<StringValue> ValueFactory::CreateStringValue(absl::Cord value) {
auto [count, ok] = internal::Utf8Validate(value);
if (ABSL_PREDICT_FALSE(!ok)) {
return absl::InvalidArgumentError(
"Illegal byte sequence in UTF-8 encoded string");
}
return StringValue(std::move(value));
}
absl::StatusOr<DurationValue> ValueFactory::CreateDurationValue(
absl::Duration value) {
CEL_RETURN_IF_ERROR(internal::ValidateDuration(value));
return DurationValue{value};
}
absl::StatusOr<TimestampValue> ValueFactory::CreateTimestampValue(
absl::Time value) {
CEL_RETURN_IF_ERROR(internal::ValidateTimestamp(value));
return TimestampValue{value};
}
} | #include "common/value_factory.h"
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/memory_testing.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::TestParamInfo;
using ::testing::UnorderedElementsAreArray;
class ValueFactoryTest : public common_internal::ThreadCompatibleMemoryTest<> {
public:
void SetUp() override {
value_manager_ = NewThreadCompatibleValueManager(
memory_manager(), NewThreadCompatibleTypeReflector(memory_manager()));
}
void TearDown() override { Finish(); }
void Finish() {
value_manager_.reset();
ThreadCompatibleMemoryTest::Finish();
}
TypeFactory& type_factory() const { return value_manager(); }
TypeManager& type_manager() const { return value_manager(); }
ValueFactory& value_factory() const { return value_manager(); }
ValueManager& value_manager() const { return **value_manager_; }
static std::string ToString(
TestParamInfo<std::tuple<MemoryManagement>> param) {
std::ostringstream out;
out << std::get<0>(param.param);
return out.str();
}
private:
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ValueFactoryTest, JsonValueNull) {
auto value = value_factory().CreateValueFromJson(kJsonNull);
EXPECT_TRUE(InstanceOf<NullValue>(value));
}
TEST_P(ValueFactoryTest, JsonValueBool) {
auto value = value_factory().CreateValueFromJson(true);
ASSERT_TRUE(InstanceOf<BoolValue>(value));
EXPECT_TRUE(Cast<BoolValue>(value).NativeValue());
}
TEST_P(ValueFactoryTest, JsonValueNumber) {
auto value = value_factory().CreateValueFromJson(1.0);
ASSERT_TRUE(InstanceOf<DoubleValue>(value));
EXPECT_EQ(Cast<DoubleValue>(value).NativeValue(), 1.0);
}
TEST_P(ValueFactoryTest, JsonValueString) {
auto value = value_factory().CreateValueFromJson(absl::Cord("foo"));
ASSERT_TRUE(InstanceOf<StringValue>(value));
EXPECT_EQ(Cast<StringValue>(value).NativeString(), "foo");
}
JsonObject NewJsonObjectForTesting(bool with_array = true,
bool with_nested_object = true);
JsonArray NewJsonArrayForTesting(bool with_nested_array = true,
bool with_object = true) {
JsonArrayBuilder builder;
builder.push_back(kJsonNull);
builder.push_back(true);
builder.push_back(1.0);
builder.push_back(absl::Cord("foo"));
if (with_nested_array) {
builder.push_back(NewJsonArrayForTesting(false, false));
}
if (with_object) {
builder.push_back(NewJsonObjectForTesting(false, false));
}
return std::move(builder).Build();
}
JsonObject NewJsonObjectForTesting(bool with_array, bool with_nested_object) {
JsonObjectBuilder builder;
builder.insert_or_assign(absl::Cord("a"), kJsonNull);
builder.insert_or_assign(absl::Cord("b"), true);
builder.insert_or_assign(absl::Cord("c"), 1.0);
builder.insert_or_assign(absl::Cord("d"), absl::Cord("foo"));
if (with_array) {
builder.insert_or_assign(absl::Cord("e"),
NewJsonArrayForTesting(false, false));
}
if (with_nested_object) {
builder.insert_or_assign(absl::Cord("f"),
NewJsonObjectForTesting(false, false));
}
return std::move(builder).Build();
}
TEST_P(ValueFactoryTest, JsonValueArray) {
auto value = value_factory().CreateValueFromJson(NewJsonArrayForTesting());
ASSERT_TRUE(InstanceOf<ListValue>(value));
EXPECT_EQ(Type(value.GetRuntimeType()), type_factory().GetDynListType());
auto list_value = Cast<ListValue>(value);
EXPECT_THAT(list_value.IsEmpty(), IsOkAndHolds(false));
EXPECT_THAT(list_value.Size(), IsOkAndHolds(6));
EXPECT_EQ(list_value.DebugString(),
"[null, true, 1.0, \"foo\", [null, true, 1.0, \"foo\"], {\"a\": "
"null, \"b\": true, \"c\": 1.0, \"d\": \"foo\"}]");
ASSERT_OK_AND_ASSIGN(auto element, list_value.Get(value_manager(), 0));
EXPECT_TRUE(InstanceOf<NullValue>(element));
}
TEST_P(ValueFactoryTest, JsonValueObject) {
auto value = value_factory().CreateValueFromJson(NewJsonObjectForTesting());
ASSERT_TRUE(InstanceOf<MapValue>(value));
auto map_value = Cast<MapValue>(value);
EXPECT_THAT(map_value.IsEmpty(), IsOkAndHolds(false));
EXPECT_THAT(map_value.Size(), IsOkAndHolds(6));
EXPECT_EQ(map_value.DebugString(),
"{\"a\": null, \"b\": true, \"c\": 1.0, \"d\": \"foo\", \"e\": "
"[null, true, 1.0, \"foo\"], \"f\": {\"a\": null, \"b\": true, "
"\"c\": 1.0, \"d\": \"foo\"}}");
ASSERT_OK_AND_ASSIGN(auto keys, map_value.ListKeys(value_manager()));
EXPECT_THAT(keys.Size(), IsOkAndHolds(6));
ASSERT_OK_AND_ASSIGN(auto keys_iterator,
map_value.NewIterator(value_manager()));
std::vector<StringValue> string_keys;
while (keys_iterator->HasNext()) {
ASSERT_OK_AND_ASSIGN(auto key, keys_iterator->Next(value_manager()));
string_keys.push_back(StringValue(Cast<StringValue>(key)));
}
EXPECT_THAT(string_keys,
UnorderedElementsAreArray({StringValue("a"), StringValue("b"),
StringValue("c"), StringValue("d"),
StringValue("e"), StringValue("f")}));
ASSERT_OK_AND_ASSIGN(auto has,
map_value.Has(value_manager(), StringValue("a")));
ASSERT_TRUE(InstanceOf<BoolValue>(has));
EXPECT_TRUE(Cast<BoolValue>(has).NativeValue());
ASSERT_OK_AND_ASSIGN(
has, map_value.Has(value_manager(), StringValue(absl::Cord("a"))));
ASSERT_TRUE(InstanceOf<BoolValue>(has));
EXPECT_TRUE(Cast<BoolValue>(has).NativeValue());
ASSERT_OK_AND_ASSIGN(auto get,
map_value.Get(value_manager(), StringValue("a")));
ASSERT_TRUE(InstanceOf<NullValue>(get));
ASSERT_OK_AND_ASSIGN(
get, map_value.Get(value_manager(), StringValue(absl::Cord("a"))));
ASSERT_TRUE(InstanceOf<NullValue>(get));
}
INSTANTIATE_TEST_SUITE_P(
ValueFactoryTest, ValueFactoryTest,
::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting),
ValueFactoryTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value_factory.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/value_factory_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3ca3fc07-51cc-43e8-9de3-a65284addc16 | cpp | tensorflow/tensorflow | quantize_op | tensorflow/core/kernels/quantize_op.cc | tensorflow/core/kernels/quantize_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/cwise_ops.h"
#include "tensorflow/core/kernels/meta_support.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
namespace {
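// Quantization modes (selected via the "mode" attr): MIN_COMBINED affinely
// maps [min_range, max_range] onto the full range of T; MIN_FIRST delegates
// to FloatTensorToQuantizedInPlaceUsingEigen (or the `meta` fast path for
// quint8 where supported) and is only implemented for axis == -1; SCALED
// preserves zero exactly by using a single symmetric scale factor.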
enum {
QUANTIZE_MODE_MIN_COMBINED,
QUANTIZE_MODE_MIN_FIRST,
QUANTIZE_MODE_SCALED,
};
enum {
ROUND_HALF_AWAY_FROM_ZERO,
ROUND_HALF_TO_EVEN,
};
}
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
template <typename Device, typename T>
class QuantizeV2Op : public OpKernel {
public:
explicit QuantizeV2Op(OpKernelConstruction* ctx) : OpKernel(ctx) {
half_range_ =
!std::is_signed<T>::value
? 0.0f
: (static_cast<double>(std::numeric_limits<T>::max()) -
static_cast<double>(std::numeric_limits<T>::min()) + 1) /
2.0f;
string mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("mode", &mode_string));
OP_REQUIRES(ctx,
(mode_string == "MIN_COMBINED" || mode_string == "MIN_FIRST" ||
mode_string == "SCALED"),
errors::InvalidArgument("Mode string must be 'MIN_COMBINED',"
" 'MIN_FIRST', or 'SCALED', is '" +
mode_string + "'"));
if (mode_string == "MIN_COMBINED") {
mode_ = QUANTIZE_MODE_MIN_COMBINED;
} else if (mode_string == "MIN_FIRST") {
mode_ = QUANTIZE_MODE_MIN_FIRST;
} else if (mode_string == "SCALED") {
mode_ = QUANTIZE_MODE_SCALED;
}
string round_mode_string;
OP_REQUIRES_OK(ctx, ctx->GetAttr("round_mode", &round_mode_string));
OP_REQUIRES(ctx,
(round_mode_string == "HALF_AWAY_FROM_ZERO" ||
round_mode_string == "HALF_TO_EVEN"),
errors::InvalidArgument("Round mode string must be "
"'HALF_AWAY_FROM_ZERO' or "
"'HALF_TO_EVEN', is '" +
round_mode_string + "'"));
if (round_mode_string == "HALF_AWAY_FROM_ZERO") {
round_mode_ = ROUND_HALF_AWAY_FROM_ZERO;
} else if (round_mode_string == "HALF_TO_EVEN") {
OP_REQUIRES(ctx, mode_string == "SCALED",
errors::InvalidArgument("Round mode 'HALF_TO_EVEN' "
"only supported for mode 'SCALED', "
"b ut mode is '" +
mode_string + "'."));
round_mode_ = ROUND_HALF_TO_EVEN;
}
OP_REQUIRES_OK(ctx, ctx->GetAttr("narrow_range", &narrow_range_));
OP_REQUIRES_OK(ctx, ctx->GetAttr("axis", &axis_));
OP_REQUIRES_OK(
ctx, ctx->GetAttr("ensure_minimum_range", &ensure_minimum_range_));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
const Tensor& input_min_range = ctx->input(1);
const Tensor& input_max_range = ctx->input(2);
int num_slices = 1;
if (axis_ > -1) {
OP_REQUIRES(
ctx, input.dims() > axis_,
errors::InvalidArgument(
"Axis is on a zero-based index, so its value must always be less "
"than number of input's dims, but given axis value was ",
axis_, " and input's dims was ", input.dims()));
num_slices = input.dim_size(axis_);
OP_REQUIRES(ctx, input_min_range.dims() == 1,
errors::InvalidArgument(
"If axis is specified, min_range must be a 1-D tensor "
"whose size matches the axis dimension of the input and "
"output tensors, but min_range dims are ",
input_min_range.dims()));
OP_REQUIRES(ctx, input_min_range.dim_size(0) == num_slices,
errors::InvalidArgument(
"If axis is specified, min_range must be a 1-D tensor "
"whose size matches the axis dimension of the input and "
"output tensors, but min_range is a 1-D tensor of size ",
input_min_range.dim_size(0),
" and input's axis dimension is of size ", num_slices));
OP_REQUIRES(ctx, input_max_range.dims() == 1,
errors::InvalidArgument(
"If axis is specified, max_range must be a 1-D tensor "
"whose size matches the axis dimension of the input and "
"output tensors, but max_range dims are ",
input_max_range.dims()));
OP_REQUIRES(ctx, input_max_range.dim_size(0) == num_slices,
errors::InvalidArgument(
"If axis is specified, max_range must be a 1-D tensor "
"whose size matches the axis dimension of the input and "
"output tensors, but max_range is a 1-D tensor of size ",
input_max_range.dim_size(0),
" and input's axis dimension is of size ", num_slices));
} else {
OP_REQUIRES(ctx, input_min_range.NumElements() == 1,
errors::InvalidArgument(
"If axis is not specified, min_range must contain a "
"single float element, but it contains ",
input_min_range.NumElements(), " elements"));
OP_REQUIRES(ctx, input_max_range.NumElements() == 1,
errors::InvalidArgument(
"If axis is not specified, max_range must contain a "
"single float element, but it contains ",
input_max_range.NumElements(), " elements"));
}
const TensorShape& minmax_shape = ctx->input(1).shape();
Tensor* output = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, input.shape(), &output));
Tensor* output_min_tensor = nullptr;
Tensor* output_max_tensor = nullptr;
if (num_slices == 1) {
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, {}, &output_min_tensor));
OP_REQUIRES_OK(ctx, ctx->allocate_output(2, {}, &output_max_tensor));
const float min_range = input_min_range.template flat<float>()(0);
const float max_range = input_max_range.template flat<float>()(0);
QuantizeTensor(ctx, input, min_range, max_range, output,
output_min_tensor, output_max_tensor);
return;
}
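    // Per-axis (per-channel) path: reshape the input to
    // {pre_dim, num_slices, post_dim} and quantize each slice along axis_
    // with its own min/max pair.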
OP_REQUIRES(ctx, mode_ != QUANTIZE_MODE_MIN_FIRST,
errors::Unimplemented("MIN_FIRST mode is not implemented for "
"Quantize with axis != -1."));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(1, minmax_shape, &output_min_tensor));
OP_REQUIRES_OK(ctx,
ctx->allocate_output(2, minmax_shape, &output_max_tensor));
auto input_tensor =
input.template flat_inner_outer_dims<float, 3>(axis_ - 1);
int64_t pre_dim = 1, post_dim = 1;
for (int i = 0; i < axis_; ++i) {
pre_dim *= output->dim_size(i);
}
for (int i = axis_ + 1; i < output->dims(); ++i) {
post_dim *= output->dim_size(i);
}
auto output_tensor = output->template bit_casted_shaped<T, 3>(
{pre_dim, num_slices, post_dim});
auto min_ranges = input_min_range.template vec<float>();
auto max_ranges = input_max_range.template vec<float>();
for (int i = 0; i < num_slices; ++i) {
QuantizeSlice(ctx->eigen_device<Device>(), ctx,
input_tensor.template chip<1>(i), min_ranges(i),
max_ranges(i), output_tensor.template chip<1>(i),
&output_min_tensor->flat<float>()(i),
&output_max_tensor->flat<float>()(i));
}
}
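  // Quantizes a whole tensor against a single range. The requested range is
  // first nudged so that it contains zero and has a width of at least epsilon
  // (scaled by ensure_minimum_range_), preventing a divide-by-zero scale
  // factor for degenerate inputs.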
void QuantizeTensor(OpKernelContext* ctx, const Tensor& input,
const float input_min_range, const float input_max_range,
Tensor* output, Tensor* output_min_tensor,
Tensor* output_max_tensor) {
OP_REQUIRES(ctx, !(input_max_range < input_min_range),
errors::InvalidArgument(
"input_max_range must be larger than input_min_range."));
float min_range = std::min(0.0f, input_min_range);
const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range),
fabsf(input_max_range))) *
ensure_minimum_range_;
float max_range =
std::max(0.0f, std::max(input_max_range, min_range + epsilon));
if (mode_ == QUANTIZE_MODE_MIN_FIRST) {
if (meta::IsSupportedAndEnabled() && std::is_same<T, quint8>()) {
TTypes<const float>::Vec input_array = input.flat<float>();
meta::Quantize(ctx, input_array.data(), input_array.size(), min_range,
max_range, output->flat<quint8>().data());
} else {
FloatTensorToQuantizedInPlaceUsingEigen<T>(
ctx->template eigen_device<Device>(), input, min_range, max_range,
output);
}
output_min_tensor->flat<float>()(0) = min_range;
output_max_tensor->flat<float>()(0) = max_range;
} else {
QuantizeSlice(ctx->eigen_device<Device>(), ctx, input.flat<float>(),
input_min_range, input_max_range,
output->template flat<T>(),
&output_min_tensor->flat<float>()(0),
&output_max_tensor->flat<float>()(0));
}
}
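  // Quantizes one slice. In SCALED mode the scale factor is the tighter of
  // min_output/min_range and max_output/max_range, so zero maps exactly to
  // zero and neither endpoint overflows T; the reported range is then the set
  // of values that map onto T's extremes. E.g. for qint8 with input range
  // [-128, 100]: scale = min(-128 / -128, 127 / 100) = 1, and the reported
  // range becomes [-128, 127].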
template <typename ConstVec, typename Vec>
void QuantizeSlice(const Device& d, OpKernelContext* ctx,
const ConstVec& input, float input_min_range,
float input_max_range, Vec output, float* output_min_range,
float* output_max_range) {
OP_REQUIRES(ctx, !(input_max_range < input_min_range),
errors::InvalidArgument(
"input_max_range must be larger than input_min_range."));
float min_range = std::min(0.0f, input_min_range);
const float epsilon = std::max(1.0f, std::max(fabsf(input_min_range),
fabsf(input_max_range))) *
ensure_minimum_range_;
float max_range =
std::max(0.0f, std::max(input_max_range, min_range + epsilon));
if (mode_ == QUANTIZE_MODE_MIN_COMBINED) {
const float scale_factor =
(static_cast<double>(std::numeric_limits<T>::max()) -
static_cast<double>(std::numeric_limits<T>::min())) /
(max_range - min_range);
bool is_signed = std::is_signed<T>::value;
if (is_signed) {
output.device(d) =
((input.cwiseMin(max_range).cwiseMax(min_range) - min_range) *
scale_factor -
half_range_)
.round()
.template cast<T>();
} else {
output.device(d) =
((input.cwiseMin(max_range).cwiseMax(min_range) - min_range) *
scale_factor +
0.5f)
.template cast<T>();
}
} else if (mode_ == QUANTIZE_MODE_SCALED) {
const int min_output_value =
std::numeric_limits<T>::min() + (narrow_range_ ? 1 : 0);
const int max_output_value = std::numeric_limits<T>::max();
const float scale_factor_from_min_side =
(min_output_value * min_range > 0)
? min_output_value / min_range
: std::numeric_limits<float>::max();
const float scale_factor_from_max_side =
(max_output_value * max_range > 0)
? max_output_value / max_range
: std::numeric_limits<float>::max();
const float scale_factor =
std::min(scale_factor_from_min_side, scale_factor_from_max_side);
min_range = min_output_value / scale_factor;
max_range = max_output_value / scale_factor;
if (round_mode_ == ROUND_HALF_TO_EVEN) {
output.device(d) =
(input.cwiseMin(max_range).cwiseMax(min_range) * scale_factor)
.unaryExpr(
Eigen::internal::scalar_round_half_to_even_op<float>())
.template cast<T>();
} else if (round_mode_ == ROUND_HALF_AWAY_FROM_ZERO) {
output.device(d) =
(input.cwiseMin(max_range).cwiseMax(min_range) * scale_factor)
.round()
.template cast<T>();
}
}
*output_min_range = min_range;
*output_max_range = max_range;
}
private:
float half_range_;
float ensure_minimum_range_;
int mode_;
int round_mode_;
int axis_;
bool narrow_range_;
};
REGISTER_KERNEL_BUILDER(
Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<quint8>("T"),
QuantizeV2Op<CPUDevice, quint8>);
REGISTER_KERNEL_BUILDER(
Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint8>("T"),
QuantizeV2Op<CPUDevice, qint8>);
REGISTER_KERNEL_BUILDER(
Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<quint16>("T"),
QuantizeV2Op<CPUDevice, quint16>);
REGISTER_KERNEL_BUILDER(
Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint16>("T"),
QuantizeV2Op<CPUDevice, qint16>);
REGISTER_KERNEL_BUILDER(
Name("QuantizeV2").Device(DEVICE_CPU).TypeConstraint<qint32>("T"),
QuantizeV2Op<CPUDevice, qint32>);
} | #include <random>
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
class QuantizedOpTest : public OpsTestBase {
protected:
};
struct ParameterizedQuantizeOpTest : public OpsTestBase,
public ::testing::WithParamInterface<int> {
};
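// MIN_FIRST over [0, 255] into quint8 is an identity-like mapping with
// rounding and saturation: 1.75 rounds to 2 and 500.0 clamps to 255.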
TEST_F(QuantizedOpTest, QuantizeV2) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({7}),
{0.0, 1.0, 1.25, 1.75, 127.0, 255.0, 500.0});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({7}));
test::FillValues<quint8>(&expected, {0, 1, 1, 2, 127, 255, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
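// Builds a test tensor of the given shape by sampling `data` pseudo-randomly
// and multiplying each element by (slice_index + 1) along `axis`, so each
// slice exercises a distinct quantization range; axis == -1 means a single
// slice and leaves values unscaled.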
template <typename T>
std::vector<T> ScalePerSliceAlongAxis(std::vector<int64_t> dims, int axis,
const std::vector<T>& data) {
uint32 seed = 123;
std::minstd_rand rng(seed);
int64_t out_size = 1;
for (int dim : dims) {
out_size *= dim;
}
int minor_size = 1;
for (int i = axis + 1; i < dims.size(); ++i) {
minor_size *= dims[i];
}
std::vector<T> out(out_size);
int num_slices = (axis == -1) ? 1 : dims[axis];
for (int out_idx = 0; out_idx < out_size; ++out_idx) {
int in_idx = rng() % data.size();
T multiplier = ((out_idx / minor_size) % num_slices) + 1;
out[out_idx] = data[in_idx] * multiplier;
}
return out;
}
TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Quint8Scaled) {
const int axis = GetParam();
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "SCALED")
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
int num_slices = (axis == -1) ? 1 : dims[axis];
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-255.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0, 500.0}));
std::vector<float> min_ranges(num_slices), max_ranges(num_slices);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
min_ranges[slice_idx] = (slice_idx + 1) * -255.0;
max_ranges[slice_idx] = (slice_idx + 1) * 127.0;
}
AddInputFromArray<float>(TensorShape({num_slices}), min_ranges);
AddInputFromArray<float>(TensorShape({num_slices}), max_ranges);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape(dims));
test::FillValues<quint8>(
&expected,
ScalePerSliceAlongAxis<quint8>(dims, -1, {0, 0, 2, 3, 4, 129, 255, 255}));
auto output_min = *GetOutput(1);
auto output_max = *GetOutput(2);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(output_min.flat<float>()(slice_idx), 0);
EXPECT_EQ(output_max.flat<float>()(slice_idx), 127.0 * (slice_idx + 1));
}
auto output = *GetOutput(0);
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
}
TEST_F(QuantizedOpTest, QuantizeV2Quint8ScaledSmallInputRange) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "SCALED")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({3}), {-1.0, 0.0, 2.0});
AddInputFromArray<float>(TensorShape({1}), {-1.0f});
AddInputFromArray<float>(TensorShape({1}), {2.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({3}));
test::FillValues<quint8>(&expected, {0, 0, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_min, {0.0});
test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1));
Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_max, {2.0});
test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2));
}
TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Qint8Scaled) {
const int axis = GetParam();
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("narrow_range", false)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
int num_slices = (axis == -1) ? 1 : dims[axis];
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-128.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0}));
std::vector<float> min_ranges(num_slices), max_ranges(num_slices);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
min_ranges[slice_idx] = (slice_idx + 1) * -128.0;
max_ranges[slice_idx] = (slice_idx + 1) * 100.0;
}
AddInputFromArray<float>(TensorShape({num_slices}), min_ranges);
AddInputFromArray<float>(TensorShape({num_slices}), max_ranges);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape(dims));
test::FillValues<qint8>(
&expected,
ScalePerSliceAlongAxis<qint8>(dims, -1, {-128, 0, 1, 1, 2, 64, 127}));
auto output_min = *GetOutput(1);
auto output_max = *GetOutput(2);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1));
EXPECT_EQ(output_max.flat<float>()(slice_idx), 127.0 * (slice_idx + 1));
}
auto output = *GetOutput(0);
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
}
TEST_P(ParameterizedQuantizeOpTest, QuantizeV2Qint8ScaledNarrowRange) {
const int axis = GetParam();
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("narrow_range", true)
.Attr("axis", axis)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const std::vector<int64_t> dims = {2, 3, 4, 5};
int num_slices = (axis == -1) ? 1 : dims[axis];
AddInputFromArray<float>(
TensorShape(dims),
ScalePerSliceAlongAxis<float>(
dims, axis, {-128.0, 0.0, 1.0, 1.25, 1.75, 64.0, 127.0}));
std::vector<float> min_ranges(num_slices), max_ranges(num_slices);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
min_ranges[slice_idx] = (slice_idx + 1) * -128.0;
max_ranges[slice_idx] = (slice_idx + 1) * 100.0;
}
AddInputFromArray<float>(TensorShape({num_slices}), min_ranges);
AddInputFromArray<float>(TensorShape({num_slices}), max_ranges);
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape(dims));
test::FillValues<qint8>(
&expected,
ScalePerSliceAlongAxis<qint8>(dims, -1, {-127, 0, 1, 1, 2, 64, 126}));
auto output_min = *GetOutput(1);
auto output_max = *GetOutput(2);
for (int slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
EXPECT_EQ(output_min.flat<float>()(slice_idx), -128.0 * (slice_idx + 1));
EXPECT_EQ(output_max.flat<float>()(slice_idx), 128.0 * (slice_idx + 1));
}
  auto output = *GetOutput(0);
  test::ExpectTensorEqual<qint8>(expected, output);
}
INSTANTIATE_TEST_SUITE_P(All, ParameterizedQuantizeOpTest,
::testing::Values(-1, 1, 3));
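// The reported range is recomputed from the chosen scale: max 0.127 gives a
// scale of 0.001, so the output min becomes -128 * 0.001 = -0.128.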
TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledSmallInputRange) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({3}), {-0.064, 0.0, 0.127});
AddInputFromArray<float>(TensorShape({1}), {-0.064f});
AddInputFromArray<float>(TensorShape({1}), {0.127f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({3}));
test::FillValues<qint8>(&expected, {-64, 0, 127});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_min, {-0.128});
test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1));
Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_max, {0.127});
test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2));
}
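// HALF_TO_EVEN breaks ties toward the nearest even integer: 2.5 -> 2,
// 3.5 -> 4, -126.5 -> -126.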
TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledRoundToEven) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("round_mode", "HALF_TO_EVEN")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({7}),
{-126.5, 0.0, 1.0, 2.5, 3.5, 64.0, 127.0});
AddInputFromArray<float>(TensorShape({1}), {-128.0f});
AddInputFromArray<float>(TensorShape({1}), {-128.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({7}));
test::FillValues<qint8>(&expected, {-126, 0, 1, 2, 4, 64, 127});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_min, {-128.0});
test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1));
Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_max, {127.0});
test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2));
}
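// HALF_AWAY_FROM_ZERO breaks ties away from zero: 2.5 -> 3, -126.5 -> -127.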
TEST_F(QuantizedOpTest, QuantizeV2Qint8ScaledRoundAwayFromZero) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "SCALED")
.Attr("round_mode", "HALF_AWAY_FROM_ZERO")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({7}),
{-126.5, 0.0, 1.0, 2.5, 3.5, 64.0, 127.0});
AddInputFromArray<float>(TensorShape({1}), {-128.0f});
AddInputFromArray<float>(TensorShape({1}), {-128.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({7}));
test::FillValues<qint8>(&expected, {-127, 0, 1, 3, 4, 64, 127});
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
Tensor expected_output_min(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_min, {-128.0});
test::ExpectTensorEqual<float>(expected_output_min, *GetOutput(1));
Tensor expected_output_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_output_max, {127.0});
test::ExpectTensorEqual<float>(expected_output_max, *GetOutput(2));
}
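// MIN_FIRST into qint32 maps the range [-256, 256] onto the full int32 range,
// i.e. one unit corresponds to 1 << 23. Out-of-range inputs saturate to the
// int32 extremes, and results are compared with a small epsilon tolerance.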
TEST_F(QuantizedOpTest, QuantizeV2_32Bit) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint32>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int element_count = 8;
AddInputFromArray<float>(
TensorShape({element_count}),
{-500.0f, 0.0f, 1.0f, 1.25f, 1.75f, 127.0f, 255.0f, 500.0f});
AddInputFromArray<float>(TensorShape({1}), {-256.0f});
AddInputFromArray<float>(TensorShape({1}), {256.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT32, TensorShape({element_count}));
test::FillValues<qint32>(&expected,
{
std::numeric_limits<int32>::min(),
0,
static_cast<int32>(1.0f * (1 << 23)),
static_cast<int32>(1.25f * (1 << 23)),
static_cast<int32>(1.75f * (1 << 23)),
static_cast<int32>(127.0f * (1 << 23)),
static_cast<int32>(255.0f * (1 << 23)),
std::numeric_limits<int32>::max(),
});
const int64_t epsilon = 1 << 8;
const qint32* output_data = GetOutput(0)->flat<qint32>().data();
const qint32* expected_data = expected.flat<qint32>().data();
for (int i = 0; i < element_count; ++i) {
const int64_t delta = output_data[i] - expected_data[i];
EXPECT_GT(epsilon, std::abs(delta))
<< "output_data[" << i << "]=" << output_data[i] << ", expected_data["
<< i << "]=" << expected_data[i] << ", delta=" << delta;
}
}
TEST_F(QuantizedOpTest, QuantizeV2Ports) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}),
{1.0, 1.25, 1.75, 127.0, 255.0, 500.0});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&expected, {1, 1, 2, 127, 255, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(255.0f, output_max, 1e-5f);
}
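// A degenerate range (min == max) must be widened internally to avoid a zero
// scale, so the reported output max ends up strictly greater than zero.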
TEST_F(QuantizedOpTest, QuantizeV2EqualRange) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({6}), {0.0, 0.0, 0.0, 0.0, 0.0, 0.0});
AddInputFromArray<float>(TensorShape({1}), {0.0f});
AddInputFromArray<float>(TensorShape({1}), {0.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({6}));
test::FillValues<quint8>(&expected, {0, 0, 0, 0, 0, 0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_LT(0.0f, output_max);
}
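// MIN_FIRST nudges the requested range so that 0.0 is exactly representable;
// here the min moves from 0.1 down to 0.0.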
TEST_F(QuantizedOpTest, QuantizeV2MovesMinToIncludeZero) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({3}), {0.1, 0.2, 0.3});
AddInputFromArray<float>(TensorShape({1}), {0.1});
AddInputFromArray<float>(TensorShape({1}), {0.3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({3}));
test::FillValues<quint8>(&expected, {85, 170, 255});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
EXPECT_NEAR(0.0f, output_min, 1e-5f);
EXPECT_NEAR(0.3f, output_max, 1e-5f);
}
TEST_F(QuantizedOpTest, QuantizeV2MovesMaxToIncludeZero) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({3}), {-0.1, -0.2, -0.3});
AddInputFromArray<float>(TensorShape({1}), {-0.3});
AddInputFromArray<float>(TensorShape({1}), {-0.1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QUINT8, TensorShape({3}));
test::FillValues<quint8>(&expected, {170, 85, 0});
test::ExpectTensorEqual<quint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
EXPECT_NEAR(-0.3f, output_min, 1e-5f);
EXPECT_NEAR(0.0f, output_max, 1e-5f);
}
TEST_F(QuantizedOpTest, Dequantize) {
TF_ASSERT_OK(NodeDefBuilder("dequantize_op", "Dequantize")
.Input(FakeInput(DT_QUINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<quint8>::v())
.Attr("mode", "MIN_FIRST")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<quint8>(TensorShape({6}), {1, 2, 4, 8, 16, 255});
AddInputFromArray<float>(TensorShape({1}), {0});
AddInputFromArray<float>(TensorShape({1}), {255.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({6}));
test::FillValues<float>(&expected, {1.0, 2.0, 4.0, 8.0, 16.0, 255.0});
test::ExpectTensorNear<float>(expected, *GetOutput(0), 0.5);
}
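// With ensure_minimum_range set to 0, the op keeps the tiny requested range
// instead of widening it, so micro-scale inputs quantize meaningfully.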
TEST_F(QuantizedOpTest, QuantizeV2DisableEnsureMinimumRange) {
TF_ASSERT_OK(NodeDefBuilder("quantize_op", "QuantizeV2")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("T", DataTypeToEnum<qint8>::v())
.Attr("mode", "MIN_FIRST")
.Attr("ensure_minimum_range", 0.0f)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<float>(TensorShape({3}), {-0.000001, 0.0, 0.000042});
AddInputFromArray<float>(TensorShape({1}), {-0.000128});
AddInputFromArray<float>(TensorShape({1}), {0.000127});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_QINT8, TensorShape({3}));
test::FillValues<qint8>(&expected, {-1, 0, 42});
for (int i = 0; i < 3; ++i) {
LOG(INFO) << GetOutput(0)->flat<qint8>()(i);
}
test::ExpectTensorEqual<qint8>(expected, *GetOutput(0));
const float output_min = GetOutput(1)->flat<float>()(0);
const float output_max = GetOutput(2)->flat<float>()(0);
LOG(INFO) << "output_min = " << output_min;
LOG(INFO) << "output_max = " << output_max;
EXPECT_NEAR(-0.000128f, output_min, 1e-7f);
  EXPECT_NEAR(0.000127f, output_max, 1e-7f);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2acfe0d1-f7e4-4526-b22b-1e7ab437fdaa | cpp | tensorflow/tensorflow | no_op_cost_measurement | tensorflow/core/common_runtime/no_op_cost_measurement.cc | tensorflow/core/common_runtime/no_op_cost_measurement_test.cc | #include "tensorflow/core/common_runtime/no_op_cost_measurement.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/cost_constants.h"
namespace tensorflow {
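// A cost measurement that always reports zero cost, registered under the
// "no_op" cost type for use when no real measurement is needed.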
absl::Duration NoOpCostMeasurement::GetTotalCost() { return absl::Duration(); }
absl::string_view NoOpCostMeasurement::GetCostType() const {
return kNoOpCostName;
}
REGISTER_COST_MEASUREMENT(kNoOpCostName, NoOpCostMeasurement);
} | #include "tensorflow/core/common_runtime/no_op_cost_measurement.h"
#include "tensorflow/core/common_runtime/cost_measurement.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(NoOpCostMeasurementTest, Basic) {
CostMeasurement::Context context;
NoOpCostMeasurement measurement(context);
EXPECT_EQ(measurement.GetTotalCost(), absl::ZeroDuration());
EXPECT_EQ(measurement.GetCostType(), "no_op");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/no_op_cost_measurement.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/no_op_cost_measurement_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a547991a-c996-49e1-9bda-5e264c5885da | cpp | tensorflow/tensorflow | fallback_state | tensorflow/core/tfrt/fallback/fallback_state.cc | tensorflow/core/tfrt/fallback/fallback_state_test.cc | #include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <new>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/graph_execution_state.h"
#include "tensorflow/core/common_runtime/rendezvous_mgr.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/rendezvous.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/tpu/virtual_device.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/refcount.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
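// Assembles a fully qualified device name from a "/job:.../task:0" name
// prefix (the trailing task id is replaced), a device type, and a device id.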
string DeviceName(absl::string_view name_prefix, absl::string_view device_type,
int32_t task_id, size_t device_id) {
return strings::StrCat(absl::StripSuffix(name_prefix, "0"), task_id,
"/device:", device_type, ":", device_id);
}
DeviceAttributes BuildDeviceAttributes(absl::string_view name_prefix,
const char *device_type, int32_t task_id,
size_t device_id) {
const DeviceAttributes attrs = Device::BuildDeviceAttributes(
DeviceName(name_prefix, device_type, task_id, device_id),
DeviceType(device_type), Bytes(16ULL << 30), DeviceLocality(),
strings::StrCat("device: ", device_type, " device"));
return attrs;
}
}
absl::StatusOr<std::unique_ptr<FallbackState>> FallbackState::Create(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithCpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithMockGpuDevice(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib) {
std::vector<std::unique_ptr<Device>> devices;
TF_RETURN_IF_ERROR(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto device_attrs =
BuildDeviceAttributes("/job:localhost/replica:0/task:0", "GPU", 0, 0);
devices.push_back(
std::make_unique<VirtualDevice>(session_options.env, device_attrs));
return std::make_unique<FallbackState>(session_options, std::move(devices),
fdef_lib);
}
absl::StatusOr<std::unique_ptr<FallbackState>>
FallbackState::CreateWithDeviceMgr(
const SessionOptions &session_options,
const tensorflow::FunctionDefLibrary &fdef_lib,
absl::Nonnull<DynamicDeviceMgr *> device_mgr) {
return std::make_unique<FallbackState>(session_options, device_mgr, fdef_lib);
}
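// The device manager is either owned (when constructed from a vector of
// devices) or borrowed (when given a raw DynamicDeviceMgr pointer);
// device_manager_ptr_ always points at whichever one is in use.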
FallbackState::FallbackState(const SessionOptions &session_options,
std::variant<std::vector<std::unique_ptr<Device>>,
absl::Nonnull<DynamicDeviceMgr *>>
device_mgr,
const tensorflow::FunctionDefLibrary &fdef_lib)
: session_options_(session_options),
device_manager_(
std::holds_alternative<std::vector<std::unique_ptr<Device>>>(
device_mgr)
? std::move(
std::get<std::vector<std::unique_ptr<Device>>>(device_mgr))
: std::vector<std::unique_ptr<Device>>()),
device_manager_ptr_(
std::holds_alternative<absl::Nonnull<DynamicDeviceMgr *>>(device_mgr)
? std::get<absl::Nonnull<DynamicDeviceMgr *>>(device_mgr)
: &device_manager_),
func_lib_def_(OpRegistry::Global(), fdef_lib),
pflr_(device_manager_ptr_, session_options.env, &session_options.config,
TF_GRAPH_DEF_VERSION, &func_lib_def_,
session_options.config.graph_options().optimizer_options(),
nullptr, nullptr,
nullptr,
Rendezvous::Factory{[](const int64_t, const DeviceMgr *device_mgr,
tsl::core::RefCountPtr<Rendezvous> *r) {
*r = tsl::core::RefCountPtr<Rendezvous>(
new IntraProcessRendezvous(device_mgr));
return absl::OkStatus();
}}) {
for (auto *d : device_manager_ptr_->ListDevices()) {
device_set_.AddDevice(d);
}
device_set_.set_client_device(device_manager().HostCPU());
}
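// Builds a GraphExecutionState for `graph_def` over this state's device set;
// `run_placer` controls whether the placer runs on the base graph.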
absl::StatusOr<std::unique_ptr<GraphExecutionState>>
FallbackState::CreateGraphExecutionState(GraphDef graph_def,
bool run_placer) const {
GraphExecutionStateOptions options;
options.device_set = &device_set_;
options.session_options = &session_options_;
options.session_handle = "tfrt_fallback_handle";
options.run_placer = run_placer;
std::unique_ptr<GraphExecutionState> execution_state;
TF_RETURN_IF_ERROR(GraphExecutionState::MakeForBaseGraph(
std::move(graph_def), options, &execution_state));
return execution_state;
}
absl::Status FallbackState::AddFunctionDef(const FunctionDef &func_def) {
return func_lib_def_.AddFunctionDef(func_def);
}
}
} | #include "tensorflow/core/tfrt/fallback/fallback_state.h"
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/base/nullability.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/const_op.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/framework/device_factory.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
using ::testing::Not;
TEST(FallbackStateTest, CreateWithCpuDeviceVector) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
std::variant<std::vector<std::unique_ptr<Device>>,
absl::Nonnull<DynamicDeviceMgr*>>
device_variant = std::move(devices);
auto fallback_state = std::make_unique<tfrt_stub::FallbackState>(
session_options, std::move(device_variant), fdef_lib);
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDevices(), 0);
EXPECT_EQ(device_manager.NumDeviceType("CPU"), 1);
}
TEST(FallbackStateTest, CreateWithDynamicDeviceMgr) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddCpuDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
auto static_device_mgr =
std::make_unique<DynamicDeviceMgr>(std::move(devices));
absl::Nonnull<DynamicDeviceMgr*> device_mgr_ptr(static_device_mgr.get());
auto fallback_state = std::make_unique<tfrt_stub::FallbackState>(
session_options, device_mgr_ptr, fdef_lib);
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDevices(), 0);
EXPECT_EQ(device_manager.NumDeviceType("CPU"), 1);
}
TEST(FallbackStateTest, CreateRendezvous) {
FunctionDefLibrary flib;
*flib.add_function() = FunctionDefHelper::Define(
"dummy_fn",
{},
{},
{},
{});
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::Create({}, flib));
const ProcessFunctionLibraryRuntime& pflr =
fallback_state->process_function_library_runtime();
FunctionLibraryRuntime::Options opts;
opts.source_device = "/job:localhost/replica:0/task:0";
opts.remote_execution = true;
auto status = pflr.RunSync(opts, pflr.GetHandle("dummy_fn"), {}, nullptr);
EXPECT_THAT(status, Not(StatusIs(error::FAILED_PRECONDITION,
HasSubstr("rendezvous"))));
}
TEST(FallbackStateTest, CreateGraphExecutionState) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(
auto fallback_state,
tfrt_stub::FallbackState::CreateWithCpuDevice(session_options, fdef_lib));
GraphDef graphdef;
{
auto scope = tensorflow::Scope::NewRootScope().WithDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const(scope.WithOpName("a"), 2.0, {1, 1});
TF_ASSERT_OK(scope.ToGraphDef(&graphdef));
}
TF_ASSERT_OK_AND_ASSIGN(
auto graph_execution_state,
fallback_state->CreateGraphExecutionState(std::move(graphdef)));
}
TEST(FallbackStateTest, CreateWithMockGpuDevice) {
tensorflow::SessionOptions session_options;
tensorflow::FunctionDefLibrary fdef_lib;
TF_ASSERT_OK_AND_ASSIGN(auto fallback_state,
tfrt_stub::FallbackState::CreateWithMockGpuDevice(
session_options, fdef_lib));
const auto& device_manager = fallback_state->device_manager();
EXPECT_GT(device_manager.NumDeviceType("GPU"), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/fallback_state.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/fallback/fallback_state_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2091f3ec-d32a-44a7-96a0-c83bf61ad3d7 | cpp | tensorflow/tensorflow | dequantize | tensorflow/lite/toco/graph_transformations/dequantize.cc | tensorflow/lite/kernels/dequantize_test.cc | #include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/graph_transformations/remove_trivial_passthrough.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
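// Replaces the array's quantized buffer with floats, computed as
// scale * (quantized_value - zero_point).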
template <ArrayDataType A>
void DequantizeBuffer(Array* array) {
const auto old_data = array->GetBuffer<A>().data;
array->buffer = nullptr;
array->data_type = ArrayDataType::kFloat;
auto& new_data = array->GetMutableBuffer<ArrayDataType::kFloat>().data;
new_data.resize(old_data.size());
const auto& qparams = array->GetQuantizationParams();
for (int i = 0, end = old_data.size(); i < end; i++) {
new_data[i] = qparams.scale * (old_data[i] - qparams.zero_point);
}
}
std::vector<std::unique_ptr<Operator>>::iterator FindFirstOpWithInput(
Model* model, const std::string& array_name) {
for (auto it = model->operators.begin(); it != model->operators.end(); ++it) {
for (const auto& input : it->get()->inputs) {
if (input == array_name) {
return it;
}
}
}
return model->operators.end();
}
void ClearArrayQuantizationParams(const std::string& array_name, Model* model) {
auto* array = &model->GetArray(array_name);
CHECK(array->quantization_params);
for (auto& input_array : *model->flags.mutable_input_arrays()) {
if (input_array.name() == array_name) {
auto& qparams = *array->quantization_params;
const double new_std_value = 1. / qparams.scale;
const double new_mean_value = qparams.zero_point;
if (input_array.has_std_value()) {
CHECK_LE(std::abs(new_std_value - input_array.std_value()), 0.001);
} else {
input_array.set_std_value(new_std_value);
}
if (input_array.has_mean_value()) {
CHECK_LE(std::abs(new_mean_value - input_array.mean_value()), 0.001);
} else {
input_array.set_mean_value(new_mean_value);
}
}
}
array->quantization_params = nullptr;
}
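// Converts a quantized array back to float. Constant buffers are dequantized
// in place. For non-constant arrays with known min/max, a FakeQuant op is
// inserted to preserve the range: before the array for model outputs and RNN
// back edges, after it for model inputs and RNN states.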
bool DequantizeArray(const std::string& array_name,
GraphTransformation* transformation, Model* model) {
auto* array = &model->GetArray(array_name);
if (!array->quantization_params) {
return false;
}
transformation->AddMessageF("Dequantizing array: %s", array_name);
if (array->buffer) {
if (array->data_type == ArrayDataType::kUint8) {
DequantizeBuffer<ArrayDataType::kUint8>(array);
} else if (array->data_type == ArrayDataType::kInt32) {
DequantizeBuffer<ArrayDataType::kInt32>(array);
} else {
LOG(FATAL) << "Unhandled data type";
}
    CHECK(array->data_type == ArrayDataType::kFloat);
    CHECK(array->buffer->type == ArrayDataType::kFloat);
  } else {
    array->data_type = ArrayDataType::kFloat;
  }
  ClearArrayQuantizationParams(array_name, model);
  if (array->buffer) {
    return true;
  }
auto* op_outputting_array = GetOpWithOutput(*model, array_name);
if (op_outputting_array) {
if (op_outputting_array->type == OperatorType::kReshape) {
return true;
}
}
if (!array->minmax) {
return true;
}
bool must_insert_fakequant_before = false;
bool must_insert_fakequant_after = false;
if (IsInputArray(*model, array_name)) {
must_insert_fakequant_after = true;
}
for (const std::string& output_array : model->flags.output_arrays()) {
if (array_name == output_array) {
must_insert_fakequant_before = true;
}
}
for (const auto& rnn_state : model->flags.rnn_states()) {
if (array_name == rnn_state.state_array()) {
must_insert_fakequant_after = true;
}
if (array_name == rnn_state.back_edge_source_array()) {
must_insert_fakequant_before = true;
}
}
CHECK(!(must_insert_fakequant_before && must_insert_fakequant_after));
auto* fakequant_op = new FakeQuantOperator;
model->operators.emplace(FindFirstOpWithInput(model, array_name),
fakequant_op);
const std::string& new_array_name = AvailableArrayName(*model, array_name);
auto& new_array = model->GetOrCreateArray(new_array_name);
new_array.data_type = ArrayDataType::kFloat;
new_array.copy_shape(array->shape());
new_array.GetOrCreateMinMax() = array->GetMinMax();
fakequant_op->minmax = std::make_unique<MinMax>();
*fakequant_op->minmax = array->GetMinMax();
fakequant_op->narrow_range = array->narrow_range;
if (must_insert_fakequant_before) {
for (const auto& op : model->operators) {
for (std::string& output : op->outputs) {
if (output == array_name) {
output = new_array_name;
}
}
}
fakequant_op->inputs = {new_array_name};
fakequant_op->outputs = {array_name};
} else {
for (const auto& op : model->operators) {
for (std::string& input : op->inputs) {
if (input == array_name) {
input = new_array_name;
}
}
}
fakequant_op->inputs = {array_name};
fakequant_op->outputs = {new_array_name};
}
return true;
}
}
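// Removes Dequantize ops whose output is meant to stay float, then
// dequantizes every non-optional input and output array of the current op.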
::tensorflow::Status Dequantize::Run(Model* model, std::size_t op_index,
bool* modified) {
*modified = false;
const auto op_it = model->operators.begin() + op_index;
auto* op = op_it->get();
if (op->type == OperatorType::kDequantize) {
auto& input_array = model->GetArray(op->inputs[0]);
if (input_array.data_type == ArrayDataType::kFloat) {
return absl::OkStatus();
}
if (input_array.final_data_type != ArrayDataType::kFloat) {
return absl::OkStatus();
}
input_array.data_type = ArrayDataType::kFloat;
input_array.quantization_params = nullptr;
auto& output_array = model->GetArray(op->outputs[0]);
output_array.data_type = ArrayDataType::kFloat;
output_array.quantization_params = nullptr;
*modified = RemoveTrivialPassthroughOp(this, model, op_index);
return absl::OkStatus();
}
std::vector<std::string> arrays;
arrays.reserve(op->inputs.size());
for (const std::string& input : op->inputs) {
arrays.push_back(input);
}
for (const std::string& output : op->outputs) {
arrays.push_back(output);
}
bool changed = false;
for (const std::string& array : arrays) {
if (!model->IsOptionalArray(array)) {
changed |= DequantizeArray(array, this, model);
}
}
*modified = changed;
return absl::OkStatus();
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "Eigen/Core"
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DEQUANTIZE();
}
}
namespace {
using ::testing::ElementsAreArray;
class DequantizeOpModel : public SingleOpModel {
public:
explicit DequantizeOpModel() {}
DequantizeOpModel(TensorType type, std::initializer_list<int> shape,
float scale, int32_t zero_point, int version) {
const TensorData input_tensor_data = {type, shape, 0, 0, scale, zero_point};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
  template <typename T>
  void SetInputInt4(int input, const std::vector<T>& data) {
    // Widen the 4-bit values into int8 storage for PopulateTensor4bit.
    std::vector<int8_t> data_int8(data.begin(), data.end());
    PopulateTensor4bit(input, 0, data_int8.data(),
                       data_int8.data() + data_int8.size());
  }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(DequantizeOpTest, Int4) {
DequantizeOpModel m(TensorType_INT4, {2, 2}, 0.5, -1, 6);
m.SetInputInt4<int8_t>(0, {7, 6, -7, -8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({4, 3.5, -3, -3.5})));
}
TEST(DequantizeOpTest, Uint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, 0.5, 127, 1);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Int8) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, 0.5, -1, 2);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizeOpTest, Float16) {
DequantizeOpModel m(TensorType_FLOAT16, {2, 3}, 1.0f, 0, 3);
std::vector<Eigen::half> half{Eigen::half{-535.54f}, Eigen::half{-100.0f},
Eigen::half{-1.0f}, Eigen::half{0.f},
Eigen::half{1.0f}, Eigen::half{100.32f}};
m.PopulateTensor(0, 0, reinterpret_cast<TfLiteFloat16*>(half.data()),
reinterpret_cast<TfLiteFloat16*>(half.data()) + half.size());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-535.54f, -100.0f, -1.0f, 0.f, 1.0f, 100.32f},
0.1f)));
}
TEST(DequantizeOpTest, Int16) {
DequantizeOpModel m(TensorType_INT16, {2, 5}, 0.5, 0, 4);
m.SetInput<int16_t>({-129, -126, -125, -124, -123, 124, 125, 126, 127, 131});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 65.5})));
}
class DequantizePerChannelOpModel : public DequantizeOpModel {
public:
DequantizePerChannelOpModel(TensorType type, std::initializer_list<int> shape,
std::initializer_list<float> scales,
std::initializer_list<int64_t> zero_points,
int channel_dim, int version) {
std::vector<float> per_channel_scales(scales);
std::vector<int64_t> input_offsets(zero_points);
const TensorData input_tensor_data = {
type, shape, 0, 0, 0.0f, 0, true, per_channel_scales,
input_offsets, channel_dim};
input_ = AddInput(input_tensor_data);
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DEQUANTIZE, ops::builtin::Register_DEQUANTIZE(),
version);
BuildInterpreter({GetShape(input_)});
}
};
TEST(DequantizePerChannelOpTest, Uint8) {
DequantizePerChannelOpModel m(TensorType_UINT8, {2, 5}, {0.5, 0.5},
{127, 127}, 0, 5);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(DequantizePerChannelOpTest, Int8) {
DequantizePerChannelOpModel m(TensorType_INT8, {2, 5}, {0.5, 0.5}, {-1, -1},
0, 5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/dequantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dequantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb460c66-7cb9-40e7-b285-95b38d049932 | cpp | google/arolla | while_loop | arolla/expr/operators/while_loop/while_loop.cc | arolla/expr/operators/while_loop/while_loop_test.cc | #include "arolla/expr/operators/while_loop/while_loop.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/operators/while_loop/while_loop_impl.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::GetAttrQTypes;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
constexpr absl::string_view kDefaultOperatorName = "anonymous.while_loop";
constexpr absl::string_view kLoopStatePlaceholderName = "loop_state";
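// Returns the expression names in sorted order, for a deterministic layout of
// the loop state tuple.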
std::vector<std::string> ExpressionNames(
const NamedExpressions& named_expressions) {
std::vector<std::string> names_order;
names_order.reserve(named_expressions.size());
for (const auto& [k, _] : named_expressions) {
names_order.push_back(k);
}
std::sort(names_order.begin(), names_order.end());
return names_order;
}
absl::StatusOr<NamedExpressions> MakeNamedAccessors(
const ExprNodePtr& tuple_node, absl::Span<const std::string> names_order) {
NamedExpressions named_accessors;
named_accessors.reserve(names_order.size());
for (size_t i = 0; i < names_order.size(); ++i) {
ASSIGN_OR_RETURN(
auto nth_field,
expr::CallOp("core.get_nth", {tuple_node, expr::Literal<int64_t>(i)}));
named_accessors.emplace(names_order[i], std::move(nth_field));
}
return named_accessors;
}
absl::StatusOr<ExprNodePtr> WrapAsTuple(
const NamedExpressions& named_expressions,
absl::Span<const std::string> names_order) {
std::vector<ExprNodePtr> deps;
deps.reserve(names_order.size() + 1);
deps.emplace_back(Literal(Text(absl::StrJoin(names_order, ","))));
for (const auto& f : names_order) {
if (!named_expressions.contains(f)) {
return absl::InvalidArgumentError(absl::StrFormat(
"value for the state variable %s is not specified", f));
}
deps.push_back(named_expressions.at(f));
}
return BindOp("namedtuple.make", deps, {});
}
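// Casts each initial state value to the qtype its body expression would have
// after one iteration, so the loop state type is stable across iterations.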
absl::StatusOr<NamedExpressions> AddImplicitCastsToInitialState(
const NamedExpressions& initial_state, const NamedExpressions& body) {
NamedExpressions new_initial_state = initial_state;
for (auto& [name, expr] : body) {
ASSIGN_OR_RETURN(auto expr_after_one_iteration,
SubstitutePlaceholders(expr, initial_state));
ASSIGN_OR_RETURN(new_initial_state[name],
CallOp("core.cast", {initial_state.at(name),
CallOp("qtype.qtype_of",
{expr_after_one_iteration}),
Literal(true)}),
_ << "while casting initial state for P." << name);
}
return new_initial_state;
}
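// Hoists subexpressions of the body and condition that do not depend on the
// loop state into the initial state as extra immutable variables, named
// "_while_loop_immutable_<n>".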
absl::Status MoveImmutablesIntoInitialState(NamedExpressions& initial_state,
ExprNodePtr& condition,
NamedExpressions& body) {
constexpr absl::string_view kImmutableNamePrefix = "_while_loop_immutable";
for (auto& [name, _] : body) {
if (absl::StartsWith(name, kImmutableNamePrefix)) {
return absl::InvalidArgumentError(absl::StrFormat(
"expression names starting with '%s' are forbidden in while_loop",
kImmutableNamePrefix));
}
}
absl::flat_hash_map<Fingerprint, std::string> immutable_names;
auto immutable_naming_function = [&](const ExprNodePtr& node) -> std::string {
if (auto it = immutable_names.find(node->fingerprint());
it != immutable_names.end()) {
return it->second;
}
std::string name =
absl::StrFormat("%s_%d", kImmutableNamePrefix, immutable_names.size());
immutable_names.emplace(node->fingerprint(), name);
return name;
};
for (auto& [name, expr] : body) {
ASSIGN_OR_RETURN(
(auto [converted_expr, immutables]),
while_loop_impl::ExtractImmutables(expr, immutable_naming_function));
expr = std::move(converted_expr);
initial_state.merge(std::move(immutables));
}
ASSIGN_OR_RETURN(
(auto [converted_condition, condition_immutables]),
while_loop_impl::ExtractImmutables(condition, immutable_naming_function));
condition = std::move(converted_condition);
initial_state.merge(std::move(condition_immutables));
return absl::OkStatus();
}
absl::Status CheckAllStateFieldsAreInitialized(
const std::vector<std::string>& all_field_names,
const std::vector<std::string>& requested_field_names) {
absl::flat_hash_set<absl::string_view> all_field_names_set(
all_field_names.begin(), all_field_names.end());
for (const auto& name : requested_field_names) {
if (!all_field_names_set.contains(name)) {
return absl::InvalidArgumentError(absl::StrFormat(
"no initial value given for the loop state variable `%s`", name));
}
}
return absl::OkStatus();
}
}
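// Builds a while_loop expression: mutable state variables are packed into a
// namedtuple that becomes the operator's first argument, immutables are
// passed as extra arguments, and the condition and body are wrapped into
// lambda operators over that state.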
absl::StatusOr<ExprNodePtr> MakeWhileLoop(NamedExpressions initial_state,
ExprNodePtr condition,
NamedExpressions body) {
RETURN_IF_ERROR(
MoveImmutablesIntoInitialState(initial_state, condition, body));
auto state_field_names = ExpressionNames(initial_state);
auto mutable_state_field_names = ExpressionNames(body);
RETURN_IF_ERROR(CheckAllStateFieldsAreInitialized(state_field_names,
mutable_state_field_names));
RETURN_IF_ERROR(CheckAllStateFieldsAreInitialized(
state_field_names, expr::GetPlaceholderKeys(condition)));
for (const auto& [_, expr] : body) {
RETURN_IF_ERROR(CheckAllStateFieldsAreInitialized(
state_field_names, expr::GetPlaceholderKeys(expr)));
}
ASSIGN_OR_RETURN(initial_state,
AddImplicitCastsToInitialState(initial_state, body));
std::vector<std::string> immutable_state_field_names;
immutable_state_field_names.reserve(state_field_names.size() -
mutable_state_field_names.size());
absl::c_set_difference(state_field_names, mutable_state_field_names,
std::back_inserter(immutable_state_field_names));
ASSIGN_OR_RETURN(auto init_mutable_state_tuple,
WrapAsTuple(initial_state, mutable_state_field_names));
ASSIGN_OR_RETURN(auto body_mutable_state_tuple,
WrapAsTuple(body, mutable_state_field_names));
ExprOperatorSignature operators_signature;
operators_signature.parameters.reserve(1 +
immutable_state_field_names.size());
operators_signature.parameters.push_back(
ExprOperatorSignature::Parameter{std::string{kLoopStatePlaceholderName}});
std::vector<ExprNodePtr> init_deps;
init_deps.reserve(1 + immutable_state_field_names.size());
init_deps.emplace_back(init_mutable_state_tuple);
for (const auto& name : immutable_state_field_names) {
operators_signature.parameters.push_back(
ExprOperatorSignature::Parameter{name});
DCHECK(initial_state.contains(name))
<< "Internal inconsistency: no initializer for node " << name;
init_deps.emplace_back(initial_state.at(name));
}
auto state_placeholder = Placeholder(kLoopStatePlaceholderName);
ASSIGN_OR_RETURN(
auto state_fields,
MakeNamedAccessors(state_placeholder, mutable_state_field_names));
ASSIGN_OR_RETURN(auto condition_op,
MakeLambdaOperator(
"anonymous.loop_condition", operators_signature,
SubstitutePlaceholders(condition, state_fields,
false)));
ASSIGN_OR_RETURN(auto body_op, MakeLambdaOperator(
"anonymous.loop_body", operators_signature,
SubstitutePlaceholders(
body_mutable_state_tuple, state_fields,
false)));
ASSIGN_OR_RETURN(
ExprOperatorPtr while_op,
WhileLoopOperator::Make(operators_signature, condition_op, body_op));
ASSIGN_OR_RETURN(auto while_node, BindOp(while_op, init_deps, {}));
return while_node;
}
absl::StatusOr<std::shared_ptr<WhileLoopOperator>> WhileLoopOperator::Make(
const ExprOperatorSignature& signature, const ExprOperatorPtr& condition,
const ExprOperatorPtr& body) {
return Make(kDefaultOperatorName, signature, condition, body);
}
absl::StatusOr<std::shared_ptr<WhileLoopOperator>> WhileLoopOperator::Make(
absl::string_view name, const ExprOperatorSignature& signature,
const ExprOperatorPtr& condition, const ExprOperatorPtr& body) {
if (signature.parameters.empty()) {
return absl::InvalidArgumentError(
"WhileLoopOperator must at least have one parameter, got 0");
}
ASSIGN_OR_RETURN(auto condition_signature, condition->GetSignature());
ASSIGN_OR_RETURN(auto body_signature, body->GetSignature());
auto signature_spec = GetExprOperatorSignatureSpec(signature);
auto body_signature_spec = GetExprOperatorSignatureSpec(body_signature);
if (signature_spec != body_signature_spec) {
return absl::InvalidArgumentError(absl::StrFormat(
"loop signature does not match its body signature: `%s` vs `%s`",
signature_spec, body_signature_spec));
}
auto condition_signature_spec =
GetExprOperatorSignatureSpec(condition_signature);
if (signature_spec != condition_signature_spec) {
return absl::InvalidArgumentError(absl::StrFormat(
"loop signature does not match its condition signature: `%s` vs `%s`",
signature_spec, condition_signature_spec));
}
return std::make_shared<WhileLoopOperator>(PrivateConstrutorTag(), name,
signature, condition, body);
}
WhileLoopOperator::WhileLoopOperator(PrivateConstrutorTag,
absl::string_view name,
const ExprOperatorSignature& signature,
const ExprOperatorPtr& condition,
const ExprOperatorPtr& body)
: ExprOperatorWithFixedSignature(
name, signature,
"",
FingerprintHasher("arolla::expr_operators::WhileLoopOperator")
.Combine(name, condition->fingerprint(), body->fingerprint())
.Finish()),
condition_(condition),
body_(body) {}
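// The loop's output type equals the type of its state tuple. The condition
// must return OPTIONAL_UNIT and the body must preserve the state type.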
absl::StatusOr<ExprAttributes> WhileLoopOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
DCHECK_GE(inputs.size(), 1);
if (!inputs[0].qtype()) {
return ExprAttributes{};
}
std::vector<ExprAttributes> new_inputs;
new_inputs.reserve(inputs.size());
new_inputs.emplace_back(inputs[0].qtype());
new_inputs.insert(new_inputs.end(), inputs.begin() + 1, inputs.end());
ASSIGN_OR_RETURN(
auto condition_attr, condition_->InferAttributes(new_inputs),
_ << "in condition of `" << display_name() << "` while loop");
if (condition_attr.qtype() &&
condition_attr.qtype() != GetQType<OptionalUnit>()) {
return absl::FailedPreconditionError(absl::StrFormat(
"incorrect return type of the condition of `%s` while loop for input "
"types %s: expected %s, got %s",
display_name(), FormatTypeVector(GetAttrQTypes(inputs)),
GetQType<OptionalUnit>()->name(), condition_attr.qtype()->name()));
}
ASSIGN_OR_RETURN(auto body_attr, body_->InferAttributes(new_inputs),
_ << "in body of `" << display_name() << "` while loop");
if (body_attr.qtype() && body_attr.qtype() != inputs[0].qtype()) {
return absl::FailedPreconditionError(absl::StrFormat(
"incorrect return type of the body of `%s` while loop for input types "
"%s: expected %s, got %s",
display_name(), FormatTypeVector(GetAttrQTypes(inputs)),
inputs[0].qtype()->name(), body_attr.qtype()->name()));
}
return ExprAttributes(inputs[0].qtype());
}
} | #include "arolla/expr/operators/while_loop/while_loop.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::LambdaOperator;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::expr::Placeholder;
using ::arolla::testing::EqualsAttr;
using ::arolla::testing::EqualsExpr;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::NotNull;
using Attr = ::arolla::expr::ExprAttributes;
TEST(WhileLoopTest, WhileLoopOperatorMake) {
ASSERT_OK_AND_ASSIGN(auto body, MakeLambdaOperator(Placeholder("param")));
ASSERT_OK_AND_ASSIGN(
auto condition,
MakeLambdaOperator(
CallOp("core.equal", {Placeholder("param"), Placeholder("param")})));
ASSERT_OK_AND_ASSIGN(auto good_loop_operator,
WhileLoopOperator::Make(
condition->GetSignature().value(), condition, body));
EXPECT_THAT(good_loop_operator->display_name(), Eq("anonymous.while_loop"));
EXPECT_THAT(good_loop_operator->condition(), Eq(condition));
EXPECT_THAT(good_loop_operator->body(), Eq(body));
EXPECT_THAT(good_loop_operator->InferAttributes({Attr(GetQType<int64_t>())}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
EXPECT_THAT(good_loop_operator->InferAttributes(
{Attr(GetQType<int64_t>()), Attr(GetQType<int64_t>())}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("incorrect number of dependencies passed to "
"an operator node: expected 1 but got 2")));
}
TEST(WhileLoopTest, WhileLoopOperatorMakeValidation) {
ASSERT_OK_AND_ASSIGN(
auto condition,
MakeLambdaOperator(
CallOp("core.equal", {Placeholder("param"), Placeholder("param")})));
ASSERT_OK_AND_ASSIGN(
auto too_many_args_body,
MakeLambdaOperator(
ExprOperatorSignature::Make("x, y"),
CallOp("math.add", {Placeholder("x"), Placeholder("y")})));
EXPECT_THAT(WhileLoopOperator::Make(condition->GetSignature().value(),
condition, too_many_args_body),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("loop signature does not match its body "
"signature: `param` vs `x, y`")));
}
TEST(WhileLoopTest, WhileLoopOperatorWrongCondition) {
ASSERT_OK_AND_ASSIGN(auto good_body,
MakeLambdaOperator(Placeholder("param")));
const auto& wrong_type_condition = good_body;
ASSERT_OK_AND_ASSIGN(
auto wrong_condition_operator,
WhileLoopOperator::Make(wrong_type_condition->GetSignature().value(),
wrong_type_condition, good_body));
EXPECT_THAT(
wrong_condition_operator->InferAttributes({Attr(GetQType<int64_t>())}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("incorrect return type of the condition of "
"`anonymous.while_loop` while loop for input types "
"(INT64): expected OPTIONAL_UNIT, got INT64")));
}
TEST(WhileLoopTest, WhileLoopOperatorWrongBody) {
ASSERT_OK_AND_ASSIGN(
auto condition,
MakeLambdaOperator(
CallOp("core.equal", {Placeholder("param"), Placeholder("param")})));
ASSERT_OK_AND_ASSIGN(
auto wrong_type_body,
MakeLambdaOperator(CallOp("core.to_float64", {Placeholder("param")})));
ASSERT_OK_AND_ASSIGN(
auto wrong_body_operator,
WhileLoopOperator::Make(condition->GetSignature().value(), condition,
wrong_type_body));
EXPECT_THAT(
wrong_body_operator->InferAttributes({Attr(GetQType<int64_t>())}),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("incorrect return type of the body of "
"`anonymous.while_loop` while loop for input types "
"(INT64): expected INT64, got FLOAT64")));
}
TEST(WhileLoopTest, MakeWhileLoop) {
auto init_x = Leaf("x");
auto init_y = Leaf("y");
ASSERT_OK_AND_ASSIGN(
auto loop_condition,
CallOp("core.not_equal", {Placeholder("y"), Literal<int64_t>(0)}));
auto new_x = Placeholder("y");
ASSERT_OK_AND_ASSIGN(
auto new_y, CallOp("math.mod", {Placeholder("x"), Literal<int64_t>(57)}));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr while_loop,
MakeWhileLoop({{"x", init_x}, {"y", init_y}}, loop_condition,
{{"x", new_x}, {"y", new_y}}));
EXPECT_THAT(
while_loop->node_deps(),
ElementsAre(EqualsExpr(CallOp(
"namedtuple.make",
{Literal(Text("x,y")),
CallOp("core.cast",
{Leaf("x"), CallOp("qtype.qtype_of", {Leaf("y")}),
Literal(true)}),
CallOp(
"core.cast",
{Leaf("y"),
CallOp("qtype.qtype_of",
{CallOp("math.mod", {Leaf("x"), Literal<int64_t>(57)})}),
Literal(true)})}))));
auto while_loop_op =
dynamic_cast<const WhileLoopOperator*>(while_loop->op().get());
ASSERT_THAT(while_loop_op, NotNull());
ASSERT_OK_AND_ASSIGN(
auto state_field_0,
CallOp("core.get_nth", {Placeholder("loop_state"), Literal<int64_t>(0)}));
ASSERT_OK_AND_ASSIGN(
auto state_field_1,
CallOp("core.get_nth", {Placeholder("loop_state"), Literal<int64_t>(1)}));
auto condition_op =
dynamic_cast<const LambdaOperator*>(while_loop_op->condition().get());
ASSERT_THAT(condition_op, NotNull());
EXPECT_THAT(condition_op->lambda_body(),
EqualsExpr(CallOp("core.not_equal",
{state_field_1, Literal<int64_t>(0)})));
auto body_op =
dynamic_cast<const LambdaOperator*>(while_loop_op->body().get());
ASSERT_THAT(body_op, NotNull());
EXPECT_THAT(
body_op->lambda_body(),
EqualsExpr(
CallOp("namedtuple.make",
{Literal(Text("x,y")), state_field_1,
CallOp("math.mod", {state_field_0, Literal<int64_t>(57)})})));
ASSERT_OK_AND_ASSIGN(
QTypePtr good_state_type,
MakeNamedTupleQType({"x", "y"}, MakeTupleQType({GetQType<int64_t>(),
GetQType<int64_t>()})));
EXPECT_THAT(while_loop_op->InferAttributes({Attr(good_state_type)}),
IsOkAndHolds(EqualsAttr(good_state_type)));
ASSERT_OK_AND_ASSIGN(
QTypePtr wrong_state_type,
MakeNamedTupleQType({"x", "y"}, MakeTupleQType({GetQType<int64_t>(),
GetQType<Bytes>()})));
EXPECT_THAT(while_loop_op->InferAttributes({Attr(wrong_state_type)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("in condition of `anonymous.while_loop` "
"while loop")));
}
TEST(WhileLoopTest, MakeWhileLoopErrors) {
auto leaf_x = Leaf("x");
ASSERT_OK_AND_ASSIGN(
auto condition_with_x,
CallOp("core.not_equal", {Placeholder("x"), Literal<int64_t>(0)}));
auto placeholder_x = Placeholder("x");
auto placeholder_y = Placeholder("y");
EXPECT_THAT(
MakeWhileLoop({{"x", leaf_x}}, condition_with_x,
{{"x", placeholder_x}, {"y", placeholder_x}}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("no initial value given for the loop state variable `y`")));
EXPECT_THAT(
MakeWhileLoop({{"x", leaf_x}}, condition_with_x, {{"x", placeholder_y}}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("no initial value given for the loop state variable `y`")));
ASSERT_OK_AND_ASSIGN(
auto condition_with_y,
CallOp("core.not_equal", {Placeholder("y"), Literal<int64_t>(0)}));
EXPECT_THAT(
MakeWhileLoop({{"x", leaf_x}}, condition_with_y, {{"x", placeholder_x}}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("no initial value given for the loop state variable `y`")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/while_loop/while_loop_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
007ea024-5d57-428a-86ad-a1f23ced3e5b | cpp | tensorflow/tensorflow | make_deterministic | tensorflow/core/grappler/optimizers/data/make_deterministic.cc | tensorflow/core/grappler/optimizers/data/make_deterministic_test.cc | #include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include <algorithm>
#include <array>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/optimizers/data/split_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kInterleaveOp[] = "InterleaveDataset";
constexpr char kParallelInterleaveOp[] = "ParallelInterleaveDataset";
constexpr char kLegacyParallelInterleaveOp[] =
"LegacyParallelInterleaveDatasetV2";
constexpr char kMapOp[] = "MapDataset";
constexpr char kParallelMapOp[] = "ParallelMapDataset";
constexpr char kParallelMapOpV2[] = "ParallelMapDatasetV2";
constexpr char kMapAndBatchOp[] = "MapAndBatchDataset";
constexpr char kBatchOp[] = "BatchDataset";
constexpr char kBatchV2Op[] = "BatchDatasetV2";
constexpr char kParallelBatchOp[] = "ParallelBatchDataset";
constexpr char kPrefetchOp[] = "PrefetchDataset";
constexpr std::array<const char*, 9> kDeterministicStatefulOps = {
"TextLineDataset", "FixedLengthRecordDataset", "TFRecordDataset",
"TensorSliceDataset", "RangeDataset", "SSTableDataset", "RecordIODataset",
"Print", "Assert"};
constexpr std::array<const char*, 13> kDeterministicStatefulOpsWhenAsync = {
"RandomUniform",
"RandomUniformInt",
"RandomStandardNormal",
"ParameterizedTruncatedNormal",
"TruncatedNormal",
"RandomShuffle",
"Multinomial",
"RandomGamma",
"RandomGammaGrad",
"RandomPoisson",
"RandomCrop",
"SampleDistortedBoundingBox",
"SampleDistortedBoundingBoxV2"};
bool IsDeterministicWhenRunInParallel(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsDeterministicWhenRunAsynchronously(const std::string& stateful_op) {
for (auto op_in_array : kDeterministicStatefulOps) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
for (auto op_in_array : kDeterministicStatefulOpsWhenAsync) {
if (data::MatchesAnyVersion(op_in_array, stateful_op)) {
return true;
}
}
return false;
}
bool IsParallelInterleave(const std::string& op) {
return data::MatchesAnyVersion(kParallelInterleaveOp, op) ||
op == kLegacyParallelInterleaveOp;
}
bool IsParallelMap(const std::string& op) {
return data::MatchesAnyVersion(kParallelMapOp, op);
}
bool IsParallelBatch(const std::string& op) {
return data::MatchesAnyVersion(kParallelBatchOp, op);
}
bool IsMapAndBatch(const std::string& op) {
return data::MatchesAnyVersion(kMapAndBatchOp, op);
}
bool IsPrefetch(const std::string& op) {
return data::MatchesAnyVersion(kPrefetchOp, op);
}
bool IntroducesFunctionParallelism(const std::string& op) {
return IsParallelInterleave(op) || IsParallelMap(op) || IsMapAndBatch(op);
}
bool IntroducesAsynchrony(const std::string& op) {
return IntroducesFunctionParallelism(op) || IsPrefetch(op) ||
IsParallelBatch(op);
}
absl::flat_hash_map<absl::string_view, const NodeDef*> NameToNode(
const FunctionDef& function) {
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node;
for (const NodeDef& node : function.node_def()) {
name_to_node.insert({node.name(), &node});
}
return name_to_node;
}
NodeDef* GetMutableNode(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
return graph->graph()->mutable_node(index);
}
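// Rewrites a ParallelInterleave or ParallelMap node into its sequential
// counterpart. Inputs the sequential op does not take (e.g.
// num_parallel_calls) are demoted to control inputs, and the
// "deterministic" and "sloppy" attributes are dropped.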
Status ConvertMapOrInterleave(const string& node_name,
MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
auto Targuments = node->attr().find("Targuments");
if (Targuments == node->attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
int num_inputs_after_rewrite;
if (IsParallelInterleave(node->op())) {
node->set_op(kInterleaveOp);
num_inputs_after_rewrite = 3 + Targuments->second.list().type_size();
} else {
DCHECK(IsParallelMap(node->op()));
node->set_op(kMapOp);
num_inputs_after_rewrite = 1 + Targuments->second.list().type_size();
}
int inputs_processed = 0;
for (int i = 0; i < node->input_size(); i++) {
std::string input = node->input(i);
if (IsControlInput(input)) {
continue;
}
if (inputs_processed >= num_inputs_after_rewrite) {
node->set_input(i, absl::StrCat("^", input));
}
inputs_processed++;
}
if (inputs_processed < num_inputs_after_rewrite) {
return errors::Internal("Found only ", inputs_processed, " inputs to node ",
node_name, ", but expected to find at least ",
num_inputs_after_rewrite);
}
node->mutable_attr()->erase("deterministic");
node->mutable_attr()->erase("sloppy");
return absl::OkStatus();
}
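// Returns `nodes` plus every node of `function_def` they transitively
// depend on, following both data and control inputs.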
absl::flat_hash_set<absl::string_view> GetAllTransitiveDependencies(
const FunctionDef& function_def,
const absl::flat_hash_set<absl::string_view>& nodes) {
std::vector<absl::string_view> nodes_to_process;
std::copy(nodes.begin(), nodes.end(), std::back_inserter(nodes_to_process));
absl::flat_hash_map<absl::string_view, const NodeDef*> name_to_node =
NameToNode(function_def);
absl::flat_hash_set<absl::string_view> dependencies;
while (!nodes_to_process.empty()) {
absl::string_view node_name = nodes_to_process.back();
nodes_to_process.pop_back();
if (dependencies.contains(node_name)) {
continue;
}
dependencies.insert(node_name);
auto iter = name_to_node.find(node_name);
if (iter == name_to_node.end()) {
continue;
}
for (absl::string_view inp : iter->second->input()) {
absl::string_view inp_node = inp.substr(0, inp.find(':'));
if (inp_node.at(0) == '^') {
inp_node = inp_node.substr(1);
}
if (name_to_node.contains(inp_node)) {
nodes_to_process.push_back(inp_node);
}
}
}
return dependencies;
}
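// Splits a ParallelMapDatasetV2 or MapAndBatchDataset whose function
// contains nondeterministic nodes into two maps: a sequential MapDataset
// running the half of the function that holds `nondeterministic_nodes` (and
// their transitive dependencies), followed by the original parallel op
// running the remaining, deterministic half. Fanouts of the original node
// are redirected to the second map.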
Status SplitMap(
const FunctionLibraryDefinition& library, const string& map_node_name,
MutableGraphView* graph,
const absl::flat_hash_set<absl::string_view>& nondeterministic_nodes) {
NodeDef* map_node = GetMutableNode(map_node_name, graph);
NameAttrList func = map_node->attr().at("f").func();
const FunctionDef* function_def = library.Find(func.name());
if (!function_def) {
return errors::Internal("Could not look up function ", func.name(),
" in FunctionLibraryDefinition");
}
absl::flat_hash_set<absl::string_view> nodes_to_move =
GetAllTransitiveDependencies(*function_def, nondeterministic_nodes);
VLOG(2) << "Will move nodes to nonparallel function: "
<< absl::StrJoin(nodes_to_move, ", ");
int64_t num_captured_arguments =
map_node->attr().find("Targuments")->second.list().type_size();
TF_ASSIGN_OR_RETURN(
split_utils::SplitResults split_results,
split_utils::SplitFunction(*function_def, nodes_to_move,
num_captured_arguments, library));
if (split_results.first_function_output_types.empty()) {
return errors::Unimplemented(
"The case where the first function has no outputs is unimplemented.");
}
bool is_map_and_batch = map_node->op() == kMapAndBatchOp;
NodeDef* first_map_node_ptr;
{
NodeDef first_map_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_sequential_map/", map_node->name()),
graph->graph(), &first_map_node);
first_map_node.set_op(kMapOp);
int num_control_deps = NumControlInputs(*map_node);
int num_extra_inputs = is_map_and_batch ? 3 : 1;
int control_deps_index = map_node->input_size() - num_control_deps;
int extra_inputs_index = control_deps_index - num_extra_inputs;
for (int i = 0; i < extra_inputs_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
for (int i = extra_inputs_index; i < control_deps_index; i++) {
DCHECK(!IsControlInput(map_node->input(i)));
first_map_node.add_input(absl::StrCat("^", map_node->input(i)));
}
for (int i = control_deps_index; i < map_node->input_size(); i++) {
DCHECK(IsControlInput(map_node->input(i)));
first_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*first_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.first_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &first_map_node);
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &first_map_node);
}
}
AddNodeAttr("output_types", split_results.first_function_output_types,
&first_map_node);
TensorShapeProto unknown_shape;
unknown_shape.set_unknown_rank(true);
std::vector<TensorShapeProto> output_shapes(
split_results.first_function_output_types.size(), unknown_shape);
AddNodeAttr("output_shapes", output_shapes, &first_map_node);
first_map_node_ptr = graph->AddNode(std::move(first_map_node));
}
NodeDef* second_map_node_ptr;
{
NodeDef second_map_node;
string node_name =
map_node->op() == kMapAndBatchOp ? "map_and_batch" : "parallel_map";
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("make_deterministic_parallel_", node_name, "/",
map_node->name()),
graph->graph(), &second_map_node);
second_map_node.set_op(map_node->op());
second_map_node.add_input(first_map_node_ptr->name());
for (int i = 1; i < map_node->input_size(); i++) {
second_map_node.add_input(map_node->input(i));
}
NameAttrList* name_attr_list =
(*second_map_node.mutable_attr())["f"].mutable_func();
name_attr_list->set_name(split_results.second_function.signature().name());
graph_utils::CopyAttribute("Targuments", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_types", *map_node, &second_map_node);
graph_utils::CopyAttribute("output_shapes", *map_node, &second_map_node);
if (!is_map_and_batch) {
AddNodeAttr("deterministic", "true", &second_map_node);
}
for (auto key : {"use_inter_op_parallelism", "preserve_cardinality"}) {
if (gtl::FindOrNull(map_node->attr(), key)) {
graph_utils::CopyAttribute(key, *map_node, &second_map_node);
}
}
second_map_node_ptr = graph->AddNode(std::move(second_map_node));
}
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(map_node->name(), second_map_node_ptr->name()));
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.first_function;
*graph->graph()->mutable_library()->mutable_function()->Add() =
split_results.second_function;
return absl::OkStatus();
}
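// Rewrites a ParallelBatchDataset into a BatchDatasetV2, demoting the
// num_parallel_calls input to a control input and dropping the
// "deterministic" attribute.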
Status ConvertBatch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
node->set_op(kBatchV2Op);
std::string num_parallel_calls_input = node->input(2);
node->set_input(2, node->input(3));
node->set_input(3, absl::StrCat("^", num_parallel_calls_input));
node->mutable_attr()->erase("deterministic");
return absl::OkStatus();
}
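// Rewrites a MapAndBatchDataset into a sequential MapDataset followed by a
// BatchDatasetV2. The map's output shapes are the original shapes with the
// leading (batch) dimension stripped; batch_size, num_parallel_calls and
// drop_remainder become control inputs of the map node.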
Status ConvertMapAndBatch(const string& node_name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(node_name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << node_name
<< " in the optimized graph.";
const NodeDef& orig_node = graph->graph()->node(index);
auto Targuments = orig_node.attr().find("Targuments");
if (Targuments == orig_node.attr().end()) {
return errors::Internal("Failed to find Targuments attribute for node ",
node_name);
}
NodeDef new_map_node;
new_map_node.set_op(kMapOp);
graph_utils::SetUniqueGraphNodeName(kMapOp, graph->graph(), &new_map_node);
int num_map_inputs = 1 + Targuments->second.list().type_size();
for (int i = 0; i < num_map_inputs; i++) {
new_map_node.add_input(orig_node.input(i));
}
for (int i = num_map_inputs; i < orig_node.input_size(); i++) {
if (IsControlInput(orig_node.input(i))) {
new_map_node.add_input(orig_node.input(i));
} else {
new_map_node.add_input(absl::StrCat("^", orig_node.input(i)));
}
}
for (auto key : {"f", "Targuments", "output_types"}) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
for (auto key : {"preserve_cardinality"}) {
if (gtl::FindOrNull(new_map_node.attr(), key)) {
graph_utils::CopyAttribute(key, orig_node, &new_map_node);
}
}
auto orig_output_shapes = orig_node.attr().find("output_shapes");
if (orig_output_shapes == orig_node.attr().end()) {
return errors::Internal("Failed to find output_shapes attribute for node ",
node_name);
}
AttrValue& map_output_shapes =
(*new_map_node.mutable_attr())["output_shapes"];
for (const TensorShapeProto& orig_shape :
orig_output_shapes->second.list().shape()) {
TensorShapeProto* new_shape = map_output_shapes.mutable_list()->add_shape();
if (orig_shape.unknown_rank()) {
new_shape->set_unknown_rank(true);
} else if (orig_shape.dim_size() == 0) {
return errors::Internal(
"Output shape of MapAndBatch node cannot be scalar");
} else {
for (int i = 1; i < orig_shape.dim_size(); i++) {
*new_shape->add_dim() = orig_shape.dim(i);
}
}
}
NodeDef new_batch_node;
new_batch_node.set_op(kBatchV2Op);
graph_utils::SetUniqueGraphNodeName(kBatchOp, graph->graph(),
&new_batch_node);
new_batch_node.add_input(new_map_node.name());
new_batch_node.add_input(orig_node.input(num_map_inputs));
  // Input (num_map_inputs + 1) is num_parallel_calls, which is intentionally
  // skipped; (num_map_inputs + 2) is drop_remainder.
  new_batch_node.add_input(orig_node.input(num_map_inputs + 2));
graph_utils::CopyShapesAndTypesAttrs(orig_node, &new_batch_node);
graph->AddNode(std::move(new_map_node));
NodeDef* graph_batch_node = graph->AddNode(std::move(new_batch_node));
TF_RETURN_IF_ERROR(
graph->UpdateFanouts(orig_node.name(), graph_batch_node->name()));
return absl::OkStatus();
}
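// Makes a PrefetchDataset synchronous by replacing its buffer_size input
// with a new scalar constant 0, while keeping a control dependency on the
// original buffer_size tensor.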
Status ConvertPrefetch(const string& node_name, MutableGraphView* graph) {
NodeDef* node = GetMutableNode(node_name, graph);
constexpr int buffer_size_index = 1;
node->add_input(absl::StrCat("^", node->input(buffer_size_index)));
NodeDef* tmp = graph_utils::AddScalarConstNode<int64_t>(0, graph);
node->set_input(buffer_size_index, tmp->name());
return absl::OkStatus();
}
enum class NondeterminismType { PARALLELISM, ASYNCHRONY };
bool IsDeterministicStatefulOp(NondeterminismType type,
const std::string& stateful_op) {
return type == NondeterminismType::PARALLELISM
? IsDeterministicWhenRunInParallel(stateful_op)
: IsDeterministicWhenRunAsynchronously(stateful_op);
}
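// The two mutually recursive functions below scan a function (including any
// functions referenced through node attributes or function ops) for stateful
// ops that are not known to be deterministic under the given nondeterminism
// type. Lookup failures are treated conservatively as nondeterministic. When
// `nondeterministic_nodes` is non-null, offending top-level node names are
// collected instead of returning at the first hit.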
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed);
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed,
absl::flat_hash_set<absl::string_view>* nondeterministic_nodes) {
if (functions_processed->contains(function_name)) {
return false;
}
functions_processed->insert(function_name);
const FunctionDef* function_def = library.Find(function_name);
if (!function_def) {
VLOG(2) << "Could not look up function " << function_name
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool found = false;
for (const NodeDef& node_def : function_def->node_def()) {
bool nondeterministic = FunctionNodeMayIntroduceNondeterminism(
library, node_def, nondeterminism_type, functions_processed);
if (nondeterministic) {
if (nondeterministic_nodes) {
nondeterministic_nodes->insert(node_def.name());
found = true;
} else {
return true;
}
}
}
return found;
}
bool FunctionMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const std::string& function_name,
NondeterminismType nondeterminism_type) {
absl::flat_hash_set<string> functions_processed;
return FunctionMayIntroduceNondeterminism(library, function_name,
nondeterminism_type,
&functions_processed, nullptr);
}
bool FunctionNodeMayIntroduceNondeterminism(
const FunctionLibraryDefinition& library, const NodeDef& node_def,
NondeterminismType nondeterminism_type,
absl::flat_hash_set<std::string>* functions_processed) {
const OpRegistrationData* op_reg_data = nullptr;
Status s = library.LookUp(node_def.op(), &op_reg_data);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in FunctionLibraryDefinition, so rewriting op to be safe";
return true;
}
bool is_function_op = op_reg_data->is_function_op;
bool is_stateful = false;
if (!is_function_op) {
const OpDef* op_def;
s = OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def);
if (!s.ok()) {
VLOG(2) << "Could not look up op " << node_def.op()
<< " in OpRegistry, so rewriting op to be safe";
return true;
}
is_stateful = op_def->is_stateful();
}
if (is_stateful && !IsStatefulPartitionedCall((node_def)) &&
!IsIf(node_def) && !IsWhile(node_def) &&
!IsDeterministicStatefulOp(nondeterminism_type, node_def.op())) {
VLOG(2) << "Will rewrite due to op: " << node_def.op();
return true;
}
std::vector<std::string> attr_func_names;
for (const auto& attr : node_def.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
if (is_function_op) {
attr_func_names.push_back(node_def.op());
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
nondeterminism_type,
functions_processed, nullptr)) {
return true;
}
}
return false;
}
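// Returns true if `node` is a dataset op whose function-valued attributes
// may introduce nondeterminism when executed asynchronously. Ops missing
// from the registry are assumed not to be dataset ops.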
bool NodeMayIntroduceNondeterminismWhenAsync(
const FunctionLibraryDefinition& library, const NodeDef& node) {
const OpDef* op_def;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op_def);
if (s.code() == error::NOT_FOUND) {
return false;
} else if (!s.ok()) {
return true;
}
if (data::DatasetOpKernel::IsDatasetOp(*op_def)) {
std::vector<std::string> attr_func_names;
for (const auto& attr : node.attr()) {
if (attr.second.has_func()) {
attr_func_names.push_back(attr.second.func().name());
}
for (const auto& name_attr_list : attr.second.list().func()) {
attr_func_names.push_back(name_attr_list.name());
}
}
for (const std::string& inner_function_name : attr_func_names) {
if (FunctionMayIntroduceNondeterminism(library, inner_function_name,
NondeterminismType::ASYNCHRONY)) {
return true;
}
}
}
return false;
}
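// Returns true if any node in the graph, or in the body of any function in
// the library, may introduce nondeterminism when executed asynchronously.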
bool GraphMayHaveAsyncNondeterminism(const FunctionLibraryDefinition& library,
const GraphDef& graph) {
for (const NodeDef& node : graph.node()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
for (const string& function_name : library.ListFunctionNames()) {
const FunctionDef* function_def = library.Find(function_name);
CHECK(function_def);
for (const NodeDef& node : function_def->node_def()) {
if (NodeMayIntroduceNondeterminismWhenAsync(library, node)) {
return true;
}
}
}
return false;
}
}  // namespace
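// Main pass: forces sloppy=false and deterministic="true" wherever those
// attributes exist, then rewrites dataset ops that still introduce
// nondeterminism, either through asynchrony (only when the graph as a whole
// may be async-nondeterministic) or through running a nondeterministic
// function in parallel. For the latter case on ParallelMapDatasetV2 and
// MapAndBatchDataset, SplitMap is attempted first so the deterministic part
// of the function can stay parallel.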
Status MakeDeterministic::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
absl::flat_hash_set<string> nodes_to_delete;
bool remove_async_nodes =
GraphMayHaveAsyncNondeterminism(function_library, item.graph);
for (const NodeDef& node : item.graph.node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["sloppy"].set_b(false);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op())) {
NodeDef* mutable_node = GetMutableNode(node.name(), &graph);
(*mutable_node->mutable_attr())["deterministic"].set_s("true");
stats->num_changes++;
}
bool rewrite_due_to_async =
IntroducesAsynchrony(node.op()) && remove_async_nodes;
absl::flat_hash_set<std::string> functions_processed;
absl::flat_hash_set<absl::string_view> nondeterministic_nodes;
bool rewrite_due_to_parallelism =
IntroducesFunctionParallelism(node.op()) &&
FunctionMayIntroduceNondeterminism(
function_library, node.attr().at("f").func().name(),
NondeterminismType::PARALLELISM, &functions_processed,
&nondeterministic_nodes);
if (!rewrite_due_to_async && !rewrite_due_to_parallelism) {
continue;
}
VLOG(1) << "Rewriting node " << node.name() << " (" << node.op()
<< ") because it introduces nondeterminism through "
<< (rewrite_due_to_async ? "asynchrony" : "parallelism");
bool maybe_can_split =
!rewrite_due_to_async &&
(node.op() == kParallelMapOpV2 || IsMapAndBatch(node.op()));
if (maybe_can_split) {
Status s = SplitMap(function_library, node.name(), &graph,
nondeterministic_nodes);
if (s.ok()) {
VLOG(1) << "Split node " << node.name() << " (" << node.op()
<< ") into two map nodes: a nonparallel version and a "
"parallel version.";
nodes_to_delete.insert(node.name());
continue;
} else if (s.code() == error::UNIMPLEMENTED) {
VLOG(1) << "Could not move stateful ops to their own function, so will "
"convert node "
<< node.name()
<< " to a nonparallel version instead. Reason: " << s;
} else {
return s;
}
}
if (IsPrefetch(node.op())) {
TF_RETURN_IF_ERROR(ConvertPrefetch(node.name(), &graph));
} else if (IsMapAndBatch(node.op())) {
TF_RETURN_IF_ERROR(ConvertMapAndBatch(node.name(), &graph));
nodes_to_delete.insert(node.name());
} else if (IsParallelBatch(node.op())) {
TF_RETURN_IF_ERROR(ConvertBatch(node.name(), &graph));
} else {
DCHECK(IsParallelInterleave(node.op()) || IsParallelMap(node.op()));
TF_RETURN_IF_ERROR(ConvertMapOrInterleave(node.name(), &graph));
}
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MakeDeterministic, "make_deterministic");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/make_deterministic.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
std::vector<string> GetNodeNames(const FunctionDef& func) {
std::vector<string> node_names;
for (const NodeDef& node : func.node_def()) {
node_names.push_back(node.name());
}
return node_names;
}
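// Parameterized over (deterministic attribute value, whether the op under
// test is MapAndBatchDataset rather than ParallelMapDatasetV2).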
class SplitMapTest : public ::testing::TestWithParam<std::tuple<bool, bool>> {};
TEST_P(SplitMapTest, SplitMapFunction) {
using test::function::NDef;
GrapplerItem item;
bool deterministic, rewrite_map_and_batch;
std::tie(deterministic, rewrite_map_and_batch) = GetParam();
if (deterministic && rewrite_map_and_batch) {
LOG(INFO) << "Skipping test because MapAndBatch does not have "
"'deterministic' attribute";
return;
}
FunctionDef orig_func_def = FunctionDefHelper::Create(
"MyFunction",
{"a1: float", "a2: float", "a3: double"},
{"o1: float", "o2: double"},
{},
{
{{"i1"}, "Identity", {"a2"}, {{"T", DT_FLOAT}}},
{{"i2"}, "Identity", {"i1:output"}, {{"T", DT_FLOAT}}},
{{"stateful"},
"SampleDistortedBoundingBox",
{"a1", "i2:output"},
{{"T", DT_FLOAT}}},
{{"i3"}, "Identity", {"stateful:bboxes:0"}, {{"T", DT_FLOAT}}},
{{"i4"}, "Identity", {"a3"}, {{"T", DT_DOUBLE}}},
},
{{"o1", "i3:output"}, {"o2", "i4:output"}});
NodeDef orig_map_node_def;
if (rewrite_map_and_batch) {
orig_map_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map", "range", "batch_size", "num_parallel_calls", "drop_remainder",
"MyFunction");
} else {
orig_map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "MyFunction",
deterministic ? "true" : "false", false);
}
orig_map_node_def.add_input("^start");
AttrValue* attr_val = &(*orig_map_node_def.mutable_attr())["Targuments"];
SetAttrValue(std::vector<DataType>{DT_DOUBLE}, attr_val);
(*orig_map_node_def.mutable_attr())["preserve_cardinality"].set_b(true);
attr_val = &(*orig_map_node_def.mutable_attr())["output_types"];
SetAttrValue(std::vector<DataType>{DT_FLOAT, DT_DOUBLE}, attr_val);
attr_val = &(*orig_map_node_def.mutable_attr())["output_shapes"];
SetAttrValue(std::vector<TensorShape>{{1}, {1}}, attr_val);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
orig_map_node_def},
{orig_func_def});
MakeDeterministic optimizer;
GraphDef output;
VLOG(1) << "GraphDef before optimization:\n"
<< item.graph.DebugString() << "\n\n";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
VLOG(1) << "GraphDef after optimization:\n" << output.DebugString() << "\n\n";
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef first_map_node_def = output.node(index);
if (rewrite_map_and_batch) {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^batch_size", "^num_parallel_calls",
"^drop_remainder", "^start"));
} else {
ASSERT_THAT(
first_map_node_def.input(),
::testing::ElementsAre("range", "^num_parallel_calls", "^start"));
}
std::vector<DataType> t_arguments;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
std::vector<DataType> output_types;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT));
std::vector<TensorShapeProto> output_shapes;
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_TRUE(shape.unknown_rank());
}
bool preserve_cardinality;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef first_func = output.library().function(index);
ASSERT_TRUE(first_func.signature().is_stateful());
ASSERT_THAT(GetNodeNames(first_func),
::testing::UnorderedElementsAre("i1", "i2", "stateful"));
NodeDef second_map_node_def;
if (rewrite_map_and_batch) {
index = graph_utils::FindGraphNodeWithOp("MapAndBatchDataset", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(), "batch_size",
"num_parallel_calls", "drop_remainder",
"^start"));
} else {
index = graph_utils::FindGraphNodeWithOp("ParallelMapDatasetV2", output);
CHECK_GE(index, 0);
second_map_node_def = output.node(index);
ASSERT_THAT(second_map_node_def.input(),
::testing::ElementsAre(first_map_node_def.name(),
"num_parallel_calls", "^start"));
ASSERT_EQ(second_map_node_def.attr().at("deterministic").s(), "true");
}
t_arguments.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "Targuments", &t_arguments));
ASSERT_THAT(t_arguments, ::testing::ElementsAre(DT_DOUBLE));
output_types.clear();
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "output_types", &output_types));
ASSERT_THAT(output_types, ::testing::ElementsAre(DT_FLOAT, DT_DOUBLE));
output_shapes.clear();
TF_ASSERT_OK(
GetNodeAttr(first_map_node_def, "output_shapes", &output_shapes));
for (const TensorShapeProto& shape : output_shapes) {
ASSERT_EQ(shape.dim_size(), 0);
}
TF_ASSERT_OK(GetNodeAttr(first_map_node_def, "preserve_cardinality",
&preserve_cardinality));
ASSERT_TRUE(preserve_cardinality);
TF_ASSERT_OK(GetNodeAttr(second_map_node_def, "f", &f));
ASSERT_EQ(f.attr_size(), 0);
index = graph_utils::FindGraphFunctionWithName(f.name(), output.library());
CHECK_GE(index, 0);
FunctionDef second_func = output.library().function(index);
ASSERT_THAT(GetNodeNames(second_func),
::testing::UnorderedElementsAre("i3", "i4"));
}
INSTANTIATE_TEST_SUITE_P(Test, SplitMapTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
FunctionDef OuterXTimesTwo() {
return FunctionDefHelper::Define(
"OuterXTimesTwo",
{"x: float"},
{"y: float"},
{},
{{{"y"},
"PartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterRandomUniform() {
return FunctionDefHelper::Define(
"OuterRandomUniform",
{"x: float"},
{"random_uniform: int64"},
{},
{{{"random_uniform"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_FLOAT}},
{"Tout", DataTypeSlice{DT_INT64}},
{"f", FunctionDefHelper::FunctionRef("RandomUniformFn",
{{"T", DT_FLOAT}})}}}});
}
FunctionDef OuterReadResourceVariable() {
return FunctionDefHelper::Define(
"OuterReadResourceVariable",
{"x: resource"},
{"y: float"},
{},
{{{"y"},
"StatefulPartitionedCall",
{"x"},
{{"Tin", DataTypeSlice{DT_RESOURCE}},
{"Tout", DataTypeSlice{DT_FLOAT}},
{"f", FunctionDefHelper::FunctionRef("ReadResourceVariable", {})}}}});
}
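// Parameterized over (whether the stateful op is nested inside an inner
// function called via PartitionedCall, deterministic attribute value).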
class MakeDeterministicTest
: public ::testing::TestWithParam<std::tuple<bool, bool>> {};
TEST_P(MakeDeterministicTest, NoRewriteInterleave) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic)},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelInterleaveDatasetV2");
ASSERT_EQ(node_def.attr().at("sloppy").b(), false);
}
TEST_P(MakeDeterministicTest, NoRewriteMap) {
using test::function::NDef;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterXTimesTwo" : "XTimesTwo";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false)},
{test::function::XTimesTwo(), OuterXTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelMapDatasetV2");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "ParallelBatchDataset");
ASSERT_EQ(node_def.attr().at("deterministic").s(), "true");
}
TEST_P(MakeDeterministicTest, NoRewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 2);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(1), "buffer_size");
NodeDef buffer_size =
output.node(graph_utils::FindGraphNodeWithName("buffer_size", output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 1);
}
TEST_P(MakeDeterministicTest, RewriteInterleave) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name = nest ? "OuterRandomUniform" : "RandomUniformFn";
NodeDef interleave_node_def = graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", func_name, !deterministic);
interleave_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
interleave_node_def},
{test::function::RandomUniform(), OuterRandomUniform()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("InterleaveDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "cycle_length");
ASSERT_EQ(node_def.input(2), "block_length");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
}
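// Why a map function cannot be split: FUNC_HAS_ATTR exercises a function
// carrying an attribute (presumably unsupported by split_utils), while
// ASYNC_NONDETERMINISM exercises a rewrite triggered by asynchrony, for
// which splitting is never attempted.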
enum CannotSplitReason { FUNC_HAS_ATTR, ASYNC_NONDETERMINISM };
class RewriteMapWithoutSplitTest
: public ::testing::TestWithParam<
std::tuple<bool, bool, CannotSplitReason>> {};
TEST_P(RewriteMapWithoutSplitTest, RewriteMapWithoutSplit) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
CannotSplitReason reason;
std::tie(nest, deterministic, reason) = GetParam();
FunctionDef func;
FunctionDef outer_func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
outer_func = OuterRandomUniform();
(*outer_func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
outer_func = OuterReadResourceVariable();
}
std::string func_name =
nest ? outer_func.signature().name() : func.signature().name();
NodeDef map_node_def = graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false", false);
map_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
map_node_def},
{func, outer_func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "^num_parallel_calls");
ASSERT_EQ(node_def.input(2), "^start");
NameAttrList f;
TF_ASSERT_OK(GetNodeAttr(node_def, "f", &f));
ASSERT_EQ(f.name(), func_name);
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
}
TEST_P(MakeDeterministicTest, RewriteBatch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
NodeDef batch_node_def = graph_tests_utils::MakeParallelBatchNode(
"batch", "map", "batch_size", "num_parallel_calls", "drop_remainder",
deterministic ? "true" : "false");
batch_node_def.add_input("^start");
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapNode("map", "range", func_name),
batch_node_def},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 5);
ASSERT_EQ(node_def.input(0), "map");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "drop_remainder");
ASSERT_EQ(node_def.input(3), "^num_parallel_calls");
ASSERT_EQ(node_def.input(4), "^start");
ASSERT_EQ(node_def.attr().count("deterministic"), 0);
}
TEST_P(MakeDeterministicTest, RewritePrefetch) {
using test::function::NDef;
typedef FunctionDefHelper FDH;
GrapplerItem item;
bool nest, deterministic;
std::tie(nest, deterministic) = GetParam();
std::string func_name =
nest ? "OuterReadResourceVariable" : "ReadResourceVariable";
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("buffer_size", "Const", {},
{{"value", Tensor(int64_t{1})}, {"dtype", DT_INT64}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", func_name,
deterministic ? "true" : "false",
false),
graph_tests_utils::MakePrefetchNode("prefetch", "map", "buffer_size")},
{test::function::ReadResourceVariable(), OuterReadResourceVariable()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("prefetch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.op(), "PrefetchDataset");
ASSERT_EQ(node_def.input_size(), 3);
ASSERT_THAT(node_def.input(0), ::testing::EndsWith("map"));
ASSERT_EQ(node_def.input(2), "^buffer_size");
NodeDef buffer_size = output.node(
graph_utils::FindGraphNodeWithName(node_def.input(1), output));
EXPECT_EQ(buffer_size.attr().at("value").tensor().int64_val(0), 0);
}
INSTANTIATE_TEST_SUITE_P(Test, MakeDeterministicTest,
::testing::Combine(::testing::Bool(),
::testing::Bool()));
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapWithoutSplitTest,
::testing::Combine(::testing::Bool(), ::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
TEST(NoRewriteMapAndBatchTest, NoRewriteMapAndBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{test::function::XTimesTwo()});
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int index = graph_utils::FindGraphNodeWithName("map_and_batch", output);
ASSERT_GE(index, 0);
NodeDef node_def = output.node(index);
ASSERT_EQ(node_def.input_size(), 4);
ASSERT_EQ(node_def.input(0), "range");
ASSERT_EQ(node_def.input(1), "batch_size");
ASSERT_EQ(node_def.input(2), "num_parallel_calls");
ASSERT_EQ(node_def.input(3), "drop_remainder");
}
class RewriteMapAndBatchWithoutSplitTest
: public ::testing::TestWithParam<std::tuple<bool, CannotSplitReason>> {};
TEST_P(RewriteMapAndBatchWithoutSplitTest, RewriteMapAndBatchWithoutSplit) {
using test::function::NDef;
GrapplerItem item;
bool nest;
CannotSplitReason reason;
std::tie(nest, reason) = GetParam();
FunctionDef func;
if (reason == FUNC_HAS_ATTR) {
func = test::function::RandomUniform();
(*func.mutable_attr())["test_attr"].set_s("test_value");
} else {
func = test::function::ReadResourceVariable();
}
NodeDef map_and_batch_node_def = graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", func.signature().name());
SetAttrValue(
absl::Span<const PartialTensorShape>{
{2}, {-1, 3, -1}, PartialTensorShape()},
&(*map_and_batch_node_def.mutable_attr())["output_shapes"]);
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 2}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
map_and_batch_node_def},
{func});
VLOG(1) << "Orig graph: \n" << item.graph.DebugString() << "\n\n";
MakeDeterministic optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
ASSERT_FALSE(graph_utils::ContainsNodeWithOp("MapAndBatchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("MapDataset", output);
ASSERT_GE(index, 0);
NodeDef map_node_def = output.node(index);
ASSERT_EQ(map_node_def.input_size(), 4);
ASSERT_EQ(map_node_def.input(0), "range");
ASSERT_EQ(map_node_def.input(1), "^batch_size");
ASSERT_EQ(map_node_def.input(2), "^num_parallel_calls");
ASSERT_EQ(map_node_def.input(3), "^drop_remainder");
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("f"),
map_node_def.attr().at("f")));
ASSERT_TRUE(AreAttrValuesEqual(map_and_batch_node_def.attr().at("Targuments"),
map_node_def.attr().at("Targuments")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
map_node_def.attr().at("output_types")));
ASSERT_EQ(map_node_def.attr().at("output_shapes").list().shape_size(), 3);
ASSERT_TRUE(PartialTensorShape({}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(0)));
ASSERT_TRUE(PartialTensorShape({3, -1}).IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(1)));
ASSERT_TRUE(PartialTensorShape().IsIdenticalTo(
map_node_def.attr().at("output_shapes").list().shape(2)));
index = graph_utils::FindGraphNodeWithOp("BatchDatasetV2", output);
ASSERT_GE(index, 0);
NodeDef batch_node_def = output.node(index);
ASSERT_EQ(batch_node_def.input_size(), 3);
ASSERT_EQ(batch_node_def.input(0), map_node_def.name());
ASSERT_EQ(batch_node_def.input(1), "batch_size");
ASSERT_EQ(batch_node_def.input(2), "drop_remainder");
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_types"),
batch_node_def.attr().at("output_types")));
ASSERT_TRUE(
AreAttrValuesEqual(map_and_batch_node_def.attr().at("output_shapes"),
batch_node_def.attr().at("output_shapes")));
}
INSTANTIATE_TEST_SUITE_P(
Test, RewriteMapAndBatchWithoutSplitTest,
::testing::Combine(::testing::Bool(),
::testing::Values(FUNC_HAS_ATTR, ASYNC_NONDETERMINISM)));
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_deterministic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_deterministic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
93707a59-8f29-468f-8a8b-35eca835d93e | cpp | tensorflow/tensorflow | bundle_v2 | tensorflow/cc/saved_model/bundle_v2.cc | tensorflow/cc/saved_model/bundle_v2_test.cc | #include "tensorflow/cc/saved_model/bundle_v2.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/fingerprinting.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/byte_order.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/byte_swap_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/strcat.h"
namespace tensorflow {
namespace {
using strings::StrCat;
constexpr char kCCLoadBundleV2Label[] = "cc_load_bundle_v2";
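// Reads the serialized TrackableObjectGraph stored in the checkpoint under
// kObjectGraphProtoKey (a scalar string tensor) and parses it.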
absl::Status ReadCheckpointObjectGraph(BundleReader* bundle_reader,
TrackableObjectGraph* object_graph) {
Tensor object_graph_tensor;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle_reader->Lookup(kObjectGraphProtoKey, &object_graph_tensor),
"SavedModel checkpoint does not contain object graph.");
if (object_graph_tensor.dtype() != DT_STRING ||
object_graph_tensor.dims() != 0 ||
object_graph_tensor.NumElements() != 1) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph was not the correct type.");
}
const tstring* object_graph_string = reinterpret_cast<const tstring*>(
object_graph_tensor.tensor_data().data());
if (!object_graph->ParseFromString(*object_graph_string)) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
"SavedModel checkpoint object graph could not be deserialized.");
}
return absl::OkStatus();
}
}  // namespace
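// Loads a v2 SavedModel: reads the SavedModel proto (which must contain
// exactly one MetaGraphDef), byte-swaps tensor content on big-endian hosts,
// reads optional debug info, opens the variables checkpoint and its object
// graph if present, and records fingerprint/path metrics.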
absl::Status SavedModelV2Bundle::Load(const std::string& export_dir,
SavedModelV2Bundle* const bundle) {
metrics::SavedModelReadApi(kCCLoadBundleV2Label).IncrementBy(1);
SavedModel saved_model_proto;
TF_RETURN_IF_ERROR(ReadSavedModel(export_dir, &saved_model_proto));
metrics::SavedModelReadPath().Set(export_dir);
if (saved_model_proto.meta_graphs_size() != 1) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
strings::StrCat(
"SavedModelV2 should have exactly one MetaGraphDef but actually ",
"contains ", saved_model_proto.meta_graphs_size()));
}
bundle->meta_graph_def_ =
std::move(*saved_model_proto.mutable_meta_graphs(0));
if (!port::kLittleEndian) {
TF_RETURN_IF_ERROR(
ByteSwapTensorContentInMetaGraphDef(&(bundle->meta_graph_def_)));
}
TF_RETURN_IF_ERROR(
ReadSavedModelDebugInfoIfPresent(export_dir, &bundle->debug_info_));
const std::string variables_dir =
io::JoinPath(export_dir, kSavedModelVariablesDirectory);
if (!Env::Default()->FileExists(variables_dir).ok()) {
LOG(INFO)
<< "No checkpoint found, assuming this is a program-only SavedModel";
} else {
const std::string variables_prefix =
io::JoinPath(variables_dir, kSavedModelVariablesFilename);
bundle->variable_reader_ =
std::make_unique<BundleReader>(Env::Default(), variables_prefix);
TF_RETURN_WITH_CONTEXT_IF_ERROR(
bundle->variable_reader_->status(),
"Unable to load SavedModel variables checkpoint from ",
variables_prefix);
TF_RETURN_IF_ERROR(ReadCheckpointObjectGraph(
bundle->variable_reader_.get(), &bundle->trackable_object_graph_));
}
auto fingerprint_proto =
saved_model::fingerprinting::ReadSavedModelFingerprint(export_dir);
if (fingerprint_proto.ok()) {
metrics::SavedModelReadFingerprint().Set(
metrics::MakeFingerprintJson(fingerprint_proto.value()));
TF_ASSIGN_OR_RETURN(
std::string path_and_singleprint,
metrics::MakeSavedModelPathAndSingleprint(
export_dir, saved_model::fingerprinting::Singleprint(
fingerprint_proto.value())));
metrics::SavedModelReadPathAndSingleprint().Set(path_and_singleprint);
}
return absl::OkStatus();
}
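// Walks the checkpoint's TrackableObjectGraph and the SavedObjectGraph in
// lockstep from their roots, invoking `callback` for every trackable object
// that has attributes or slot variables to restore.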
absl::Status SavedModelV2Bundle::VisitObjectsToRestore(
RestoreObjectsCallback callback) {
if (saved_object_graph().nodes_size() == 0 ||
trackable_object_graph().nodes_size() == 0) {
return absl::OkStatus();
}
const SavedObject* root_saved_object = &saved_object_graph().nodes(0);
const TrackableObjectGraph::TrackableObject* root_trackable_object =
&trackable_object_graph().nodes(0);
absl::flat_hash_set<int> trackable_node_ids;
return RecurseObjectsToRestore(root_saved_object, 0, root_trackable_object,
std::string(), &trackable_node_ids,
std::move(callback));
}
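// Depth-first traversal that matches children of `trackable_object` with
// children of `saved_object` by local name. `seen_trackable_node_ids`
// breaks cycles; a trackable child without a matching saved object is an
// error.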
absl::Status SavedModelV2Bundle::RecurseObjectsToRestore(
const SavedObject* saved_object, int saved_object_node_id,
const TrackableObjectGraph::TrackableObject* trackable_object,
std::string object_name, absl::flat_hash_set<int>* seen_trackable_node_ids,
RestoreObjectsCallback callback) {
if (saved_object_node_id != 0 &&
(trackable_object->attributes_size() > 0 ||
trackable_object->slot_variables_size() > 0)) {
TF_RETURN_WITH_CONTEXT_IF_ERROR(
callback(saved_object_node_id, *trackable_object), "Unable to restore ",
object_name);
}
for (const auto& trackable_child_ref : trackable_object->children()) {
const auto& local_name = trackable_child_ref.local_name();
std::string child_name;
if (object_name.empty()) {
child_name = local_name;
} else {
child_name = strings::StrCat(object_name, ".", local_name);
}
int trackable_child_node_id = trackable_child_ref.node_id();
if (!seen_trackable_node_ids->insert(trackable_child_node_id).second) {
continue;
}
if (trackable_child_node_id < 0 ||
trackable_child_node_id >= trackable_object_graph().nodes_size()) {
return errors::FailedPrecondition(
strings::StrCat("Illegal trackable child node id for ", child_name));
}
const auto* trackable_child =
&trackable_object_graph().nodes(trackable_child_node_id);
int saved_child_node_id = -1;
const SavedObject* saved_child = nullptr;
for (const auto& saved_child_ref : saved_object->children()) {
if (saved_child_ref.local_name() == local_name) {
saved_child_node_id = saved_child_ref.node_id();
if (saved_child_node_id >= 0 &&
saved_child_node_id < saved_object_graph().nodes_size()) {
saved_child = &saved_object_graph().nodes(saved_child_node_id);
}
break;
}
}
if (!saved_child) {
return absl::Status(
absl::StatusCode::kFailedPrecondition,
strings::StrCat("Could not find saved object to restore for ",
child_name));
}
TF_RETURN_IF_ERROR(RecurseObjectsToRestore(
saved_child, saved_child_node_id, trackable_child, child_name,
seen_trackable_node_ids, callback));
}
return absl::OkStatus();
}
} | #include "tensorflow/cc/saved_model/bundle_v2.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "json/json.h"
#include "json/reader.h"
#include "json/value.h"
#include "tensorflow/cc/saved_model/metrics.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr char kTestData[] = "cc/saved_model/testdata";
class BundleV2Test : public ::testing::Test {
protected:
BundleV2Test() {}
void RestoreVarsAndVerify(SavedModelV2Bundle* bundle,
std::vector<std::string> expected_names) {
using RestoredVarType = std::tuple<int, std::string, std::string>;
std::vector<RestoredVarType> restored_vars;
TF_ASSERT_OK(bundle->VisitObjectsToRestore(
[&](int saved_node_id,
const TrackableObjectGraph::TrackableObject& trackable_object)
-> absl::Status {
for (const auto& attr : trackable_object.attributes()) {
if (attr.name() == "VARIABLE_VALUE") {
restored_vars.emplace_back(saved_node_id, attr.full_name(),
attr.checkpoint_key());
}
}
return absl::OkStatus();
}));
for (const auto& expected_name : expected_names) {
EXPECT_EQ(1, std::count_if(restored_vars.begin(), restored_vars.end(),
[&](RestoredVarType t) {
return std::get<1>(t) == expected_name;
}));
}
for (const auto& restored_var : restored_vars) {
const auto& saved_node =
bundle->saved_object_graph().nodes(std::get<0>(restored_var));
EXPECT_EQ(std::get<1>(restored_var), saved_node.variable().name());
Tensor value;
TF_ASSERT_OK(
bundle->variable_reader()->Lookup(std::get<2>(restored_var), &value));
}
}
};
TEST_F(BundleV2Test, LoadsVarsAndArithmeticObjectGraph) {
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"variable_x", "variable_y", "child_variable"});
}
TEST_F(BundleV2Test, LoadsCyclicModule) {
const std::string export_dir =
io::JoinPath(testing::TensorFlowSrcRoot(), kTestData, "CyclicModule");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_GT(bundle.trackable_object_graph().nodes_size(), 0);
RestoreVarsAndVerify(&bundle, {"MyVariable"});
}
TEST_F(BundleV2Test, UpdatesMetrics) {
const std::string kCCLoadBundleV2Label = "cc_load_bundle_v2";
const int read_count = metrics::SavedModelReadCount("2").value();
const int api_count =
metrics::SavedModelReadApi(kCCLoadBundleV2Label).value();
const std::string export_dir = io::JoinPath(
testing::TensorFlowSrcRoot(), kTestData, "VarsAndArithmeticObjectGraph");
SavedModelV2Bundle bundle;
TF_ASSERT_OK(SavedModelV2Bundle::Load(export_dir, &bundle));
EXPECT_EQ(metrics::SavedModelReadCount("2").value(), read_count + 1);
EXPECT_EQ(metrics::SavedModelReadApi(kCCLoadBundleV2Label).value(),
api_count + 1);
EXPECT_EQ(metrics::SavedModelReadPath().value(), export_dir);
Json::Value fingerprint = Json::objectValue;
Json::Reader reader = Json::Reader();
reader.parse(metrics::SavedModelReadFingerprint().value(), fingerprint);
EXPECT_EQ(fingerprint["saved_model_checksum"].asUInt64(),
15788619162413586750ULL);
EXPECT_EQ(fingerprint["graph_def_program_hash"].asUInt64(),
706963557435316516ULL);
EXPECT_EQ(fingerprint["signature_def_hash"].asUInt64(),
5693392539583495303ULL);
EXPECT_EQ(fingerprint["saved_object_graph_hash"].asUInt64(),
12074714563970609759ULL);
EXPECT_EQ(fingerprint["checkpoint_hash"].asUInt64(), 10788359570789890102ULL);
TF_ASSERT_OK_AND_ASSIGN(
auto path_and_singleprint,
metrics::ParseSavedModelPathAndSingleprint(
metrics::SavedModelReadPathAndSingleprint().value()));
auto [path, singleprint] = path_and_singleprint;
EXPECT_TRUE(absl::StrContains(
path, absl::StrCat(kTestData, "/VarsAndArithmeticObjectGraph")));
EXPECT_EQ(singleprint,
"706963557435316516/"
"5693392539583495303/"
"12074714563970609759/"
"10788359570789890102");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/bundle_v2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/bundle_v2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
30284e41-2319-4f52-8b80-30cb28be38ab | cpp | tensorflow/tensorflow | dynamic_parameter_binding | third_party/xla/xla/hlo/ir/dynamic_parameter_binding.cc | third_party/xla/xla/service/dynamic_parameter_binding_test.cc | #include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <optional>
#include <ostream>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::Status DynamicParameterBinding::Bind(
const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension) {
auto result = bindings_.emplace(dynamic_dimension, dynamic_parameter);
TF_RET_CHECK(result.second);
return absl::OkStatus();
}
std::optional<DynamicParameterBinding::DynamicSizeParameter>
DynamicParameterBinding::GetBinding(
const DynamicDimension& dynamic_dimension) const {
auto param_iter = bindings_.find(dynamic_dimension);
if (param_iter == bindings_.end()) {
return std::nullopt;
}
return param_iter->second;
}
std::string DynamicParameterBinding::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("DynamicParameterBinding: ");
for (const auto& binding : bindings_) {
const DynamicDimension& dynamic_dimension = binding.first;
const DynamicSizeParameter& dynamic_param = binding.second;
pieces.push_back(absl::StrFormat(
" -- Input param number %lld at %s has dim %lld as dynamic"
" dimension, which is represented by param number %lld at "
"%s",
dynamic_dimension.parameter_num,
dynamic_dimension.parameter_index.ToString(),
dynamic_dimension.dimension, dynamic_param.parameter_num,
dynamic_param.parameter_index.ToString()));
}
return absl::StrJoin(pieces, "\n");
}
absl::Status DynamicParameterBinding::ForEachBinding(BindingFn fn) const {
for (const auto& binding : bindings_) {
TF_RETURN_IF_ERROR(fn(binding.second, binding.first));
}
return absl::OkStatus();
}
absl::Status DynamicParameterBinding::Verify(
const HloComputation& computation) const {
return ForEachBinding([&](const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension)
-> absl::Status {
TF_RET_CHECK(dynamic_parameter.parameter_num >= 0 &&
dynamic_parameter.parameter_num <
computation.num_parameters());
TF_RET_CHECK(dynamic_dimension.parameter_num <
computation.num_parameters());
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_parameter.parameter_num)
->shape(),
dynamic_parameter.parameter_index));
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index));
TF_RET_CHECK(
dynamic_dimension.dimension <
ShapeUtil::GetSubshape(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index)
.rank());
return absl::OkStatus();
});
}
std::ostream& operator<<(std::ostream& out,
const DynamicParameterBinding& binding) {
out << binding.ToString();
return out;
}
} | #include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DynamicParameterBindingTest = HloTestBase;
TEST_F(DynamicParameterBindingTest, SimpleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[10] parameter(1)
ROOT root = (f32[], f32[10]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {}},
DynamicParameterBinding::DynamicDimension{1, {}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
    std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(
            DynamicParameterBinding::DynamicDimension{1, {}, 0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
    std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(
            DynamicParameterBinding::DynamicDimension{0, {1}, 0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBindingWithMultiDimension) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10, 10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10, 10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10, 10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 1}));
auto test = [&](const DynamicParameterBinding& binding) {
    std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(
            DynamicParameterBinding::DynamicDimension{0, {1}, 0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
    std::optional<DynamicParameterBinding::DynamicSizeParameter> param2 =
        binding.GetBinding(
            DynamicParameterBinding::DynamicDimension{0, {1}, 1});
EXPECT_TRUE(param2);
EXPECT_EQ(param2->parameter_num, 0);
EXPECT_EQ(param2->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/dynamic_parameter_binding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_parameter_binding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0e563bc-2bb4-482f-856e-379601a30544 | cpp | tensorflow/tensorflow | const_op_size | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include <climits>
#include "absl/algorithm/container.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Types.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace quant {
namespace {
constexpr int64_t kAssumedNumBytesPerElem = 4;
int64_t GetSizeOfIntOrFloatConst(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
const ElementsAttr const_value = const_op.getValue();
const auto bytes_per_elem =
static_cast<int64_t>(dtype.getIntOrFloatBitWidth() / CHAR_BIT);
return bytes_per_elem * const_value.getNumElements();
}
int64_t GetSizeOfStringConst(TF::ConstOp const_op) {
const ElementsAttr const_value = const_op.getValue();
const auto str_attr = cast<DenseStringElementsAttr>(const_value);
return absl::c_accumulate(
      str_attr.getRawStringData(), int64_t{0},
[](int64_t acc, const StringRef str_value) -> int64_t {
return acc + str_value.size();
});
}
int64_t GetSizeOfUnsupportedTypeConst(TF::ConstOp const_op) {
return kAssumedNumBytesPerElem * const_op.getValue().getNumElements();
}
}
int64_t GetSizeInBytes(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
if (dtype.isIntOrFloat()) {
return GetSizeOfIntOrFloatConst(const_op);
} else if (isa<TF::StringType>(dtype)) {
return GetSizeOfStringConst(const_op);
} else {
return GetSizeOfUnsupportedTypeConst(const_op);
}
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::Eq;
class GetSizeInBytesTest : public ::testing::Test {
protected:
GetSizeInBytesTest() : ctx_() { ctx_.loadDialect<TF::TensorFlowDialect>(); }
MLIRContext ctx_;
};
TF::ConstOp ParseConstOp(const absl::string_view const_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(const_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto const_op = dyn_cast_or_null<TF::ConstOp>(block.front());
EXPECT_TRUE(const_op);
return const_op;
}
TEST_F(GetSizeInBytesTest, Int32ScalarConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
TEST_F(GetSizeInBytesTest, Int32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<2xi32>} : () -> tensor<2xi32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(8));
}
TEST_F(GetSizeInBytesTest, Int8ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<2> : tensor<3xi8>} : () -> tensor<3xi8>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(3));
}
TEST_F(GetSizeInBytesTest, Float32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<4xf32>} : () -> tensor<4xf32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Float64ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<2xf64>} : () -> tensor<2xf64>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Bfloat16ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<1.0> : tensor<7xbf16>} : () -> tensor<7xbf16>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(14));
}
TEST_F(GetSizeInBytesTest, TfStringConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<["Hello World", "Quantization"]> : tensor<2x!tf_type.string>} : () -> tensor<2x!tf_type.string>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(23));
}
TEST_F(GetSizeInBytesTest, ConstOpWithUnknownSizeAssumes4BytesPerElement) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = #tf_type<tensor_proto : "0xDEADBAAD"> : tensor<!tf_type.variant>} : () -> tensor<!tf_type.variant>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
834bb1af-cfe3-4222-b0e6-4f6bafc3759b | cpp | tensorflow/tensorflow | move_copy_to_users | third_party/xla/xla/service/gpu/transforms/move_copy_to_users.cc | third_party/xla/xla/service/gpu/transforms/move_copy_to_users_test.cc | #include "xla/service/gpu/transforms/move_copy_to_users.h"
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class MoveCopyToUsersVisitor : public DfsHloRewriteVisitor {
absl::Status HandlePad(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
HloInstruction* c = hlo->mutable_operand(1);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_pad,
MakePadHlo(copied, c, hlo->padding_config(), &hlo->metadata()));
*earlier_pad->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_pad, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeSliceHlo(copied, hlo->slice_starts(), hlo->slice_limits(),
hlo->slice_strides(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleDynamicSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeDynamicSliceHlo(
copied,
absl::Span<HloInstruction* const>(hlo->operands()).subspan(1),
hlo->dynamic_slice_sizes(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReduceWindow(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reduce_window,
MakeReduceWindowHlo(copied, hlo->mutable_operand(1), hlo->window(),
hlo->called_computations()[0], &hlo->metadata()));
*earlier_reduce_window->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy =
MakeCopyHlo(earlier_reduce_window, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReduce(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy && !hlo->shape().IsTuple()) {
HloInstruction* new_reduce = hlo->AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), {operand->mutable_operand(0),
hlo->mutable_operand(1)}));
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_reduce));
}
return absl::OkStatus();
}
absl::Status HandleBitcastConvert(HloInstruction* hlo) override {
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (hlo->opcode() == HloOpcode::kReducePrecision) {
return absl::OkStatus();
}
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_elementwise,
MakeUnaryHlo(hlo->opcode(), copied, &hlo->metadata()));
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReverse(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reverse,
MakeReverseHlo(copied, hlo->dimensions(), &hlo->metadata()));
HloInstruction* later_copy = MakeCopyHlo(earlier_reverse, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleConvert(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
HloInstruction* earlier_convert = MakeConvertToHlo(
copied, hlo->shape().element_type(), &hlo->metadata());
HloInstruction* later_copy = MakeCopyHlo(earlier_convert, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
HloInstruction* a = hlo->mutable_operand(0);
HloInstruction* b = hlo->mutable_operand(1);
if (a->opcode() == HloOpcode::kCopy && b->opcode() == HloOpcode::kCopy) {
HloInstruction* copied_a = a->mutable_operand(0);
HloInstruction* copied_b = b->mutable_operand(0);
if (copied_a->shape() == copied_b->shape()) {
HloInstruction* earlier_elementwise;
if (hlo->opcode() == HloOpcode::kCompare) {
TF_ASSIGN_OR_RETURN(
earlier_elementwise,
MakeCompareHlo(hlo->comparison_direction(), copied_a, copied_b,
&hlo->metadata()));
} else {
TF_ASSIGN_OR_RETURN(earlier_elementwise,
MakeBinaryHlo(hlo->opcode(), copied_a, copied_b,
&hlo->metadata()));
}
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
}
return absl::OkStatus();
}
absl::Status HandleConcatenate(HloInstruction* hlo) override {
const HloInstruction* first = hlo->operand(0);
if (first->opcode() != HloOpcode::kCopy) {
return absl::OkStatus();
}
const HloInstruction* inner_op = first->operand(0);
const Layout& inner_op_layout = inner_op->shape().layout();
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (HloInstruction* op : hlo->mutable_operands()) {
if (op->opcode() != HloOpcode::kCopy ||
op->operand(0)->shape().layout() != inner_op_layout) {
VLOG(3) << "Mismatch between " << op->ToString()
<< " and expected op layout " << inner_op_layout.ToString();
return absl::OkStatus();
}
new_operands.push_back(op->mutable_operand(0));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_concat,
MakeConcatHlo(new_operands, hlo->concatenate_dimension()));
*new_concat->mutable_shape()->mutable_layout() = inner_op_layout;
HloInstruction* new_copy = MakeCopyHlo(new_concat, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_copy));
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> MoveCopyToUsers::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return MoveCopyToUsersVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/gpu/transforms/move_copy_to_users.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/service/layout_assignment.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class MoveCopyToUsersTest : public HloTestBase {
public:
MoveCopyToUsersTest()
: HloTestBase(true,
true,
LayoutAssignment::InstructionCanChangeLayout) {}
void CheckMoveCopyToUsers(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, MoveCopyToUsers{}, expected);
}
};
TEST_F(MoveCopyToUsersTest, Pad) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = s8[1,17,9,9]{3,1,2,0} parameter(0)
copy = s8[1,17,9,9]{1,3,2,0} copy(input)
constant = s8[] constant(0)
ROOT pad = s8[1,32,9,9]{1,3,2,0} pad(copy, constant), padding=0_0x0_15x0_0x0_0
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Unary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT root = f32[1,17,9,9]{1,3,2,0} sqrt(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reverse) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT root = f32[1,17,9,9]{1,3,2,0} reverse(copy), dimensions={1,2}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Convert) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT converted = f16[1,17,9,9]{1,3,2,0} convert(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Slice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT slice = f32[1,4,6,6]{1,3,2,0} slice(copy), slice={[0:1],[0:4],[0:6],[0:6]}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, DynamicSlice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
p0 = s32[] parameter(1)
p1 = s32[] parameter(2)
p2 = s32[] parameter(3)
p3 = s32[] parameter(4)
ROOT ds = f32[1,4,6,6]{1,3,2,0} dynamic-slice(copy, p0, p1, p2, p3), dynamic_slice_sizes={1,4,6,6}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ReduceWindow) {
const char* hlo = R"(
HloModule R2Window
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2Window {
operand = f32[256,384]{1,0} parameter(0)
c = f32[256,384]{0,1} copy(operand)
constant = f32[] constant(1)
ROOT reduce-window = f32[256,384]{0,1} reduce-window(c, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reduce) {
const char* hlo = R"(
HloModule R2
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2 {
operand = f32[256,384,10]{2,1,0} parameter(0)
c = f32[256,384,10]{0,1,2} copy(operand)
constant = f32[] constant(1)
ROOT reduce = f32[384,10]{0,1} reduce(c, constant), dimensions={0}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Binary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, BinaryDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
TEST_F(MoveCopyToUsersTest, Concat) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[5,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[5,17,9,9]{1,3,2,0} copy(input2)
ROOT concat = f32[6,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ConcatDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT concat = f32[2,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/move_copy_to_users.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/move_copy_to_users_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
df9c3ca4-5178-49e5-9e5f-3c983d3d5701 | cpp | tensorflow/tensorflow | window_util | third_party/xla/xla/window_util.cc | third_party/xla/xla/window_util_test.cc | #include "xla/window_util.h"
#include <functional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/str_cat.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace window_util {
Window MakeWindow(absl::Span<const int64_t> sizes) {
Window window;
for (int64_t size : sizes) {
auto* dimension = window.add_dimensions();
dimension->set_size(size);
dimension->set_stride(1);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
Window MakeWindow(absl::Span<const int64_t> sizes,
absl::Span<const int64_t> strides) {
Window window;
CHECK_EQ(sizes.size(), strides.size());
  for (size_t nb = 0; nb < sizes.size(); ++nb) {
auto* dimension = window.add_dimensions();
dimension->set_size(sizes[nb]);
dimension->set_stride(strides[nb]);
dimension->set_base_dilation(1);
dimension->set_window_dilation(1);
}
return window;
}
PaddingConfig MakeSymmetricPadding(absl::Span<const int64_t> sizes) {
PaddingConfig config;
for (int64_t size : sizes) {
auto* dimension = config.add_dimensions();
dimension->set_edge_padding_low(size);
dimension->set_edge_padding_high(size);
}
return config;
}
std::string ToString(const WindowDimension& dim) {
using absl::StrAppend;
using absl::StrCat;
std::string str = StrCat("(size=", dim.size());
if (dim.stride() != 1) {
StrAppend(&str, ",stride=", dim.stride());
}
if (dim.padding_low() != 0) {
StrAppend(&str, ",padding_low=", dim.padding_low());
}
if (dim.padding_high() != 0) {
StrAppend(&str, ",padding_high=", dim.padding_high());
}
if (dim.base_dilation() != 1) {
StrAppend(&str, ",base_dilation=", dim.base_dilation());
}
if (dim.window_dilation() != 1) {
StrAppend(&str, ",window_dilation=", dim.window_dilation());
}
if (dim.window_reversal()) {
StrAppend(&str, ",window_reversal");
}
StrAppend(&str, ")");
return str;
}
std::string ToString(const Window& window) {
using absl::StrAppend;
using absl::StrCat;
std::string str;
const auto add_field =
[&](const char* heading,
absl::FunctionRef<std::string(const WindowDimension&)> format) {
StrAppend(&str, heading, "=");
const char* prefix = "";
for (const auto& window_dimension : window.dimensions()) {
StrAppend(&str, prefix, format(window_dimension));
prefix = "x";
}
};
if (window.dimensions_size() > 0) {
add_field("size",
[](const WindowDimension& dim) { return StrCat(dim.size()); });
}
if (HasStride(window)) {
add_field(" stride",
[](const WindowDimension& dim) { return StrCat(dim.stride()); });
}
if (HasPadding(window)) {
add_field(" pad", [](const WindowDimension& dim) {
return StrCat(dim.padding_low(), "_", dim.padding_high());
});
}
if (HasBaseDilation(window)) {
add_field(" lhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.base_dilation());
});
}
if (HasWindowDilation(window)) {
add_field(" rhs_dilate", [](const WindowDimension& dim) {
return StrCat(dim.window_dilation());
});
}
if (HasWindowReversal(window)) {
add_field(" rhs_reversal", [](const WindowDimension& dim) {
return StrCat(dim.window_reversal() ? 1 : 0);
});
}
return str;
}
bool HasStride(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.stride() != 1) {
return true;
}
}
return false;
}
bool HasPadding(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.padding_low() != 0 || dim.padding_high() != 0) {
return true;
}
}
return false;
}
bool HasSymmetricPadding(const Window& window) {
return absl::c_all_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() == dim.padding_high();
});
}
bool HasSymmetricPadding(const PaddingConfig& padding_config) {
return absl::c_all_of(padding_config.dimensions(),
[](const PaddingConfig::PaddingConfigDimension& dim) {
return dim.edge_padding_low() ==
dim.edge_padding_high();
});
}
bool HasNegativePadding(const Window& window) {
return absl::c_any_of(window.dimensions(), [](const WindowDimension& dim) {
return dim.padding_low() < 0 || dim.padding_high() < 0;
});
}
bool HasBaseDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.base_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowDilation(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_dilation() != 1) {
return true;
}
}
return false;
}
bool HasWindowReversal(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.window_reversal()) {
return true;
}
}
return false;
}
bool AllOrNoneReversed(const Window& window) {
if (window.dimensions().empty()) {
return true;
}
bool reversed = window.dimensions()[0].window_reversal();
return absl::c_all_of(window.dimensions(), [&](const WindowDimension& dim) {
return dim.window_reversal() == reversed;
});
}
bool HasDilation(const Window& window) {
return HasBaseDilation(window) || HasWindowDilation(window);
}
bool IsTrivialWindowDimension(const WindowDimension& window_dimension) {
return window_dimension.size() == 1 && window_dimension.stride() == 1 &&
window_dimension.padding_low() == 0 &&
window_dimension.padding_high() == 0 &&
window_dimension.window_dilation() == 1 &&
window_dimension.base_dilation() == 1;
}
bool HasOverlappingWindow(const Window& window) {
for (const auto& dim : window.dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
return false;
}
int64_t DilatedBound(int64_t bound, int64_t dilation) {
CHECK_GE(bound, 0);
CHECK_GE(dilation, 1);
if (bound == 0) {
return 0;
}
return (bound - 1) * dilation + 1;
}
int64_t StridedBound(int64_t bound, int64_t window_size, int64_t stride) {
CHECK_GE(window_size, 0);
CHECK_GE(bound, 0);
CHECK_GE(stride, 1);
if (bound == 0 || window_size > bound) {
return 0;
}
return (bound - window_size) / stride + 1;
}
}
} | #include "xla/window_util.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(WindowUtilTest, HasOverlappingWindowTest) {
EXPECT_FALSE(
window_util::HasOverlappingWindow(window_util::MakeWindow({1, 1})));
EXPECT_TRUE(
window_util::HasOverlappingWindow(window_util::MakeWindow({2, 2, 2, 2})));
}
TEST(WindowUtilTest, MakeWindowStrideTest) {
Window w = window_util::MakeWindow({1, 2}, {3, 4});
EXPECT_EQ(w.dimensions()[0].size(), 1);
EXPECT_EQ(w.dimensions()[1].size(), 2);
EXPECT_EQ(w.dimensions()[0].stride(), 3);
EXPECT_EQ(w.dimensions()[1].stride(), 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/window_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/window_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09cb284c-bf40-4c9c-9f0c-54355ea7ec3e | cpp | google/quiche | quic_stream_priority | quiche/quic/core/quic_stream_priority.cc | quiche/quic/core/quic_stream_priority_test.cc | #include "quiche/quic/core/quic_stream_priority.h"
#include <optional>
#include <string>
#include <vector>
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/structured_headers.h"
namespace quic {
std::string SerializePriorityFieldValue(HttpStreamPriority priority) {
quiche::structured_headers::Dictionary dictionary;
if (priority.urgency != HttpStreamPriority::kDefaultUrgency &&
priority.urgency >= HttpStreamPriority::kMinimumUrgency &&
priority.urgency <= HttpStreamPriority::kMaximumUrgency) {
dictionary[HttpStreamPriority::kUrgencyKey] =
quiche::structured_headers::ParameterizedMember(
quiche::structured_headers::Item(
static_cast<int64_t>(priority.urgency)),
{});
}
if (priority.incremental != HttpStreamPriority::kDefaultIncremental) {
dictionary[HttpStreamPriority::kIncrementalKey] =
quiche::structured_headers::ParameterizedMember(
quiche::structured_headers::Item(priority.incremental), {});
}
std::optional<std::string> priority_field_value =
quiche::structured_headers::SerializeDictionary(dictionary);
if (!priority_field_value.has_value()) {
QUICHE_BUG(priority_field_value_serialization_failed);
return "";
}
return *priority_field_value;
}
std::optional<HttpStreamPriority> ParsePriorityFieldValue(
absl::string_view priority_field_value) {
std::optional<quiche::structured_headers::Dictionary> parsed_dictionary =
quiche::structured_headers::ParseDictionary(priority_field_value);
if (!parsed_dictionary.has_value()) {
return std::nullopt;
}
uint8_t urgency = HttpStreamPriority::kDefaultUrgency;
bool incremental = HttpStreamPriority::kDefaultIncremental;
for (const auto& [name, value] : *parsed_dictionary) {
if (value.member_is_inner_list) {
continue;
}
const std::vector<quiche::structured_headers::ParameterizedItem>& member =
value.member;
if (member.size() != 1) {
QUICHE_BUG(priority_field_value_parsing_internal_error);
continue;
}
const quiche::structured_headers::Item item = member[0].item;
if (name == HttpStreamPriority::kUrgencyKey && item.is_integer()) {
int parsed_urgency = item.GetInteger();
if (parsed_urgency >= HttpStreamPriority::kMinimumUrgency &&
parsed_urgency <= HttpStreamPriority::kMaximumUrgency) {
urgency = parsed_urgency;
}
} else if (name == HttpStreamPriority::kIncrementalKey &&
item.is_boolean()) {
incremental = item.GetBoolean();
}
}
return HttpStreamPriority{urgency, incremental};
}
} | #include "quiche/quic/core/quic_stream_priority.h"
#include <optional>
#include "quiche/quic/core/quic_types.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quic::test {
TEST(HttpStreamPriority, DefaultConstructed) {
HttpStreamPriority priority;
EXPECT_EQ(HttpStreamPriority::kDefaultUrgency, priority.urgency);
EXPECT_EQ(HttpStreamPriority::kDefaultIncremental, priority.incremental);
}
TEST(HttpStreamPriority, Equals) {
EXPECT_EQ((HttpStreamPriority()),
(HttpStreamPriority{HttpStreamPriority::kDefaultUrgency,
HttpStreamPriority::kDefaultIncremental}));
EXPECT_EQ((HttpStreamPriority{5, true}), (HttpStreamPriority{5, true}));
EXPECT_EQ((HttpStreamPriority{2, false}), (HttpStreamPriority{2, false}));
EXPECT_EQ((HttpStreamPriority{11, true}), (HttpStreamPriority{11, true}));
EXPECT_NE((HttpStreamPriority{1, true}), (HttpStreamPriority{3, true}));
EXPECT_NE((HttpStreamPriority{4, false}), (HttpStreamPriority{4, true}));
EXPECT_NE((HttpStreamPriority{6, true}), (HttpStreamPriority{2, false}));
EXPECT_NE((HttpStreamPriority{12, true}), (HttpStreamPriority{9, true}));
EXPECT_NE((HttpStreamPriority{2, false}), (HttpStreamPriority{8, false}));
}
TEST(WebTransportStreamPriority, DefaultConstructed) {
WebTransportStreamPriority priority;
EXPECT_EQ(priority.session_id, 0);
EXPECT_EQ(priority.send_group_number, 0);
EXPECT_EQ(priority.send_order, 0);
}
TEST(WebTransportStreamPriority, Equals) {
EXPECT_EQ(WebTransportStreamPriority(),
(WebTransportStreamPriority{0, 0, 0}));
EXPECT_NE(WebTransportStreamPriority(),
(WebTransportStreamPriority{1, 2, 3}));
EXPECT_NE(WebTransportStreamPriority(),
(WebTransportStreamPriority{0, 0, 1}));
}
TEST(QuicStreamPriority, Default) {
EXPECT_EQ(QuicStreamPriority().type(), QuicPriorityType::kHttp);
EXPECT_EQ(QuicStreamPriority().http(), HttpStreamPriority());
}
TEST(QuicStreamPriority, Equals) {
EXPECT_EQ(QuicStreamPriority(), QuicStreamPriority(HttpStreamPriority()));
}
TEST(QuicStreamPriority, Type) {
EXPECT_EQ(QuicStreamPriority(HttpStreamPriority()).type(),
QuicPriorityType::kHttp);
EXPECT_EQ(QuicStreamPriority(WebTransportStreamPriority()).type(),
QuicPriorityType::kWebTransport);
}
TEST(SerializePriorityFieldValueTest, SerializePriorityFieldValue) {
EXPECT_EQ("", SerializePriorityFieldValue(
{ 3, false}));
EXPECT_EQ("u=5", SerializePriorityFieldValue(
{ 5, false}));
EXPECT_EQ("i", SerializePriorityFieldValue(
{ 3, true}));
EXPECT_EQ("u=0, i", SerializePriorityFieldValue(
{ 0, true}));
EXPECT_EQ("i", SerializePriorityFieldValue(
{ 9, true}));
}
TEST(ParsePriorityFieldValueTest, ParsePriorityFieldValue) {
std::optional<HttpStreamPriority> result = ParsePriorityFieldValue("");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(3, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("i=?1");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(3, result->urgency);
EXPECT_TRUE(result->incremental);
result = ParsePriorityFieldValue("u=5");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(5, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("u=5, i");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(5, result->urgency);
EXPECT_TRUE(result->incremental);
result = ParsePriorityFieldValue("i, u=1");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(1, result->urgency);
EXPECT_TRUE(result->incremental);
result = ParsePriorityFieldValue("u=5, i=?1, i=?0, u=2");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(2, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("a=42, u=4, i=?0");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(4, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("u=-2, i");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(3, result->urgency);
EXPECT_TRUE(result->incremental);
result = ParsePriorityFieldValue("u=4.2, i=\"foo\"");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(3, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("a=4, b=?1");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(3, result->urgency);
EXPECT_FALSE(result->incremental);
result = ParsePriorityFieldValue("000");
EXPECT_FALSE(result.has_value());
result = ParsePriorityFieldValue("a=(1 2), u=1");
ASSERT_TRUE(result.has_value());
EXPECT_EQ(1, result->urgency);
EXPECT_FALSE(result->incremental);
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_priority.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_priority_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
db56bb06-7c34-4ecb-b2e3-92e681833f69 | cpp | tensorflow/tensorflow | tensor | tensorflow/lite/delegates/gpu/cl/tensor.cc | tensorflow/cc/experimental/base/tests/tensor_test.cc | #include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/cl/buffer.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_image_format.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
absl::Status AllocateTensorMemoryInternal(const CLContext& context,
const TensorDescriptor& descriptor,
CLMemory* result) {
cl_mem_flags mem_flags = CL_MEM_READ_WRITE;
const uint8_t* data_ptr = nullptr;
if (!descriptor.GetData().empty()) {
data_ptr = descriptor.GetData().data();
mem_flags |= CL_MEM_COPY_HOST_PTR;
}
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
switch (descriptor.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER: {
const size_t data_size = storage_dims[0] * descriptor.GetElementSize() *
SizeOf(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
clCreateBuffer(context.context(), mem_flags, data_size,
const_cast<uint8_t*>(data_ptr), &error_code);
if (!memory) {
return absl::UnknownError(
absl::StrCat("Failed to allocate device memory (clCreateBuffer): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_2D: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
CreateImage2DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 2D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_3D: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE3D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = storage_dims[2];
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
CreateImage3DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 3D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::TEXTURE_ARRAY: {
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D_ARRAY;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_array_size = storage_dims[2];
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
format.image_channel_order = CL_RGBA;
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
cl_int error_code;
cl_mem memory =
clCreateImage(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create 2D texture array (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
case TensorStorageType::SINGLE_TEXTURE_2D: {
const int element_size = descriptor.GetElementSize();
if (element_size > 4) {
return absl::InvalidArgumentError(absl::StrCat(
"SINGLE_TEXTURE_2D support only channels in range [1-4], but ",
element_size, "was provided"));
}
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = storage_dims[0];
desc.image_height = storage_dims[1];
desc.image_depth = 0;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.buffer = nullptr;
cl_image_format format;
if (context.IsFloatTexture2DSupported(element_size,
descriptor.GetDataType())) {
format.image_channel_order = ToChannelOrder(element_size);
format.image_channel_data_type =
DataTypeToChannelType(descriptor.GetDataType());
} else {
return absl::InvalidArgumentError(
absl::StrCat("This device doesn't support ", element_size,
"-channel textures."));
}
cl_int error_code;
cl_mem memory =
CreateImage2DLegacy(context.context(), mem_flags, &format, &desc,
const_cast<uint8_t*>(data_ptr), &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create single 2D texture (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
*result = CLMemory(memory, true);
return absl::OkStatus();
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
}
absl::Status CreateImageBufferFromBuffer(const CLContext& context,
cl_mem memory, DataType data_type,
int width, cl_mem* result) {
cl_image_format format;
cl_image_desc desc;
std::memset(&desc, 0, sizeof(desc));
desc.image_type = CL_MEM_OBJECT_IMAGE1D_BUFFER;
desc.image_width = width;
desc.mem_object = memory;
format.image_channel_data_type = DataTypeToChannelType(data_type);
format.image_channel_order = CL_RGBA;
cl_int error_code;
*result = clCreateImage(context.context(), CL_MEM_READ_WRITE, &format, &desc,
nullptr, &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create Image from Buffer (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
return absl::OkStatus();
}
absl::Status CreateImage2DFromBuffer(const CLContext& context, cl_mem memory,
DataType data_type, int width, int height,
int channels, int width_pixel_alignment,
cl_mem* result) {
if (!context.IsFloatTexture2DSupported(channels, data_type)) {
return absl::InvalidArgumentError(absl::StrCat(
"This device doesn't support ", channels, "-channel textures."));
}
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = width;
desc.image_height = height;
desc.image_depth = 0;
const size_t width_aligned = AlignByN(width, width_pixel_alignment);
desc.image_row_pitch = width_aligned * channels * SizeOf(data_type);
desc.image_slice_pitch = 0;
desc.num_mip_levels = 0;
desc.num_samples = 0;
desc.mem_object = memory;
cl_image_format format;
format.image_channel_order = ToChannelOrder(channels);
format.image_channel_data_type = DataTypeToChannelType(data_type);
cl_int error_code;
*result = CreateImage2DLegacy(context.context(), CL_MEM_READ_WRITE, &format,
&desc, nullptr, &error_code);
if (error_code != CL_SUCCESS) {
return absl::UnknownError(
absl::StrCat("Failed to create Image2D from Buffer (clCreateImage): ",
CLErrorCodeToString(error_code)));
}
return absl::OkStatus();
}
}
Tensor::Tensor(cl_mem memory, bool memory_owner,
const TensorDescriptor& descriptor)
: memory_(memory),
image_buffer_memory_(nullptr),
memory_owner_(memory_owner),
descriptor_(descriptor) {}
Tensor::Tensor(cl_mem memory, bool memory_owner, cl_mem image_buffer_memory,
const TensorDescriptor& descriptor)
: memory_(memory),
image_buffer_memory_(image_buffer_memory),
memory_owner_(memory_owner),
descriptor_(descriptor) {
if (image_buffer_memory &&
(descriptor.GetStorageType() == TensorStorageType::TEXTURE_2D ||
descriptor.GetStorageType() == TensorStorageType::SINGLE_TEXTURE_2D)) {
buffer_based_ = true;
}
}
Tensor::Tensor(Tensor&& tensor)
: memory_(tensor.memory_),
image_buffer_memory_(tensor.image_buffer_memory_),
memory_owner_(tensor.memory_owner_),
buffer_based_(tensor.buffer_based_),
descriptor_(std::move(tensor.descriptor_)),
aligned_texture_width_(tensor.aligned_texture_width_) {
tensor.memory_ = nullptr;
tensor.image_buffer_memory_ = nullptr;
}
Tensor& Tensor::operator=(Tensor&& tensor) {
if (this != &tensor) {
Release();
std::swap(memory_, tensor.memory_);
std::swap(image_buffer_memory_, tensor.image_buffer_memory_);
std::swap(memory_owner_, tensor.memory_owner_);
std::swap(buffer_based_, tensor.buffer_based_);
descriptor_ = std::move(tensor.descriptor_);
std::swap(aligned_texture_width_, tensor.aligned_texture_width_);
}
return *this;
}
void Tensor::Release() {
if (image_buffer_memory_) {
clReleaseMemObject(image_buffer_memory_);
image_buffer_memory_ = nullptr;
}
if (memory_owner_ && memory_) {
clReleaseMemObject(memory_);
memory_ = nullptr;
}
}
absl::Status Tensor::GetGPUResources(const GPUObjectDescriptor* obj_ptr,
GPUResourcesWithValue* resources) const {
const auto* buffer_desc = dynamic_cast<const BufferDescriptor*>(obj_ptr);
if (buffer_desc) {
if (descriptor_.GetStorageType() != TensorStorageType::BUFFER &&
descriptor_.GetStorageType() != TensorStorageType::IMAGE_BUFFER) {
return absl::InvalidArgumentError(
"Tensor can be used with BufferDescriptor only with "
"TensorStorageType::BUFFER/TensorStorageType::IMAGE_BUFFER.");
}
resources->buffers.push_back({"buffer", memory_});
return absl::OkStatus();
}
const auto* tensor_desc = dynamic_cast<const TensorDescriptor*>(obj_ptr);
if (!tensor_desc) {
return absl::InvalidArgumentError("Expected TensorDescriptor on input.");
}
tensor_desc->GetGpuResources(descriptor_.GetBHWDCShape(),
&resources->generic);
if (descriptor_.GetStorageType() == TensorStorageType::BUFFER) {
resources->buffers.push_back({"buffer", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_2D ||
descriptor_.GetStorageType() ==
TensorStorageType::SINGLE_TEXTURE_2D) {
if (obj_ptr->GetAccess() == AccessType::WRITE &&
tensor_desc->GetUseBufferForWriteOnlyTexture2d()) {
resources->AddInt("aligned_texture_width", aligned_texture_width_);
resources->buffers.push_back({"buffer", memory_});
} else {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
resources->images2d.push_back({"image2d", mem});
}
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_ARRAY) {
resources->image2d_arrays.push_back({"image2d_array", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::TEXTURE_3D) {
resources->images3d.push_back({"image3d", memory_});
} else if (descriptor_.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
if (obj_ptr->GetAccess() == AccessType::WRITE &&
tensor_desc->GetUseBufferForWriteOnlyImageBuffer()) {
resources->buffers.push_back({"buffer", memory_});
} else {
resources->image_buffers.push_back(
{"image_buffer", image_buffer_memory_});
}
}
return absl::OkStatus();
}
cl_mem Tensor::GetMemoryPtr() const {
if (buffer_based_) {
return image_buffer_memory_;
} else {
return descriptor_.GetStorageType() == TensorStorageType::IMAGE_BUFFER
? image_buffer_memory_
: memory_;
}
}
cl_mem Tensor::GetMemoryPtrForWriting() const {
if (buffer_based_) {
return image_buffer_memory_;
} else {
return memory_;
}
}
absl::Status Tensor::CreateFromDescriptor(const TensorDescriptor& desc,
CLContext* context) {
desc.CopyWithoutData(&descriptor_);
memory_owner_ = true;
CLMemory memory;
RETURN_IF_ERROR(AllocateTensorMemoryInternal(*context, desc, &memory));
memory_ = memory.Release();
if (desc.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor_.GetStorageDims();
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(*context, memory_, desc.GetDataType(),
storage_dims[0], &image_buffer_memory_));
}
return absl::OkStatus();
}
absl::Status Tensor::UploadDescriptorData(const TensorDescriptor& desc,
CLCommandQueue* queue) {
return WriteData(desc.GetData().data(), queue);
}
absl::Status Tensor::ToDescriptor(TensorDescriptor* desc,
CLCommandQueue* queue) const {
*desc = descriptor_;
std::vector<uint8_t> data(GetMemorySizeInBytes());
RETURN_IF_ERROR(ReadData(data.data(), queue));
desc->SetData(std::move(data));
return absl::OkStatus();
}
absl::Status Tensor::WriteData(const void* ptr, CLCommandQueue* queue) {
switch (descriptor_.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER:
RETURN_IF_ERROR(
queue->EnqueueWriteBuffer(memory_, GetMemorySizeInBytes(), ptr));
break;
case TensorStorageType::TEXTURE_ARRAY:
case TensorStorageType::TEXTURE_2D:
case TensorStorageType::TEXTURE_3D:
case TensorStorageType::SINGLE_TEXTURE_2D: {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
RETURN_IF_ERROR(queue->EnqueueWriteImage(
mem, descriptor_.GetFullTensorRegion(), ptr));
break;
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
return absl::OkStatus();
}
absl::Status Tensor::ReadData(void* ptr, CLCommandQueue* queue) const {
switch (descriptor_.GetStorageType()) {
case TensorStorageType::BUFFER:
case TensorStorageType::IMAGE_BUFFER:
RETURN_IF_ERROR(
queue->EnqueueReadBuffer(memory_, GetMemorySizeInBytes(), ptr));
break;
case TensorStorageType::TEXTURE_ARRAY:
case TensorStorageType::TEXTURE_2D:
case TensorStorageType::TEXTURE_3D:
case TensorStorageType::SINGLE_TEXTURE_2D: {
cl_mem mem = buffer_based_ ? image_buffer_memory_ : memory_;
RETURN_IF_ERROR(
queue->EnqueueReadImage(mem, descriptor_.GetFullTensorRegion(), ptr));
break;
}
default:
return absl::InternalError("Unsupported tensor storage type");
}
return absl::OkStatus();
}
absl::Status CreateTensor(const CLContext& context,
const TensorDescriptor& descriptor, Tensor* result) {
CLMemory mem;
RETURN_IF_ERROR(AllocateTensorMemoryInternal(context, descriptor, &mem));
cl_mem memory = mem.Release();
if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
cl_mem image_memory;
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(context, memory, descriptor.GetDataType(),
storage_dims[0], &image_memory));
*result = Tensor(memory, true, image_memory, descriptor);
} else {
*result = Tensor(memory, true, descriptor);
}
return absl::OkStatus();
}
absl::Status CreateTensorShared(const CLContext& context, cl_mem memory,
const TensorDescriptor& descriptor,
Tensor* result) {
const bool memory_owner = false;
if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
cl_mem image_memory;
RETURN_IF_ERROR(
CreateImageBufferFromBuffer(context, memory, descriptor.GetDataType(),
storage_dims[0], &image_memory));
*result = Tensor(memory, memory_owner, image_memory, descriptor);
} else {
*result = Tensor(memory, memory_owner, descriptor);
}
return absl::OkStatus();
}
absl::Status CreateTensorSharedImage2DBuffer(const CLContext& context,
cl_mem memory,
const TensorDescriptor& descriptor,
int width_pixel_alignment,
Tensor* result) {
std::vector<uint64_t> storage_dims = descriptor.GetStorageDims();
const int width = storage_dims[0];
const int height = storage_dims[1];
const int channels = descriptor.GetElementSize();
cl_mem image_memory;
RETURN_IF_ERROR(CreateImage2DFromBuffer(
context, memory, descriptor.GetDataType(), width, height, channels,
width_pixel_alignment, &image_memory));
*result = Tensor(memory, false, image_memory, descriptor);
result->aligned_texture_width_ = AlignByN(width, width_pixel_alignment);
return absl::OkStatus();
}
absl::Status AllocateTensorMemory(const CLContext& context,
const TensorDescriptor& descriptor,
CLMemory* result) {
return AllocateTensorMemoryInternal(context, descriptor, result);
}
}
}
} | #include "tensorflow/cc/experimental/base/public/tensor.h"
#include <stddef.h>
#include <stdint.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/c/tf_datatype.h"
#include "tensorflow/cc/experimental/base/public/status.h"
#include "tensorflow/cc/experimental/base/tests/tensor_types_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace {
using tensorflow::experimental::cc::Status;
using tensorflow::experimental::cc::Tensor;
using SimpleTypes = ::testing::Types<
tensorflow::FloatType, tensorflow::DoubleType, tensorflow::Int32Type,
tensorflow::UINT8Type, tensorflow::INT8Type, tensorflow::INT64Type,
tensorflow::UINT16Type, tensorflow::UINT32Type, tensorflow::UINT64Type>;
template <typename T>
class ConstructScalarTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(ConstructScalarTensorTest, SimpleTypes);
TYPED_TEST(ConstructScalarTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
typename TypeParam::type value = 42;
Tensor tensor = Tensor::FromBuffer(dtype, {},
&value,
sizeof(value),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 0);
EXPECT_EQ(tensor.dtype(), dtype);
EXPECT_EQ(*reinterpret_cast<typename TypeParam::type*>(tensor.data()), 42);
EXPECT_EQ(tensor.num_bytes(), sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), 1);
}
template <typename T>
class Construct1DTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct1DTensorTest, SimpleTypes);
TYPED_TEST(Construct1DTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape;
shape.push_back(value.size());
Tensor tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 1);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
template <typename T>
class Construct2DTensorTest : public ::testing::Test {};
TYPED_TEST_SUITE(Construct2DTensorTest, SimpleTypes);
TYPED_TEST(Construct2DTensorTest, ValidTensorAttributesAfterConstruction) {
Status status;
TF_DataType dtype = TypeParam::kDType;
std::vector<typename TypeParam::type> value = {42, 100, 0, 1, 4, 29};
std::vector<int64_t> shape({2, 3});
Tensor tensor = Tensor::FromBuffer(
dtype, shape,
value.data(),
value.size() * sizeof(typename TypeParam::type),
[](void*, size_t) {}, &status);
ASSERT_TRUE(status.ok()) << status.message();
EXPECT_EQ(tensor.dims(), 2);
EXPECT_EQ(tensor.dtype(), dtype);
absl::Span<const typename TypeParam::type> tensor_view(
reinterpret_cast<typename TypeParam::type*>(tensor.data()), value.size());
EXPECT_EQ(tensor_view[0], 42);
EXPECT_EQ(tensor_view[1], 100);
EXPECT_EQ(tensor_view[2], 0);
EXPECT_EQ(tensor_view[3], 1);
EXPECT_EQ(tensor_view[4], 4);
EXPECT_EQ(tensor_view[5], 29);
EXPECT_EQ(tensor.num_bytes(),
value.size() * sizeof(typename TypeParam::type));
EXPECT_EQ(tensor.num_elements(), value.size());
}
TEST(CPPTensorAPI, ConstructTensorFromBuffer) {
bool done = false;
Status status;
std::vector<int32_t> data_vector({12, 14, 20, 18, 39, 42, 100});
{
std::vector<int64_t> shape;
shape.push_back(data_vector.size());
Tensor::DeleterCallback callback = [&done](void* data, size_t len) {
done = true;
};
Tensor tensor =
Tensor::FromBuffer(TF_INT32, shape,
data_vector.data(),
data_vector.size() * sizeof(int32_t),
callback, &status);
ASSERT_TRUE(status.ok()) << status.message();
}
EXPECT_TRUE(done);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/experimental/base/tests/tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75a7738d-254a-4171-9083-e75edd0bb8bc | cpp | google/tensorstore | neuroglancer_compressed_segmentation | tensorstore/internal/compression/neuroglancer_compressed_segmentation.cc | tensorstore/internal/compression/neuroglancer_compressed_segmentation_test.cc | #include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <string>
#include <string_view>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/container/flat_hash_map.h"
namespace tensorstore {
namespace neuroglancer_compressed_segmentation {
constexpr size_t kBlockHeaderSize = 2;
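// Packs the two 32-bit words of a block header: word 0 carries the label
// table offset in its low 24 bits and the encoded bit count in bits 24..31;
// word 1 carries the offset of the block's packed indices. Both offsets are
// counted in 32-bit words from the start of the channel. For example, header
// words {5 | (2 << 24), 4} describe a 2-bit encoding with its label table at
// word 5 and its packed values at word 4.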
void WriteBlockHeader(size_t encoded_value_base_offset,
size_t table_base_offset, size_t encoding_bits,
void* output) {
absl::little_endian::Store32(output,
table_base_offset | (encoding_bits << 24));
absl::little_endian::Store32(static_cast<char*>(output) + 4,
encoded_value_base_offset);
}
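// Encodes one block: collects the distinct labels, sorts them into a lookup
// table, picks the smallest power-of-two bit width able to index that table
// (0 bits when the block holds a single label), then appends the packed
// per-element indices followed, when the same table has not been emitted
// before, by the table itself. `cache` maps previously written tables to
// their offsets so identical blocks can share one table.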
template <typename Label>
void EncodeBlock(const Label* input, const ptrdiff_t input_shape[3],
const ptrdiff_t input_byte_strides[3],
const ptrdiff_t block_shape[3], size_t base_offset,
size_t* encoded_bits_output, size_t* table_offset_output,
EncodedValueCache<Label>* cache, std::string* output) {
if (input_shape[0] == 0 && input_shape[1] == 0 && input_shape[2] == 0) {
*encoded_bits_output = 0;
*table_offset_output = 0;
return;
}
constexpr size_t num_32bit_words_per_label = sizeof(Label) / 4;
absl::flat_hash_map<Label, uint32_t> seen_values;
std::vector<Label> seen_values_inv;
const auto ForEachElement = [&](auto func) {
auto* input_z = reinterpret_cast<const char*>(input);
for (ptrdiff_t z = 0; z < input_shape[0]; ++z) {
auto* input_y = input_z;
for (ptrdiff_t y = 0; y < input_shape[1]; ++y) {
auto* input_x = input_y;
for (ptrdiff_t x = 0; x < input_shape[2]; ++x) {
func(z, y, x, *reinterpret_cast<const Label*>(input_x));
input_x += input_byte_strides[2];
}
input_y += input_byte_strides[1];
}
input_z += input_byte_strides[0];
}
};
Label previous_value = input[0] + 1;
ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
if (value != previous_value) {
previous_value = value;
if (seen_values.emplace(value, 0).second) {
seen_values_inv.push_back(value);
}
}
});
std::sort(seen_values_inv.begin(), seen_values_inv.end());
for (size_t i = 0; i < seen_values_inv.size(); ++i) {
seen_values[seen_values_inv[i]] = static_cast<uint32_t>(i);
}
size_t encoded_bits = 0;
if (seen_values.size() != 1) {
encoded_bits = 1;
while ((size_t(1) << encoded_bits) < seen_values.size()) {
encoded_bits *= 2;
}
}
*encoded_bits_output = encoded_bits;
const size_t encoded_size_32bits =
(encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] + 31) /
32;
const size_t encoded_value_base_offset = output->size();
assert((encoded_value_base_offset - base_offset) % 4 == 0);
size_t elements_to_write = encoded_size_32bits;
bool write_table;
{
auto it = cache->find(seen_values_inv);
if (it == cache->end()) {
write_table = true;
elements_to_write += seen_values.size() * num_32bit_words_per_label;
*table_offset_output =
(encoded_value_base_offset - base_offset) / 4 + encoded_size_32bits;
} else {
write_table = false;
*table_offset_output = it->second;
}
}
output->resize(encoded_value_base_offset + elements_to_write * 4);
char* output_ptr = output->data() + encoded_value_base_offset;
ForEachElement([&](size_t z, size_t y, size_t x, Label value) {
uint32_t index = seen_values.at(value);
size_t output_offset = x + block_shape[2] * (y + block_shape[1] * z);
void* cur_ptr = output_ptr + output_offset * encoded_bits / 32 * 4;
absl::little_endian::Store32(
cur_ptr, absl::little_endian::Load32(cur_ptr) |
(index << (output_offset * encoded_bits % 32)));
});
if (write_table) {
output_ptr =
output->data() + encoded_value_base_offset + encoded_size_32bits * 4;
for (auto value : seen_values_inv) {
for (size_t word_i = 0; word_i < num_32bit_words_per_label; ++word_i) {
absl::little_endian::Store32(
output_ptr + word_i * 4,
static_cast<uint32_t>(value >> (32 * word_i)));
}
output_ptr += num_32bit_words_per_label * 4;
}
cache->emplace(seen_values_inv,
static_cast<uint32_t>(*table_offset_output));
}
}
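// Encodes a whole channel: reserves space for the per-block header index,
// then encodes blocks in z/y/x grid order, clamping edge blocks to the input
// bounds while keeping the full `block_shape` addressing for the packed
// indices. Tables are shared across blocks through a per-channel cache.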
template <class Label>
void EncodeChannel(const Label* input, const ptrdiff_t input_shape[3],
const ptrdiff_t input_byte_strides[3],
const ptrdiff_t block_shape[3], std::string* output) {
EncodedValueCache<Label> cache;
const size_t base_offset = output->size();
ptrdiff_t grid_shape[3];
size_t block_index_size = kBlockHeaderSize;
for (size_t i = 0; i < 3; ++i) {
grid_shape[i] = (input_shape[i] + block_shape[i] - 1) / block_shape[i];
block_index_size *= grid_shape[i];
}
output->resize(base_offset + block_index_size * 4);
ptrdiff_t block[3];
for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
const size_t block_offset =
block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
ptrdiff_t input_block_shape[3];
ptrdiff_t input_offset = 0;
for (size_t i = 0; i < 3; ++i) {
auto pos = block[i] * block_shape[i];
input_block_shape[i] = std::min(block_shape[i], input_shape[i] - pos);
input_offset += pos * input_byte_strides[i];
}
const size_t encoded_value_base_offset =
(output->size() - base_offset) / 4;
size_t encoded_bits, table_offset;
EncodeBlock(reinterpret_cast<const Label*>(
reinterpret_cast<const char*>(input) + input_offset),
input_block_shape, input_byte_strides, block_shape,
base_offset, &encoded_bits, &table_offset, &cache, output);
WriteBlockHeader(
encoded_value_base_offset, table_offset, encoded_bits,
output->data() + base_offset + block_offset * kBlockHeaderSize * 4);
}
}
}
}
template <class Label>
void EncodeChannels(const Label* input, const ptrdiff_t input_shape[3 + 1],
const ptrdiff_t input_byte_strides[3 + 1],
const ptrdiff_t block_shape[3], std::string* output) {
const size_t base_offset = output->size();
output->resize(base_offset + input_shape[0] * 4);
for (ptrdiff_t channel_i = 0; channel_i < input_shape[0]; ++channel_i) {
absl::little_endian::Store32(output->data() + base_offset + channel_i * 4,
(output->size() - base_offset) / 4);
EncodeChannel(
reinterpret_cast<const Label*>(reinterpret_cast<const char*>(input) +
input_byte_strides[0] * channel_i),
input_shape + 1, input_byte_strides + 1, block_shape, output);
}
}
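// Decoder counterpart of WriteBlockHeader: reads the 8-byte header as one
// little-endian 64-bit value and unpacks the table offset (bits 0..23), the
// encoded bit count (bits 24..31) and the encoded-value offset (bits 32..55).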
void ReadBlockHeader(const void* header, size_t* encoded_value_base_offset,
size_t* table_base_offset, size_t* encoding_bits) {
auto h = absl::little_endian::Load64(header);
*table_base_offset = h & 0xffffff;
*encoding_bits = (h >> 24) & 0xff;
*encoded_value_base_offset = (h >> 32) & 0xffffff;
}
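// Decodes one block into `output`: with 0 encoded bits the single table
// entry is broadcast to every element; otherwise each element's index is
// extracted from the packed bit stream and looked up in the table, failing
// on any out-of-range index.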
template <typename Label>
bool DecodeBlock(size_t encoded_bits, const char* encoded_input,
const char* table_input, size_t table_size,
const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3],
const ptrdiff_t output_byte_strides[3], Label* output) {
const auto for_each_position = [&](auto callback) {
auto* output_z = reinterpret_cast<char*>(output);
for (ptrdiff_t z = 0; z < output_shape[0]; ++z) {
auto* output_y = output_z;
for (ptrdiff_t y = 0; y < output_shape[1]; ++y) {
auto* output_x = output_y;
for (ptrdiff_t x = 0; x < output_shape[2]; ++x) {
auto& label = *reinterpret_cast<Label*>(output_x);
if (!callback(label, z, y, x)) return false;
output_x += output_byte_strides[2];
}
output_y += output_byte_strides[1];
}
output_z += output_byte_strides[0];
}
return true;
};
const auto read_label = [&](size_t index) -> Label {
if constexpr (sizeof(Label) == 4) {
return absl::little_endian::Load32(table_input + index * sizeof(Label));
} else {
return absl::little_endian::Load64(table_input + index * sizeof(Label));
}
};
if (encoded_bits == 0) {
if (table_size == 0) return false;
const Label label = read_label(0);
return for_each_position(
[&](Label& output_label, ptrdiff_t z, ptrdiff_t y, ptrdiff_t x) {
output_label = label;
return true;
});
}
const uint32_t encoded_value_mask = (1U << encoded_bits) - 1;
return for_each_position([&](Label& output_label, ptrdiff_t z, ptrdiff_t y,
ptrdiff_t x) {
size_t encoded_offset = x + block_shape[2] * (y + block_shape[1] * z);
auto index = absl::little_endian::Load32(
encoded_input + encoded_offset * encoded_bits / 32 * 4) >>
(encoded_offset * encoded_bits % 32) &
encoded_value_mask;
if (index >= table_size) return false;
output_label = read_label(index);
return true;
});
}
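// Decodes one channel, validating every header field before touching the
// payload: the block index must fit in the input, offsets must stay in
// range, and the bit count must be zero or a power of two no larger than 32.
// Any violation makes the function return false rather than read out of
// bounds.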
template <typename Label>
bool DecodeChannel(std::string_view input, const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3],
const ptrdiff_t output_byte_strides[3], Label* output) {
if ((input.size() % 4) != 0) return false;
ptrdiff_t grid_shape[3];
size_t block_index_size = kBlockHeaderSize;
for (size_t i = 0; i < 3; ++i) {
grid_shape[i] = (output_shape[i] + block_shape[i] - 1) / block_shape[i];
block_index_size *= grid_shape[i];
}
if (input.size() / 4 < block_index_size) {
return false;
}
ptrdiff_t block[3];
for (block[0] = 0; block[0] < grid_shape[0]; ++block[0]) {
for (block[1] = 0; block[1] < grid_shape[1]; ++block[1]) {
for (block[2] = 0; block[2] < grid_shape[2]; ++block[2]) {
const size_t block_offset =
block[2] + grid_shape[2] * (block[1] + grid_shape[1] * block[0]);
ptrdiff_t output_block_shape[3];
ptrdiff_t output_offset = 0;
for (size_t i = 0; i < 3; ++i) {
auto pos = block[i] * block_shape[i];
output_block_shape[i] =
std::min(block_shape[i], output_shape[i] - pos);
output_offset += pos * output_byte_strides[i];
}
size_t encoded_value_base_offset;
size_t encoded_bits, table_offset;
ReadBlockHeader(input.data() + block_offset * kBlockHeaderSize * 4,
&encoded_value_base_offset, &table_offset,
&encoded_bits);
if (encoded_bits > 32 || (encoded_bits & (encoded_bits - 1)) != 0) {
return false;
}
if (encoded_value_base_offset > input.size() / 4 ||
table_offset > input.size() / 4) {
return false;
}
const size_t encoded_size_32bits =
(encoded_bits * block_shape[0] * block_shape[1] * block_shape[2] +
31) /
32;
if ((encoded_value_base_offset + encoded_size_32bits) * 4 >
input.size()) {
return false;
}
auto* block_output = reinterpret_cast<Label*>(
reinterpret_cast<char*>(output) + output_offset);
const char* encoded_input =
input.data() + encoded_value_base_offset * 4;
const char* table_input = input.data() + table_offset * 4;
const size_t table_size =
(input.size() - table_offset * 4) / sizeof(Label);
if (!DecodeBlock(encoded_bits, encoded_input, table_input, table_size,
block_shape, output_block_shape, output_byte_strides,
block_output)) {
return false;
}
}
}
}
return true;
}
template <typename Label>
bool DecodeChannels(std::string_view input, const ptrdiff_t block_shape[3],
const ptrdiff_t output_shape[3 + 1],
const ptrdiff_t output_byte_strides[3 + 1], Label* output) {
if ((input.size() % 4) != 0) return false;
if (input.size() / 4 < static_cast<size_t>(output_shape[0])) {
return false;
}
for (ptrdiff_t channel_i = 0; channel_i < output_shape[0]; ++channel_i) {
const size_t offset =
absl::little_endian::Load32(input.data() + channel_i * 4);
if (offset > input.size() / 4) {
return false;
}
if (!DecodeChannel(
input.substr(offset * 4), block_shape, output_shape + 1,
output_byte_strides + 1,
reinterpret_cast<Label*>(reinterpret_cast<char*>(output) +
output_byte_strides[0] * channel_i))) {
return false;
}
}
return true;
}
#define DO_INSTANTIATE(Label) \
template void EncodeBlock<Label>( \
const Label* input, const ptrdiff_t input_shape[3], \
const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
size_t base_offset, size_t* encoded_bits_output, \
size_t* table_offset_output, EncodedValueCache<Label>* cache, \
std::string* output); \
template void EncodeChannel<Label>( \
const Label* input, const ptrdiff_t input_shape[3], \
const ptrdiff_t input_byte_strides[3], const ptrdiff_t block_shape[3], \
std::string* output); \
template void EncodeChannels<Label>( \
const Label* input, const ptrdiff_t input_shape[3 + 1], \
const ptrdiff_t input_byte_strides[3 + 1], \
const ptrdiff_t block_shape[3], std::string* output); \
template bool DecodeBlock( \
size_t encoded_bits, const char* encoded_input, const char* table_input, \
size_t table_size, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
Label* output); \
template bool DecodeChannel<Label>( \
std::string_view input, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3], const ptrdiff_t output_byte_strides[3], \
Label* output); \
template bool DecodeChannels( \
std::string_view input, const ptrdiff_t block_shape[3], \
const ptrdiff_t output_shape[3 + 1], \
      const ptrdiff_t output_byte_strides[3 + 1], Label* output);
DO_INSTANTIATE(uint32_t)
DO_INSTANTIATE(uint64_t)
#undef DO_INSTANTIATE
}
} | #include "tensorstore/internal/compression/neuroglancer_compressed_segmentation.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
namespace {
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::DecodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeBlock;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannel;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodeChannels;
using ::tensorstore::neuroglancer_compressed_segmentation::EncodedValueCache;
std::vector<uint32_t> AsVec(std::string_view s) {
EXPECT_EQ(0, s.size() % 4);
std::vector<uint32_t> out(s.size() / 4);
for (size_t i = 0; i < out.size(); ++i) {
out[i] = absl::little_endian::Load32(s.data() + i * 4);
}
return out;
}
std::string FromVec(std::vector<uint32_t> v) {
std::string s;
s.resize(v.size() * 4);
for (size_t i = 0; i < v.size(); ++i) {
absl::little_endian::Store32(s.data() + i * 4, v[i]);
}
return s;
}
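// Round-trips one block through EncodeBlock/DecodeBlock and checks the
// chosen bit width, table offset, raw encoded words and cache contents
// against the expected values. The 3-byte prefix in `output` verifies that
// encoding only appends past the base offset.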
template <typename T>
void TestBlockRoundTrip(std::vector<T> input,
const std::ptrdiff_t (&input_shape)[3],
const std::ptrdiff_t (&block_shape)[3],
size_t expected_encoded_bits,
size_t expected_table_offset,
std::vector<uint32_t> expected_output,
EncodedValueCache<T> expected_cache) {
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
size_t encoded_bits;
size_t table_offset;
  EncodedValueCache<T> cache;
const size_t initial_offset = output.size();
EncodeBlock(input.data(), input_shape, input_byte_strides, block_shape,
initial_offset, &encoded_bits, &table_offset, &cache, &output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_EQ(expected_encoded_bits, encoded_bits);
EXPECT_EQ(expected_table_offset, table_offset);
EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
EXPECT_EQ(expected_cache, cache);
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeBlock(
encoded_bits, output.data() + initial_offset,
output.data() + initial_offset + table_offset * 4,
(output.size() - (initial_offset + table_offset * 4)) / sizeof(T),
block_shape, input_shape, input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
template <typename T>
void TestSingleChannelRoundTrip(std::vector<T> input,
const std::ptrdiff_t (&input_shape)[3],
const std::ptrdiff_t (&block_shape)[3],
std::vector<uint32_t> expected_output) {
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2], input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
const size_t initial_offset = output.size();
EncodeChannel(input.data(), input_shape, input_byte_strides, block_shape,
&output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_EQ(expected_output, AsVec(output.substr(initial_offset)));
std::vector<T> decoded_output(input.size());
std::vector<char> output_copy(output.begin() + initial_offset, output.end());
EXPECT_TRUE(DecodeChannel(
std::string_view(output_copy.data(), output_copy.size()), block_shape,
input_shape, input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
template <typename T>
void TestDecodeChannelError(std::string_view input,
const std::ptrdiff_t (&block_shape)[3],
const std::ptrdiff_t (&input_shape)[3]) {
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[3] = {
input_shape[1] * input_shape[2] * s, input_shape[2] * s, s};
std::vector<T> decoded_output(input_shape[0] * input_shape[1] *
input_shape[2]);
EXPECT_FALSE(DecodeChannel(input, block_shape, input_shape,
input_byte_strides, decoded_output.data()));
}
template <typename T>
void TestMultipleChannelsRoundTripBytes(
std::vector<T> input, const std::ptrdiff_t (&input_shape)[4],
const std::ptrdiff_t (&block_shape)[4],
std::vector<unsigned char> expected_output) {
std::string output{1, 2, 3};
ASSERT_EQ(input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3],
input.size());
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[4] = {
input_shape[1] * input_shape[2] * input_shape[3] * s,
input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
const size_t initial_offset = output.size();
EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
&output);
ASSERT_THAT(output.substr(0, 3), ::testing::ElementsAre(1, 2, 3));
EXPECT_THAT(output.substr(initial_offset),
::testing::ElementsAreArray(expected_output));
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeChannels(output.substr(initial_offset), block_shape,
input_shape, input_byte_strides,
decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
TEST(EncodeBlockTest, Basic0) {
TestBlockRoundTrip<uint64_t>({3, 3, 3, 3},
{1, 2, 2},
{1, 2, 2},
0,
0,
{3, 0},
{{{3}, 0}});
}
TEST(EncodeBlockTest, Basic1) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 4, 4},
{1, 2, 2},
{1, 2, 2},
1,
1,
{0b1101, 3, 0, 4, 0},
{{{3, 4}, 1}});
}
TEST(EncodeBlockTest, SizeMismatch) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 4, 3},
{1, 2, 2},
{1, 2, 3},
1,
1,
{0b001001, 3, 0, 4, 0},
{{{3, 4}, 1}});
}
TEST(EncodeBlockTest, Basic2) {
TestBlockRoundTrip<uint64_t>(
{4, 3, 5, 4},
{1, 2, 2},
{1, 2, 2},
2,
1,
{0b01100001, 3, 0, 4, 0, 5, 0},
{{{3, 4, 5}, 1}});
}
TEST(EncodeChannelTest, Basic) {
TestSingleChannelRoundTrip<uint64_t>(
{4, 3, 5, 4, 1, 3, 3, 3},
{2, 2, 2},
{1, 2, 2},
{5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0,
0b1110, 1, 0, 3, 0});
}
TEST(EncodeChannelTest, BasicCached) {
TestSingleChannelRoundTrip<uint64_t>(
{
4, 3, 5, 4,
1, 3, 3, 3,
3, 1, 1, 1,
5, 5, 3, 4,
},
{4, 2, 2},
{1, 2, 2},
{
9 | (2 << 24),
8,
16 | (1 << 24),
15,
16 | (1 << 24),
20,
9 | (2 << 24),
21,
0b01100001,
3,
0,
4,
0,
5,
0,
0b1110,
1,
0,
3,
0,
0b00000001,
0b01001010,
});
}
TEST(EncodeChannelTest, BasicCachedZeroBitsAtEnd) {
TestSingleChannelRoundTrip<uint64_t>(
{
3, 3, 3, 3,
3, 3, 3, 3,
3, 3, 3, 3,
3, 3, 3, 3,
},
{4, 2, 2},
{1, 2, 2},
{
8 | (0 << 24),
8,
8 | (0 << 24),
10,
8 | (0 << 24),
10,
8 | (0 << 24),
10,
3,
0,
});
}
TEST(EncodeChannelTest, BasicCached32) {
TestSingleChannelRoundTrip<uint32_t>(
{
4, 3, 5, 4,
1, 3, 3, 3,
3, 1, 1, 1,
5, 5, 3, 4,
},
{4, 2, 2},
{1, 2, 2},
{
9 | (2 << 24),
8,
13 | (1 << 24),
12,
13 | (1 << 24),
15,
9 | (2 << 24),
16,
0b01100001,
3,
4,
5,
0b1110,
1,
3,
0b00000001,
0b01001010,
});
}
TEST(EncodeChannelsTest, Basic1Channel1Block) {
TestMultipleChannelsRoundTripBytes<uint64_t>(
{4, 0, 4, 0},
{1, 1, 2, 2},
{1, 2, 2},
{
1, 0, 0, 0,
3, 0, 0, 1, 2, 0, 0, 0,
0b0101, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
4, 0, 0, 0,
0, 0, 0, 0,
});
}
TEST(DecodeChannelTest, SizeNotMultipleOf4) {
auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
input.resize(input.size() - 1);
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, Truncated) {
auto input = FromVec({5 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
input.resize(input.size() - 4);
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, NonPowerOf2EncodedBits) {
auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MoreThan32EncodedBits) {
auto input = FromVec({5 | (33 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MissingBlockHeaders) {
auto input = FromVec({5 | (3 << 24), 4, 12 | (1 << 24)});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, InvalidEncodedValueOffset) {
auto input = FromVec({5 | (2 << 24), 16, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, InvalidTableOffset) {
auto input = FromVec({16 | (2 << 24), 4, 12 | (1 << 24), 11, 0b01100001, 3, 0,
4, 0, 5, 0, 0b1110, 1, 0, 3, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
TEST(DecodeChannelTest, MissingEncodedValues) {
auto input = FromVec(
{5 | (2 << 24), 4, 0 | (1 << 24), 11, 0b01100001, 3, 0, 4, 0, 5, 0});
TestDecodeChannelError<uint64_t>(
input,
{1, 2, 2},
{2, 2, 2});
}
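// Randomized round trip: draws random block shapes, input shapes and label
// sets, then checks that DecodeChannels recovers exactly what EncodeChannels
// produced.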
template <typename T>
void RandomRoundTrip(size_t max_block_size, size_t max_input_size,
size_t max_channels, size_t max_distinct_ids,
size_t num_iterations) {
absl::BitGen gen;
for (size_t iter = 0; iter < num_iterations; ++iter) {
std::ptrdiff_t block_shape[3];
std::ptrdiff_t input_shape[4];
input_shape[0] = absl::Uniform(gen, 1u, max_channels + 1);
for (int i = 0; i < 3; ++i) {
block_shape[i] = absl::Uniform(gen, 1u, max_block_size + 1);
input_shape[i + 1] = absl::Uniform(gen, 1u, max_input_size + 1);
}
std::vector<T> input(input_shape[0] * input_shape[1] * input_shape[2] *
input_shape[3]);
std::vector<T> labels(max_distinct_ids);
for (auto& label : labels) {
label = absl::Uniform<T>(gen);
}
for (auto& label : input) {
label = labels[absl::Uniform(gen, 0u, labels.size())];
}
constexpr std::ptrdiff_t s = sizeof(T);
const std::ptrdiff_t input_byte_strides[4] = {
input_shape[1] * input_shape[2] * input_shape[3] * s,
input_shape[2] * input_shape[3] * s, input_shape[3] * s, s};
std::string output;
EncodeChannels(input.data(), input_shape, input_byte_strides, block_shape,
&output);
std::vector<T> decoded_output(input.size());
EXPECT_TRUE(DecodeChannels(output, block_shape, input_shape,
input_byte_strides, decoded_output.data()));
EXPECT_EQ(input, decoded_output);
}
}
void RandomRoundTripBothDataTypes(size_t max_block_size, size_t max_input_size,
size_t max_channels, size_t max_distinct_ids,
size_t num_iterations) {
RandomRoundTrip<uint32_t>(max_block_size, max_input_size, max_channels,
max_distinct_ids, num_iterations);
RandomRoundTrip<uint64_t>(max_block_size, max_input_size, max_channels,
max_distinct_ids, num_iterations);
}
TEST(RoundTripTest, Random) {
RandomRoundTripBothDataTypes(4, 10,
3, 16,
100);
RandomRoundTripBothDataTypes(10, 16,
3, 1000,
100);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/neuroglancer_compressed_segmentation.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/neuroglancer_compressed_segmentation_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
020f23af-2962-414a-80bf-392dce42a8cc | cpp | google/arolla | lazy_qtype | arolla/lazy/lazy_qtype.cc | arolla/lazy/lazy_qtype_test.cc | #include "arolla/lazy/lazy_qtype.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "arolla/lazy/lazy.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/meta.h"
namespace arolla {
namespace {
class LazyQType final : public SimpleQType {
public:
explicit LazyQType(QTypePtr value_qtype)
: SimpleQType(meta::type<LazyPtr>(),
"LAZY[" + std::string(value_qtype->name()) + "]",
value_qtype,
"::arolla::LazyQType") {}
};
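// Interns one LazyQType per value qtype; instances live for the lifetime of
// the process.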
class LazyQTypeRegistry {
public:
QTypePtr GetLazyQType(QTypePtr value_qtype) {
absl::WriterMutexLock l(&lock_);
auto& result = registry_[value_qtype];
if (!result) {
result = std::make_unique<LazyQType>(value_qtype);
}
return result.get();
}
private:
absl::Mutex lock_;
absl::flat_hash_map<QTypePtr, std::unique_ptr<LazyQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsLazyQType(const QType* qtype) {
return fast_dynamic_downcast_final<const LazyQType*>(qtype) != nullptr;
}
QTypePtr GetLazyQType(QTypePtr value_qtype) {
static absl::NoDestructor<LazyQTypeRegistry> registry;
return registry->GetLazyQType(value_qtype);
}
TypedValue MakeLazyQValue(LazyPtr lazy) {
DCHECK_NE(lazy, nullptr);
auto result = TypedValue::FromValueWithQType(
std::move(lazy), GetLazyQType(lazy->value_qtype()));
DCHECK_OK(result.status());
return *std::move(result);
}
} | #include "arolla/lazy/lazy_qtype.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status_matchers.h"
#include "arolla/lazy/lazy.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::ReprTokenEq;
using ::arolla::testing::TypedValueWith;
TEST(LazyQTypeTest, Basics) {
auto qtype = GetLazyQType<QTypePtr>();
EXPECT_EQ(qtype, GetLazyQType<QTypePtr>());
EXPECT_EQ(qtype->name(), "LAZY[QTYPE]");
EXPECT_EQ(qtype->type_info(), typeid(LazyPtr));
EXPECT_EQ(qtype->type_layout().AllocSize(), sizeof(LazyPtr));
EXPECT_EQ(qtype->type_layout().AllocAlignment().value, alignof(LazyPtr));
EXPECT_TRUE(qtype->type_fields().empty());
EXPECT_EQ(qtype->value_qtype(), GetQTypeQType());
EXPECT_EQ(qtype->qtype_specialization_key(), "::arolla::LazyQType");
}
TEST(LazyQTypeTest, IsLazyQType) {
EXPECT_TRUE(IsLazyQType(GetLazyQType<QTypePtr>()));
EXPECT_TRUE(IsLazyQType(GetLazyQType<int32_t>()));
EXPECT_TRUE(IsLazyQType(GetLazyQType<float>()));
EXPECT_FALSE(IsLazyQType(GetQTypeQType()));
EXPECT_FALSE(IsLazyQType(GetQType<int32_t>()));
EXPECT_FALSE(IsLazyQType(GetQType<float>()));
}
TEST(LazyQTypeTest, MakeLazyQValue) {
auto qvalue = MakeLazyQValue(MakeLazyFromQValue(TypedValue::FromValue(1)));
EXPECT_THAT(qvalue.GenReprToken(), ReprTokenEq("lazy[INT32]"));
ASSERT_EQ(qvalue.GetType(), GetLazyQType<int>());
EXPECT_THAT(qvalue.UnsafeAs<LazyPtr>()->Get(),
IsOkAndHolds(TypedValueWith<int>(1)));
EXPECT_EQ(qvalue.GetFingerprint(),
MakeLazyQValue(MakeLazyFromQValue(TypedValue::FromValue(1)))
.GetFingerprint());
EXPECT_NE(qvalue.GetFingerprint(),
MakeLazyQValue(MakeLazyFromQValue(TypedValue::FromValue(2)))
.GetFingerprint());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/lazy/lazy_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/lazy/lazy_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ed9a82cd-053d-4367-8614-919d3297f750 | cpp | tensorflow/tensorflow | unary_elementwise | tensorflow/lite/experimental/shlo/ops/unary_elementwise.h | tensorflow/lite/experimental/shlo/ops/unary_elementwise_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_UNARY_ELEMENTWISE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_UNARY_ELEMENTWISE_H_
#include <cstddef>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/quantize.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
namespace detail {
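// Recursively walks the tensor in row-major order; each element is
// dequantized with the scale/zero point of its position along
// `quantization_dimension`, transformed by `op` in the expressed type, and
// requantized with the matching output parameters.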
template <typename StorageT, typename ExpressedT, typename F>
void DequantizeOpQuantizePerAxisImpl(
F& op, const Shape& shape, const Axis quantization_dimension,
const StorageT quantization_min, const StorageT quantization_max,
const absl::Span<const StorageT> input_zero_points,
const absl::Span<const ExpressedT> input_scales,
const absl::Span<const StorageT> output_zero_points,
const absl::Span<const ExpressedT> output_scales, const Strides& strides,
const StorageT* input_data, StorageT* output_data, const size_t depth,
size_t quantization_index) {
const DimensionSize dim = shape.Dim(depth);
if (depth + 1 >= shape.Rank()) {
for (DimensionSize i = 0; i < dim; ++i) {
if (depth == quantization_dimension) {
quantization_index = i;
}
const ExpressedT dequantized_input =
Dequantize(*input_data, input_zero_points[quantization_index],
input_scales[quantization_index]);
const ExpressedT dequantized_res = op(dequantized_input);
*output_data = Quantize<StorageT, ExpressedT>(
dequantized_res, output_zero_points[quantization_index],
static_cast<ExpressedT>(1) / output_scales[quantization_index],
quantization_min, quantization_max);
output_data += strides[depth];
input_data += strides[depth];
}
} else {
for (DimensionSize i = 0; i < dim; ++i) {
if (depth == quantization_dimension) {
quantization_index = i;
}
DequantizeOpQuantizePerAxisImpl(
op, shape, quantization_dimension, quantization_min, quantization_max,
input_zero_points, input_scales, output_zero_points, output_scales,
strides, input_data, output_data, depth + 1, quantization_index);
output_data += strides[depth];
input_data += strides[depth];
}
}
}
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerAxis(F&& func, const Tensor& input,
Tensor& output) {
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
const Shape& shape = input.shape();
const Axis quantization_dimension =
input.quantized_per_axis_element_type().QuantizedDimension();
const absl::Span<const StorageT> input_zero_points =
input.quantized_per_axis_element_type().ZeroPointsAs<storage_type>();
const absl::Span<const ExpressedT> input_scales =
input.quantized_per_axis_element_type().ScalesAs<expressed_type>();
const absl::Span<const StorageT> output_zero_points =
output.quantized_per_axis_element_type().ZeroPointsAs<storage_type>();
const absl::Span<const ExpressedT> output_scales =
output.quantized_per_axis_element_type().ScalesAs<expressed_type>();
const Strides& strides = ComputeStrides(shape);
const StorageT* input_data = input.GetDataAs<storage_type>();
StorageT* output_data = output.GetDataAs<storage_type>();
DequantizeOpQuantizePerAxisImpl(
func, shape, quantization_dimension, Storage<storage_type>::kMinValue,
Storage<storage_type>::kMaxValue, input_zero_points, input_scales,
output_zero_points, output_scales, strides, input_data, output_data,
0, 0);
}
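// Per-tensor variant: one scale/zero-point pair covers every element, so the
// tensor is processed as a single flat loop with the inverse output scale
// hoisted out of it.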
template <DataType storage_type, DataType expressed_type, typename F>
void DequantizeOpQuantizePerTensor(F& func, const Tensor& input,
Tensor& output) {
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
const DimensionSize num_elements = input.NumElements();
const StorageT input_zero_point =
input.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT input_scale =
input.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT output_zero_point =
output.quantized_per_tensor_element_type().ZeroPointAs<storage_type>();
const ExpressedT output_scale =
output.quantized_per_tensor_element_type().ScaleAs<expressed_type>();
const StorageT* input_data = input.GetDataAs<storage_type>();
StorageT* output_data = output.GetDataAs<storage_type>();
const ExpressedT inv_scale = static_cast<ExpressedT>(1) / output_scale;
for (DimensionSize i = 0; i < num_elements;
++i, ++input_data, ++output_data) {
const ExpressedT dequantized_input =
Dequantize(*input_data, input_zero_point, input_scale);
const ExpressedT dequantized_res = func(dequantized_input);
*output_data = Quantize<storage_type, expressed_type>(
dequantized_res, output_zero_point, inv_scale);
}
}
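// Non-quantized fast path: applies `func` element-wise over the flat data.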
template <DataType data_type, class F>
void EvaluateNoQuantization(F&& func, const Tensor& input, Tensor& output) {
absl::c_transform(input.Flat<data_type>(), output.GetDataAs<data_type>(),
static_cast<F&&>(func));
}
}
template <class F>
struct UnaryElementwiseOp {
struct Attributes {};
F func;
};
template <class F>
UnaryElementwiseOp<F> Create(typename UnaryElementwiseOp<F>::Attributes,
const F& func) {
return UnaryElementwiseOp<F>{func};
}
template <class F>
UnaryElementwiseOp<F> Create(typename UnaryElementwiseOp<F>::Attributes,
F&& func) {
return UnaryElementwiseOp<F>{static_cast<F&&>(func)};
}
template <class F>
absl::Status Prepare(UnaryElementwiseOp<F>& op, const Tensor& input,
Tensor& output) {
return Propagate(input.shape(), output.shape());
}
template <class F>
absl::Status Evaluate(UnaryElementwiseOp<F>& op, const Tensor& input,
Tensor& output) {
if (input.IsPerAxisQuantized()) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerAxis,
input.quantized_per_axis_element_type().StorageType(),
input.quantized_per_axis_element_type().ExpressedType(),
op.func, input, output);
} else if (input.IsPerTensorQuantized()) {
DISPATCH_QUANTIZED(
detail::DequantizeOpQuantizePerTensor,
input.quantized_per_tensor_element_type().StorageType(),
input.quantized_per_tensor_element_type().ExpressedType(), op.func,
        input, output);
} else {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
input.tensor_element_type(), op.func, input,
output);
}
return absl::OkStatus();
}
}  // namespace shlo_ref
#endif  // TENSORFLOW_LITE_EXPERIMENTAL_SHLO_OPS_UNARY_ELEMENTWISE_H_
#include <cstddef>
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/quantized_tensor_element_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::ElementsAreArray;
namespace shlo_ref {
namespace {
struct Abs {
template <class T>
T operator()(const T& val) {
return val < static_cast<T>(0) ? static_cast<T>(-val) : val;
}
};
template <DataType storage_type, DataType expressed_type = DataType::kF32>
struct TestParam {
static constexpr DataType kStorage = storage_type;
static constexpr DataType kExpressed = expressed_type;
using StorageT = StorageType<storage_type>;
using ExpressedT = StorageType<expressed_type>;
};
template <class T>
struct UnaryElementWiseTest : ::testing::Test {};
TYPED_TEST_SUITE(UnaryElementWiseTest, ArithmeticTestTypes);
TYPED_TEST(UnaryElementWiseTest, NonQuantizedWithAbs) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), Abs());
auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
template <class T>
struct QuantizedUnaryElementWiseTest : ::testing::Test {};
TYPED_TEST_SUITE(QuantizedUnaryElementWiseTest, QuantizedTestTypes);
TYPED_TEST(QuantizedUnaryElementWiseTest, QuantizedPerTensorWithAbs) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = RandomBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
const ExpressedT scale = static_cast<ExpressedT>(1.5);
const StorageT zero_point = static_cast<StorageT>(5);
const QuantizedElementTypePerTensor tensor_type =
QuantizedElementTypePerTensor(TypeParam::kStorage, zero_point,
TypeParam::kExpressed, scale);
Tensor input_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerTensorTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(), [zero_point, scale](auto v) {
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = Abs()(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, static_cast<ExpressedT>(1.) / scale);
});
auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
TYPED_TEST(QuantizedUnaryElementWiseTest, QuantizedPerAxisWithAbs) {
using StorageT = typename TypeParam::StorageT;
using ExpressedT = typename TypeParam::ExpressedT;
const Shape shape({4, 3, 2});
const int quantized_dimension = 2;
const size_t rank = shape.Rank();
const Axis quantized_dimension_size = shape.Dim(quantized_dimension);
const size_t quantization_stride = [&] {
size_t res = 1;
for (int64_t i = rank - 1; i > quantized_dimension; --i) {
res *= shape.Dim(i);
}
return res;
}();
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape);
Vector<StorageT> output_data(shape.NumElements());
Vector<StorageT> zero_points_data = RandomBuffer<TypeParam::kStorage>(
Shape({shape.Dim(2)}), static_cast<StorageT>(-5),
static_cast<StorageT>(5));
Vector<ExpressedT> scales_data = RandomBuffer<TypeParam::kExpressed>(
Shape({shape.Dim(2)}), static_cast<ExpressedT>(1),
static_cast<ExpressedT>(3));
const QuantizedElementTypePerAxis tensor_type = QuantizedElementTypePerAxis(
TypeParam::kStorage, zero_points_data, TypeParam::kExpressed, scales_data,
quantized_dimension);
Tensor input_tensor{
.type = QuantizedPerAxisTensorType{.shape = shape,
.element_type = tensor_type},
.data = input_data.data()};
Tensor output_tensor{
.type = QuantizedPerAxisTensorType{.shape = shape,
.element_type = tensor_type},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(
input_data, expected_data.begin(),
[&, element_index = 0ull, quantization_index = 0ull](auto v) mutable {
const StorageT zero_point = zero_points_data[quantization_index];
const ExpressedT scale = scales_data[quantization_index];
if (++element_index >= quantization_stride) {
element_index = 0;
if (++quantization_index >= quantized_dimension_size) {
quantization_index = 0;
}
}
const ExpressedT dequantized_input = Dequantize(v, zero_point, scale);
const ExpressedT dequantized_res = Abs()(dequantized_input);
return Quantize<TypeParam::kStorage, TypeParam::kExpressed>(
dequantized_res, zero_point, ExpressedT(1) / scale);
});
auto op = Create(UnaryElementwiseOp<Abs>::Attributes{}, Abs());
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, ElementsAreArray(expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/unary_elementwise.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/unary_elementwise_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e17e7a4e-c44e-407c-9a22-e2ea6b473e56 | cpp | tensorflow/tensorflow | atan2 | tensorflow/lite/kernels/atan2.cc | tensorflow/lite/kernels/atan2_test.cc | #include <cmath>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace atan2 {
TfLiteStatus EnsureSameShape(
TfLiteContext* context,
const TfLiteTensor* a, const TfLiteTensor* b) {
TF_LITE_ENSURE_EQ(context,
tflite::NumDimensions(a),
tflite::NumDimensions(b));
return TfLiteStatus::kTfLiteOk;
}
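// Checks that both inputs share rank and element type (float32 or float64)
// and resizes the output to the shape of the first input.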
TfLiteStatus Atan2Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
TF_LITE_ENSURE_OK(context, EnsureSameShape(context, input_y, input_x));
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, input_x->type);
TF_LITE_ENSURE_TYPES_EQ(context, input_y->type, output->type);
TF_LITE_ENSURE(context,
input_y->type == kTfLiteFloat32 ||
input_y->type == kTfLiteFloat64);
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input_y->dims);
return context->ResizeTensor(context, output, output_shape);
}
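// Computes std::atan2(y, x) element-wise over contiguous tensors.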
template<typename Float>
TfLiteStatus Atan2(const TfLiteTensor* input_y,
const TfLiteTensor* input_x,
TfLiteTensor* output) {
const Float* data_y = tflite::GetTensorData<Float>(input_y);
const Float* data_x = tflite::GetTensorData<Float>(input_x);
Float* data_output = tflite::GetTensorData<Float>(output);
const int64_t num_elements = NumElements(input_y);
for (int64_t i = 0; i < num_elements; ++i) {
data_output[i] = std::atan2(data_y[i], data_x[i]);
}
return TfLiteStatus::kTfLiteOk;
}
TfLiteStatus Atan2Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_y = tflite::GetInput(context, node, 0);
const TfLiteTensor* input_x = tflite::GetInput(context, node, 1);
TfLiteTensor* output = tflite::GetOutput(context, node, 0);
switch (output->type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_OK(context, Atan2<float>(input_y, input_x, output));
break;
case kTfLiteFloat64:
TF_LITE_ENSURE_OK(context, Atan2<double>(input_y, input_x, output));
break;
default: {
TF_LITE_KERNEL_LOG(
context,
"Unsupported datatype for atan2 output: %s",
TfLiteTypeGetName(output->type));
return TfLiteStatus::kTfLiteError;
}
}
return TfLiteStatus::kTfLiteOk;
}
}
TfLiteRegistration* Register_ATAN2() {
static TfLiteRegistration r = {
nullptr, nullptr, atan2::Atan2Prepare, atan2::Atan2Eval};
return &r;
}
}
}
} | #include <cmath>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
template <typename T>
tflite::TensorType GetTTEnum();
template <>
tflite::TensorType GetTTEnum<float>() {
return tflite::TensorType_FLOAT32;
}
template <>
tflite::TensorType GetTTEnum<double>() {
return tflite::TensorType_FLOAT64;
}
class Atan2Model : public tflite::SingleOpModel {
public:
Atan2Model(tflite::TensorData y,
tflite::TensorData x,
tflite::TensorData output) {
y_ = AddInput(y);
x_ = AddInput(x);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ATAN2, BuiltinOptions_NONE, 0);
BuildInterpreter({GetShape(y_), GetShape(x_)});
}
template <typename T>
std::vector<T> GetOutput(
const std::vector<T>& y,
const std::vector<T>& x) {
PopulateTensor<T>(y_, y);
PopulateTensor<T>(x_, x);
Invoke();
return ExtractVector<T>(output_);
}
private:
int y_;
int x_;
int output_;
};
template <typename Float>
class Atan2Test : public ::testing::Test {
public:
using FloatType = Float;
};
using TestTypes = ::testing::Types<float, double>;
TYPED_TEST_SUITE(Atan2Test, TestTypes);
TYPED_TEST(Atan2Test, TestScalar) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {}};
tflite::TensorData x = {GetTTEnum<Float>(), {}};
tflite::TensorData output = {GetTTEnum<Float>(), {}};
Atan2Model m(y, x, output);
auto got = m.GetOutput<Float>({0.0}, {0.0});
ASSERT_EQ(got.size(), 1);
EXPECT_FLOAT_EQ(got[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({1.0}, {0.0})[0], M_PI/2);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({0.0}, {1.0})[0], 0.0);
ASSERT_FLOAT_EQ(m.GetOutput<Float>({-1.0}, {0.0})[0], -M_PI/2);
}
TYPED_TEST(Atan2Test, TestBatch) {
using Float = typename TestFixture::FloatType;
tflite::TensorData y = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData x = {GetTTEnum<Float>(), {4, 2, 1}};
tflite::TensorData output = {GetTTEnum<Float>(), {4, 2, 1}};
Atan2Model m(y, x, output);
std::vector<Float> y_data = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8};
std::vector<Float> x_data = {0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1};
auto got = m.GetOutput<Float>(y_data, x_data);
ASSERT_EQ(got.size(), 8);
for (int i = 0; i < 8; ++i) {
EXPECT_FLOAT_EQ(got[i], std::atan2(y_data[i], x_data[i]));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/atan2_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c9ed0c99-fb9b-460c-b0cb-e6aefe4b7ca8 | cpp | tensorflow/tensorflow | range | tensorflow/lite/kernels/range.cc | tensorflow/lite/kernels/range_test.cc | #include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <functional>
#include <type_traits>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace range {
namespace {
constexpr int kStartTensor = 0;
constexpr int kLimitTensor = 1;
constexpr int kDeltaTensor = 2;
constexpr int kOutputTensor = 0;
struct OpData {
bool noop;
};
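// Computes how many elements the range will produce, requiring a non-zero
// `delta` that points from `start` toward `limit`; integral types use exact
// ceiling division, floating point uses std::ceil.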
template <typename T>
TfLiteStatus GetSize(TfLiteContext* context, T start, T limit, T delta,
int* size) {
TF_LITE_ENSURE(context, !std::equal_to<T>()(delta, 0));
TF_LITE_ENSURE(
context, (start >= limit && delta < 0) || (start <= limit && delta > 0));
*size =
(std::is_integral<T>::value
? ((std::abs(limit - start) + std::abs(delta) - 1) / std::abs(delta))
: std::ceil(std::abs((limit - start) / delta)));
return kTfLiteOk;
}
TfLiteStatus ResizeOutput(TfLiteContext* context, const TfLiteTensor* start,
const TfLiteTensor* limit, const TfLiteTensor* delta,
TfLiteTensor* output) {
int size = 0;
switch (start->type) {
case kTfLiteInt32: {
TF_LITE_ENSURE_OK(context,
GetSize(context, *GetTensorData<int32_t>(start),
*GetTensorData<int32_t>(limit),
*GetTensorData<int32_t>(delta), &size));
break;
}
case kTfLiteInt64: {
TF_LITE_ENSURE_OK(context,
GetSize(context, *GetTensorData<int64_t>(start),
*GetTensorData<int64_t>(limit),
*GetTensorData<int64_t>(delta), &size));
break;
}
case kTfLiteFloat32: {
TF_LITE_ENSURE_OK(context, GetSize(context, *GetTensorData<float>(start),
*GetTensorData<float>(limit),
*GetTensorData<float>(delta), &size));
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Unknown data type: %d", start->type);
return kTfLiteError;
}
}
TfLiteIntArray* output_shape_array = TfLiteIntArrayCreate(1);
output_shape_array->data[0] = size;
return context->ResizeTensor(context, output, output_shape_array);
}
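// Fills `output` with start, start + delta, start + 2 * delta, ...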
template <typename T>
void CalculateRange(const TfLiteTensor* start, const TfLiteTensor* delta,
TfLiteTensor* output) {
const T start_value = *GetTensorData<T>(start);
const T delta_value = *GetTensorData<T>(delta);
T* output_data = GetTensorData<T>(output);
const int num_elements = NumElements(output);
T value = start_value;
for (int i = 0; i < num_elements; ++i) {
output_data[i] = value;
value += delta_value;
}
}
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* start,
const TfLiteTensor* delta, TfLiteTensor* output) {
switch (output->type) {
case kTfLiteInt32: {
CalculateRange<int32_t>(start, delta, output);
break;
}
case kTfLiteFloat32: {
CalculateRange<float>(start, delta, output);
break;
}
case kTfLiteInt64: {
CalculateRange<int64_t>(start, delta, output);
break;
}
default: {
TF_LITE_KERNEL_LOG(context, "Unsupported data type: %d", output->type);
return kTfLiteError;
}
}
return kTfLiteOk;
}
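// If start, limit and delta are all constant (or persistent) tensors, the
// output is resized and filled once here and marked persistent read-only so
// Eval becomes a no-op; otherwise the output is marked dynamic and resized
// during Eval.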
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
op_data->noop = false;
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0);
TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0);
const auto dtype = start->type;
if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32 &&
dtype != kTfLiteInt64) {
TF_LITE_KERNEL_LOG(context, "Unknown index output data type: %s",
TfLiteTypeGetName(dtype));
return kTfLiteError;
}
TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype);
TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = dtype;
if (IsConstantOrPersistentTensor(start) &&
IsConstantOrPersistentTensor(limit) &&
IsConstantOrPersistentTensor(delta)) {
SetTensorToPersistentRo(output);
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
op_data->noop = true;
return EvalImpl(context, start, delta, output);
}
SetTensorToDynamic(output);
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* start;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kStartTensor, &start));
const TfLiteTensor* limit;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kLimitTensor, &limit));
const TfLiteTensor* delta;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDeltaTensor, &delta));
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
if (op_data->noop) {
return kTfLiteOk;
}
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (IsDynamicTensor(output)) {
TF_LITE_ENSURE_OK(context,
ResizeOutput(context, start, limit, delta, output));
}
return EvalImpl(context, start, delta, output);
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return new OpData;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
}
}
TfLiteRegistration* Register_RANGE() {
static TfLiteRegistration r = {range::Init, range::Free, range::Prepare,
range::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
template <typename T>
class RangeOpModel : public SingleOpModel {
public:
explicit RangeOpModel(const TensorType& dtype) {
start_ = AddInput(dtype);
limit_ = AddInput(dtype);
delta_ = AddInput(dtype);
output_ = AddOutput(dtype);
SetBuiltinOp(BuiltinOperator_RANGE, BuiltinOptions_RangeOptions,
CreateRangeOptions(builder_).Union());
BuildInterpreter({GetShape(start_), GetShape(limit_), GetShape(delta_)});
}
explicit RangeOpModel(const TensorType& dtype, const std::vector<T>& start,
const std::vector<T>& limit,
const std::vector<T>& delta) {
start_ = AddConstInput(dtype, start);
limit_ = AddConstInput(dtype, limit);
delta_ = AddConstInput(dtype, delta);
output_ = AddOutput(dtype);
SetBuiltinOp(BuiltinOperator_RANGE, BuiltinOptions_RangeOptions,
CreateRangeOptions(builder_).Union());
BuildInterpreter({GetShape(start_), GetShape(limit_), GetShape(delta_)});
}
int start() { return start_; }
int limit() { return limit_; }
int delta() { return delta_; }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int start_;
int limit_;
int delta_;
int output_;
};
TEST(RangeOpModel, Simple) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {0});
model.PopulateTensor<int32_t>(model.limit(), {4});
model.PopulateTensor<int32_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, SimpleConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, DeltaGreaterThanOne) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {2});
model.PopulateTensor<int32_t>(model.limit(), {9});
model.PopulateTensor<int32_t>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, DeltaGreaterThanOneConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, NegativeDelta) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {10});
model.PopulateTensor<int32_t>(model.limit(), {3});
model.PopulateTensor<int32_t>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, NegativeDeltaConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, FloatSimple) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {0});
model.PopulateTensor<float>(model.limit(), {4});
model.PopulateTensor<float>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, FloatSimpleConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, FloatDeltaGreaterThanOne) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {2});
model.PopulateTensor<float>(model.limit(), {9});
model.PopulateTensor<float>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, FloatDeltaGreaterThanOneConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, FloatNegativeDelta) {
RangeOpModel<float> model(TensorType_FLOAT32);
model.PopulateTensor<float>(model.start(), {10});
model.PopulateTensor<float>(model.limit(), {3});
model.PopulateTensor<float>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, FloatNegativeDeltaConst) {
RangeOpModel<float> model(TensorType_FLOAT32, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, EmptyOutput) {
RangeOpModel<int32_t> model(TensorType_INT32);
model.PopulateTensor<int32_t>(model.start(), {0});
model.PopulateTensor<int32_t>(model.limit(), {0});
model.PopulateTensor<int32_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, EmptyOutputConst) {
RangeOpModel<int32_t> model(TensorType_INT32, {0}, {0}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, Int64Simple) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {0});
model.PopulateTensor<int64_t>(model.limit(), {4});
model.PopulateTensor<int64_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, Int64SimpleConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {0}, {4}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(0, 1, 2, 3));
}
TEST(RangeOpModel, Int64DeltaGreaterThanOne) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {2});
model.PopulateTensor<int64_t>(model.limit(), {9});
model.PopulateTensor<int64_t>(model.delta(), {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, Int64DeltaGreaterThanOneConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {2}, {9}, {2});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(4));
EXPECT_THAT(model.GetOutput(), ElementsAre(2, 4, 6, 8));
}
TEST(RangeOpModel, Int64NegativeDelta) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {10});
model.PopulateTensor<int64_t>(model.limit(), {3});
model.PopulateTensor<int64_t>(model.delta(), {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, Int64NegativeDeltaConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {10}, {3}, {-3});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(3));
EXPECT_THAT(model.GetOutput(), ElementsAre(10, 7, 4));
}
TEST(RangeOpModel, Int64EmptyOutput) {
RangeOpModel<int64_t> model(TensorType_INT64);
model.PopulateTensor<int64_t>(model.start(), {0});
model.PopulateTensor<int64_t>(model.limit(), {0});
model.PopulateTensor<int64_t>(model.delta(), {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
TEST(RangeOpModel, Int64EmptyOutputConst) {
RangeOpModel<int64_t> model(TensorType_INT64, {0}, {0}, {1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAre(0));
EXPECT_THAT(model.GetOutput(), ElementsAre());
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/range.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/range_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
944a5e29-2e4e-4401-9e0f-8585788d40e5 | cpp | tensorflow/tensorflow | kernel_stats_utils | tensorflow/core/profiler/utils/kernel_stats_utils.cc | tensorflow/core/profiler/utils/kernel_stats_utils_test.cc | #include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <algorithm>
#include <string>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
const int kMaxNumOfKernels = 1000;
constexpr absl::string_view kTensorCoreKernelNamePatterns[] = {
"16816",
"c1688",
"conv1x1",
"conv2d_c1_k1",
"dgrad_1x1_stride_2x2",
"direct_group",
"first_layer_wgrad_kernel",
"h1688",
"h884",
"hmma",
"i16832",
"i8816",
"s884",
"s1688",
"xmma_gemm",
"xmma_implicit_gemm",
"xmma_sparse_conv",
"xmma_sparse_gemm",
"xmma_warp_specialized_implicit_gemm"};
}  // namespace
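// Parses a whitespace-separated list of "key:value" tokens from an XStat
// kernel-details string (e.g. "regs:", "static_shared:", "dynamic_shared:",
// "block:x,y,z", "grid:x,y,z", "occ_pct:") into the KernelReport proto.
// Block and grid dimensions default to 1; malformed tokens are skipped.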
void ParseKernelLaunchParams(absl::string_view xstat_kernel_details,
KernelReport* kernel) {
const std::vector<absl::string_view> params =
absl::StrSplit(xstat_kernel_details, absl::ByAnyChar(" \n"));
constexpr uint32 kNumDimensions = 3;
for (uint32 dim = 0; dim < kNumDimensions; ++dim) {
kernel->add_block_dim(1);
kernel->add_grid_dim(1);
}
for (const auto& param : params) {
const std::vector<absl::string_view> key_value = absl::StrSplit(param, ':');
if (key_value.size() != 2) {
continue;
}
absl::string_view key = key_value[0];
absl::string_view value_str = key_value[1];
uint32 value = 0;
double pct = 0.0;
if (key == "regs" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_registers_per_thread(value);
} else if (key == "static_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_static_shmem_bytes(value);
} else if (key == "dynamic_shared" && absl::SimpleAtoi(value_str, &value)) {
kernel->set_dynamic_shmem_bytes(value);
} else if (key == "block") {
const std::vector<absl::string_view>& block =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (block.size() == 3 && absl::SimpleAtoi(block[0], &tmp[0]) &&
absl::SimpleAtoi(block[1], &tmp[1]) &&
absl::SimpleAtoi(block[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_block_dim()->begin());
}
} else if (key == "grid") {
const std::vector<absl::string_view>& grid =
absl::StrSplit(value_str, ',');
uint32 tmp[3];
if (grid.size() == 3 && absl::SimpleAtoi(grid[0], &tmp[0]) &&
absl::SimpleAtoi(grid[1], &tmp[1]) &&
absl::SimpleAtoi(grid[2], &tmp[2])) {
std::copy_n(tmp, 3, kernel->mutable_grid_dim()->begin());
}
} else if (key == "occ_pct" && absl::SimpleAtod(value_str, &pct)) {
kernel->set_occupancy_pct(pct);
}
}
}
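// Returns true if the kernel name contains any of the known Tensor Core
// kernel name substrings listed above.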
bool IsKernelUsingTensorCore(absl::string_view kernel_name) {
VLOG(1) << "kernel name: " << kernel_name;
for (absl::string_view pattern : kTensorCoreKernelNamePatterns) {
if (absl::StrContains(kernel_name, pattern)) {
return true;
}
}
return false;
}
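// Returns true for TF op names that are eligible to be lowered to Tensor
// Core kernels (convolutions, matmuls, cuDNN RNNs, and XLA dot ops).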
bool IsOpTensorCoreEligible(absl::string_view tf_op_name) {
return false
|| absl::EndsWith(tf_op_name, "Conv2D")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropFilter")
|| absl::EndsWith(tf_op_name, "Conv2DBackpropInput")
|| absl::EndsWith(tf_op_name, "Conv3D")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNative")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropFilter")
|| absl::EndsWith(tf_op_name, "DepthwiseConv2dNativeBackpropInput")
|| absl::StrContains(tf_op_name, "BatchMatMul")
|| absl::EndsWith(tf_op_name, "/MatMul")
|| absl::EndsWith(tf_op_name, "FusedMatMul")
|| absl::EndsWith(tf_op_name, "/CudnnRNN")
|| absl::StrContains(tf_op_name, "CudnnRNNV")
|| absl::StrContains(tf_op_name, "CudnnRNNForward")
|| absl::StrContains(tf_op_name, "CudnnRNNBackprop")
|| absl::EndsWith(tf_op_name, "XlaDot")
|| absl::EndsWith(tf_op_name, "XlaDotV2");
}
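// An einsum is considered Tensor Core eligible only if its equation is a
// single binary contraction: exactly one "->" and exactly two
// comma-separated operands on the left-hand side.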
bool IsEinsumTensorCoreEligible(absl::string_view equation) {
if (equation.empty()) {
return false;
}
const std::vector<absl::string_view> input_output =
absl::StrSplit(equation, "->");
if (input_output.size() != 2) {
return false;
}
const std::vector<absl::string_view> lhs_rhs =
absl::StrSplit(input_output[0], ',');
return lhs_rhs.size() == 2;
}
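// Total ordering over every identity field of a KernelReport; used to break
// ties deterministically when sorting reports by duration.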
bool KernelReportLessThanComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
auto lhs_tuple = std::make_tuple(
lhs.name(),
lhs.grid_dim(0),
lhs.grid_dim(1),
lhs.grid_dim(2),
lhs.block_dim(0),
lhs.block_dim(1),
lhs.block_dim(2),
lhs.registers_per_thread(),
lhs.static_shmem_bytes(),
lhs.dynamic_shmem_bytes(),
lhs.is_kernel_using_tensor_core(),
lhs.is_op_tensor_core_eligible(),
lhs.op_name());
auto rhs_tuple = std::make_tuple(
rhs.name(),
rhs.grid_dim(0),
rhs.grid_dim(1),
rhs.grid_dim(2),
rhs.block_dim(0),
rhs.block_dim(1),
rhs.block_dim(2),
rhs.registers_per_thread(),
rhs.static_shmem_bytes(),
rhs.dynamic_shmem_bytes(),
rhs.is_kernel_using_tensor_core(),
rhs.is_op_tensor_core_eligible(),
rhs.op_name());
return lhs_tuple < rhs_tuple;
}
bool KernelReportEqualToComparator::operator()(const KernelReport& lhs,
const KernelReport& rhs) const {
return (
lhs.is_kernel_using_tensor_core() == rhs.is_kernel_using_tensor_core() &&
lhs.is_op_tensor_core_eligible() == rhs.is_op_tensor_core_eligible() &&
lhs.block_dim(0) == rhs.block_dim(0) &&
lhs.block_dim(1) == rhs.block_dim(1) &&
lhs.block_dim(2) == rhs.block_dim(2) &&
lhs.grid_dim(0) == rhs.grid_dim(0) &&
lhs.grid_dim(1) == rhs.grid_dim(1) &&
lhs.grid_dim(2) == rhs.grid_dim(2) &&
lhs.registers_per_thread() == rhs.registers_per_thread() &&
lhs.static_shmem_bytes() == rhs.static_shmem_bytes() &&
lhs.dynamic_shmem_bytes() == rhs.dynamic_shmem_bytes() &&
lhs.name() == rhs.name() &&
lhs.op_name() == rhs.op_name());
}
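// Sorts reports by total duration (descending, ties broken by the total
// ordering above) and truncates the database to kMaxNumOfKernels entries.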
void SortAndKeepTopKDurationKernelReportsInDb(KernelStatsDb* kernel_stats_db) {
auto comp = [](const KernelReport& lhs, const KernelReport& rhs) {
return lhs.total_duration_ns() > rhs.total_duration_ns() ||
(lhs.total_duration_ns() == rhs.total_duration_ns() &&
KernelReportLessThanComparator()(lhs, rhs));
};
if (kernel_stats_db->reports_size() > kMaxNumOfKernels) {
std::partial_sort(
kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end(), comp);
kernel_stats_db->mutable_reports()->erase(
kernel_stats_db->mutable_reports()->begin() + kMaxNumOfKernels,
kernel_stats_db->mutable_reports()->end());
} else {
std::sort(kernel_stats_db->mutable_reports()->begin(),
kernel_stats_db->mutable_reports()->end(), comp);
}
}
void CopyTopKDurationKernelReportsToDb(const KernelReportMap& reports,
KernelStatsDb* dst) {
std::vector<std::pair<const KernelReport*, const KernelReportValue*>>
kernels_to_sort;
kernels_to_sort.reserve(reports.size());
for (const auto& report_value : reports) {
kernels_to_sort.push_back(
std::make_pair(&report_value.first, &report_value.second));
}
auto comp =
[](const std::pair<const KernelReport*, const KernelReportValue*>& lhs,
const std::pair<const KernelReport*, const KernelReportValue*>& rhs) {
return lhs.second->total_duration_ns > rhs.second->total_duration_ns ||
(lhs.second->total_duration_ns ==
rhs.second->total_duration_ns &&
KernelReportLessThanComparator()(*lhs.first, *rhs.first));
};
if (kernels_to_sort.size() > kMaxNumOfKernels) {
absl::c_partial_sort(kernels_to_sort,
kernels_to_sort.begin() + kMaxNumOfKernels, comp);
} else {
absl::c_sort(kernels_to_sort, comp);
}
int copy_size =
std::min(kMaxNumOfKernels, static_cast<int>(kernels_to_sort.size()));
for (int i = 0; i < copy_size; i++) {
KernelReport* report = dst->add_reports();
*report = *kernels_to_sort[i].first;
const KernelReportValue& kernel_value = *kernels_to_sort[i].second;
report->set_occurrences(kernel_value.occurrences);
report->set_min_duration_ns(kernel_value.min_duration_ns);
report->set_max_duration_ns(kernel_value.max_duration_ns);
report->set_total_duration_ns(kernel_value.total_duration_ns);
}
}
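// Accumulates a new measurement into the map: the first occurrence copies
// the value wholesale, later ones fold in totals, min/max, and occurrences.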
void InsertOrUpdateKernelReport(const KernelReport& kernel,
const KernelReportValue& value,
KernelReportMap* dst) {
KernelReportValue& element = (*dst)[kernel];
if (element.occurrences == 0) {
element = value;
} else {
element.total_duration_ns += value.total_duration_ns;
element.min_duration_ns =
std::min(element.min_duration_ns, value.min_duration_ns);
element.max_duration_ns =
std::max(element.max_duration_ns, value.max_duration_ns);
element.occurrences += value.occurrences;
}
}
void MergeKernelReports(const KernelReportMap& reports, KernelReportMap* dst) {
for (auto& kernel_value : reports) {
InsertOrUpdateKernelReport(kernel_value.first, kernel_value.second, dst);
}
}
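// Aggregates kernel-level reports into per-op statistics: total duration and
// the portion of that time spent in Tensor Core kernels.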
KernelStatsByOpName GroupKernelReportsByOpName(
const KernelStatsDb& kernel_stats_db) {
KernelStatsByOpName op_level_kernel_stats;
for (const KernelReport& kernel_report : kernel_stats_db.reports()) {
auto ret = op_level_kernel_stats.emplace(kernel_report.op_name(),
OpLevelKernelStats());
if (ret.second) {
OpLevelKernelStats& stats = ret.first->second;
stats.is_op_tensor_core_eligible =
kernel_report.is_op_tensor_core_eligible();
stats.total_duration_ns += kernel_report.total_duration_ns();
if (kernel_report.is_kernel_using_tensor_core()) {
stats.tensor_core_duration_ns += kernel_report.total_duration_ns();
}
} else {
OpLevelKernelStats& stats = ret.first->second;
DCHECK_EQ(stats.is_op_tensor_core_eligible,
kernel_report.is_op_tensor_core_eligible());
stats.total_duration_ns += kernel_report.total_duration_ns();
if (kernel_report.is_kernel_using_tensor_core()) {
stats.tensor_core_duration_ns += kernel_report.total_duration_ns();
}
}
}
return op_level_kernel_stats;
}
}  // namespace profiler
} | #include "tensorflow/core/profiler/utils/kernel_stats_utils.h"
#include <gmock/gmock.h>
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/kernel_stats.pb.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::FieldsAre;
TEST(KernelStatsUtilsTest, TestGroupKernelReportsByOpName) {
KernelStatsDb kernel_stats_db;
KernelReport* kernel_report_1 = kernel_stats_db.add_reports();
kernel_report_1->set_name("op1_kernel1");
kernel_report_1->set_op_name("op1");
kernel_report_1->set_total_duration_ns(1000);
kernel_report_1->set_is_kernel_using_tensor_core(true);
kernel_report_1->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_2 = kernel_stats_db.add_reports();
kernel_report_2->set_name("op1_kernel2");
kernel_report_2->set_op_name("op1");
kernel_report_2->set_total_duration_ns(1000);
kernel_report_2->set_is_kernel_using_tensor_core(false);
kernel_report_2->set_is_op_tensor_core_eligible(true);
KernelReport* kernel_report_3 = kernel_stats_db.add_reports();
kernel_report_3->set_name("op2_kernel1");
kernel_report_3->set_op_name("op2");
kernel_report_3->set_total_duration_ns(100);
kernel_report_3->set_is_kernel_using_tensor_core(false);
kernel_report_3->set_is_op_tensor_core_eligible(false);
KernelStatsByOpName kernel_stats_by_op_name =
GroupKernelReportsByOpName(kernel_stats_db);
ASSERT_EQ(kernel_stats_by_op_name.size(), 2);
auto iter1 = kernel_stats_by_op_name.find("op1");
auto iter2 = kernel_stats_by_op_name.find("op2");
ASSERT_NE(iter1, kernel_stats_by_op_name.end());
ASSERT_NE(iter2, kernel_stats_by_op_name.end());
const OpLevelKernelStats& op1_stats = iter1->second;
const OpLevelKernelStats& op2_stats = iter2->second;
EXPECT_EQ(op1_stats.is_op_tensor_core_eligible, true);
EXPECT_EQ(op1_stats.total_duration_ns, 2000);
EXPECT_EQ(op1_stats.tensor_core_duration_ns, 1000);
EXPECT_EQ(op2_stats.is_op_tensor_core_eligible, false);
EXPECT_EQ(op2_stats.total_duration_ns, 100);
EXPECT_EQ(op2_stats.tensor_core_duration_ns, 0);
}
TEST(KernelStatsUtilsTest, KernelDetailsXStatParser) {
xla::profiler::KernelDetails kernel_info;
kernel_info.registers_per_thread = 10;
kernel_info.static_shared_memory_usage = 128;
kernel_info.dynamic_shared_memory_usage = 256;
kernel_info.block_x = 32;
kernel_info.block_y = 8;
kernel_info.block_z = 4;
kernel_info.grid_x = 3;
kernel_info.grid_y = 2;
kernel_info.grid_z = 1;
const double occupancy_pct = 50.0;
std::string xstat_kernel_details = ToXStat(kernel_info, occupancy_pct);
KernelReport kernel;
ParseKernelLaunchParams(xstat_kernel_details, &kernel);
EXPECT_EQ(kernel.registers_per_thread(), 10);
EXPECT_EQ(kernel.static_shmem_bytes(), 128);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 256);
EXPECT_EQ(kernel.block_dim()[0], 32);
EXPECT_EQ(kernel.block_dim()[1], 8);
EXPECT_EQ(kernel.block_dim()[2], 4);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
}
TEST(KernelStatsUtilsTest, KernelDetailsTokenizer) {
KernelReport kernel;
absl::string_view kernel_details_0 = "odd grid:3,2,1";
ParseKernelLaunchParams(kernel_details_0, &kernel);
EXPECT_EQ(kernel.grid_dim()[0], 3);
EXPECT_EQ(kernel.grid_dim()[1], 2);
EXPECT_EQ(kernel.grid_dim()[2], 1);
absl::string_view kernel_details_1 = "block:6,5,4 odd ";
ParseKernelLaunchParams(kernel_details_1, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 6);
EXPECT_EQ(kernel.block_dim()[1], 5);
EXPECT_EQ(kernel.block_dim()[2], 4);
absl::string_view kernel_details_2 = "block:1,2,3 odd grid:4,5,6";
ParseKernelLaunchParams(kernel_details_2, &kernel);
EXPECT_EQ(kernel.block_dim()[0], 1);
EXPECT_EQ(kernel.block_dim()[1], 2);
EXPECT_EQ(kernel.block_dim()[2], 3);
EXPECT_EQ(kernel.grid_dim()[0], 4);
EXPECT_EQ(kernel.grid_dim()[1], 5);
EXPECT_EQ(kernel.grid_dim()[2], 6);
absl::string_view kernel_details_3 = "static_shared:7 dynamic_shared:8";
ParseKernelLaunchParams(kernel_details_3, &kernel);
EXPECT_EQ(kernel.static_shmem_bytes(), 7);
EXPECT_EQ(kernel.dynamic_shmem_bytes(), 8);
}
TEST(KernelStatsUtilsTest, TestInsertOrUpdateKernelReport) {
KernelReport kr;
kr.set_name("op1_kernel1");
kr.set_op_name("op1");
kr.add_block_dim(32);
kr.add_block_dim(8);
kr.add_block_dim(4);
kr.add_grid_dim(3);
kr.add_grid_dim(2);
kr.add_grid_dim(1);
KernelReportValue krv1;
krv1.total_duration_ns = 1700;
krv1.min_duration_ns = 500;
krv1.max_duration_ns = 1200;
krv1.occurrences = 2;
KernelReportValue krv2;
krv2.total_duration_ns = 900;
krv2.min_duration_ns = 900;
krv2.max_duration_ns = 900;
krv2.occurrences = 1;
KernelReportMap dst1;
InsertOrUpdateKernelReport(kr, krv1, &dst1);
InsertOrUpdateKernelReport(kr, krv2, &dst1);
EXPECT_THAT(dst1[kr], FieldsAre(2600, 500, 1200, 3));
KernelReportMap dst2;
InsertOrUpdateKernelReport(kr, krv2, &dst2);
InsertOrUpdateKernelReport(kr, krv1, &dst2);
EXPECT_THAT(dst2[kr], FieldsAre(2600, 500, 1200, 3));
}
}  // namespace
}  // namespace profiler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/kernel_stats_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/utils/kernel_stats_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
177c49f7-cafe-4e0f-86a9-c448609d3dd8 | cpp | google/tensorstore | optional_object | tensorstore/internal/json_binding/optional_object.h | tensorstore/internal/json_binding/optional_object_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_OPTIONAL_OBJECT_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_OPTIONAL_OBJECT_H_
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/json_binding.h"
namespace tensorstore {
namespace internal_json_binding {
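// Like `Object(binder)`, but treats a discarded JSON value as an empty
// object when loading, and serializes an empty object back to the discarded
// value when saving, so absent members round-trip cleanly.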
template <typename ObjectBinder>
constexpr auto OptionalObject(ObjectBinder binder) {
return [binder = std::move(binder)](auto is_loading, const auto& options,
auto* obj, auto* j) -> absl::Status {
::nlohmann::json::object_t json_obj;
if constexpr (is_loading) {
if (!j->is_discarded()) {
if (auto* ptr = j->template get_ptr<::nlohmann::json::object_t*>();
ptr) {
json_obj = std::move(*ptr);
} else {
return internal_json::ExpectedError(*j, "object");
}
}
}
if (auto status = internal_json_binding::Object(binder)(is_loading, options,
obj, &json_obj);
!status.ok()) {
return status;
}
if constexpr (!is_loading) {
if (!json_obj.empty()) {
*j = std::move(json_obj);
} else {
*j = ::nlohmann::json::value_t::discarded;
}
}
return absl::OkStatus();
};
}
}  // namespace internal_json_binding
}  // namespace tensorstore
#endif | #include "tensorstore/internal/json_binding/optional_object.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace jb = tensorstore::internal_json_binding;
namespace {
using ::tensorstore::MatchesStatus;
TEST(JsonBindingTest, RoundTrip) {
tensorstore::TestJsonBinderRoundTrip<::nlohmann::json::object_t>(
{
{{}, ::nlohmann::json(::nlohmann::json::value_t::discarded)},
{{{"a", 1}, {"b", 2}}, {{"a", 1}, {"b", 2}}},
},
jb::OptionalObject(jb::DefaultBinder<>));
}
TEST(JsonBindingTest, Invalid) {
tensorstore::TestJsonBinderFromJson<::nlohmann::json::object_t>(
{
{"abc", MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: \"abc\"")},
},
jb::OptionalObject(jb::DefaultBinder<>));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/optional_object.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/optional_object_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b9800ed6-bc3e-42db-a5d2-71bbee890890 | cpp | tensorflow/tensorflow | resize_bicubic_op | tensorflow/core/kernels/image/resize_bicubic_op.cc | tensorflow/core/kernels/image/resize_bicubic_op_test.cc | #define EIGEN_USE_THREADS
#include <math.h>
#include <algorithm>
#include <array>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
namespace {
static const int64_t kTableSize = (1 << 10);
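// Builds a lookup table for the cubic convolution interpolation kernel with
// parameter `a`, sampled at kTableSize + 1 points in [0, 1]. Entry [2*i]
// holds the weight for taps with |x| < 1 and entry [2*i + 1] the weight for
// taps with 1 <= |x| < 2.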
const float* InitCoeffsTable(const double a) {
float* coeffs_table = new float[(kTableSize + 1) * 2];
for (int i = 0; i <= kTableSize; ++i) {
float x = i * 1.0 / kTableSize;
coeffs_table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
coeffs_table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
return coeffs_table;
}
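// a = -0.5 yields the Keys cubic kernel (used on the half-pixel-centers
// path); a = -0.75 matches the legacy kernel used otherwise.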
const float* GetCoeffsTable(const bool use_keys_cubic) {
if (use_keys_cubic) {
static const float* coeffs_table = InitCoeffsTable(-0.5f);
return coeffs_table;
} else {
static const float* coeffs_table = InitCoeffsTable(-0.75f);
return coeffs_table;
}
}
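// Clamps `val` to the valid index range [0, limit - 1].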
inline int64_t Bound(int64_t val, int64_t limit) {
return std::min(limit - 1, std::max(int64_t{0}, val));
}
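// The four taps of the separable cubic filter along one axis. `advance`
// records how many tap values can be carried over from the previous output
// column's cache (see CachedInterpolationCalculator below).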
struct WeightsAndIndices {
float weight_0;
float weight_1;
float weight_2;
float weight_3;
int64_t index_0;
int64_t index_1;
int64_t index_2;
int64_t index_3;
int advance;
};
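// Computes the four source indices and filter weights for one output
// coordinate. With use_keys_cubic, taps that fall outside the image get a
// zero weight and the remaining weights are renormalized to sum to one.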
template <typename Scaler, bool use_keys_cubic>
inline void GetWeightsAndIndices(const float scale, const int64_t out_loc,
const int64_t limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const int64_t in_loc = std::floor(in_loc_f);
const float delta = in_loc_f - in_loc;
const int64_t offset = lrintf(delta * kTableSize);
const float* coeffs_table = GetCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
out->index_0 = Bound(in_loc - 1, limit);
out->weight_0 =
(out->index_0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->index_1 = Bound(in_loc, limit);
out->weight_1 = (out->index_1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->index_2 = Bound(in_loc + 1, limit);
out->weight_2 =
(out->index_2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->index_3 = Bound(in_loc + 2, limit);
out->weight_3 = (out->index_3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->weight_0 + out->weight_1 + out->weight_2 + out->weight_3;
if (std::abs(weight_sum) >= 1000.0f * std::numeric_limits<float>::min()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->weight_0 *= one_over_weight_sum;
out->weight_1 *= one_over_weight_sum;
out->weight_2 *= one_over_weight_sum;
out->weight_3 *= one_over_weight_sum;
}
} else {
out->weight_0 = coeffs_table[offset * 2 + 1];
out->weight_1 = coeffs_table[offset * 2];
out->weight_2 = coeffs_table[(kTableSize - offset) * 2];
out->weight_3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->index_0 = Bound(in_loc - 1, limit);
out->index_1 = Bound(in_loc, limit);
out->index_2 = Bound(in_loc + 1, limit);
out->index_3 = Bound(in_loc + 2, limit);
}
}
template <typename T>
inline float Interpolate1D(const float weight_0, const float weight_1,
const float weight_2, const float weight_3,
const T value_0, const T value_1, const T value_2,
const T value_3) {
return static_cast<float>(value_0) * weight_0 +
static_cast<float>(value_1) * weight_1 +
static_cast<float>(value_2) * weight_2 +
static_cast<float>(value_3) * weight_3;
}
static float Compute(float values_[4], const float xw_0, const float xw_1,
const float xw_2, const float xw_3) {
return Interpolate1D(xw_0, xw_1, xw_2, xw_3, values_[0], values_[1],
values_[2], values_[3]);
}
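// Tracks the four source indices used by the previous output column so that
// overlapping tap values can be reused instead of recomputed. Advance()
// returns how many of the new indices are already cached.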
class CachedInterpolationCalculator {
public:
CachedInterpolationCalculator() : indexes_{-1, -1, -1, -1} {}
inline int Advance(const int64_t x_0, const int64_t x_1, const int64_t x_2,
const int64_t x_3) {
const std::array<int64_t, 4> new_x_indices{{x_0, x_1, x_2, x_3}};
int cached_values_hand = 0;
int new_indices_hand = 0;
while (cached_values_hand < 4) {
if (indexes_[cached_values_hand] == new_x_indices[new_indices_hand]) {
if (new_indices_hand < cached_values_hand) {
indexes_[new_indices_hand] = indexes_[cached_values_hand];
}
cached_values_hand++;
new_indices_hand++;
} else {
cached_values_hand++;
}
}
switch (new_indices_hand) {
case 0:
indexes_[0] = x_0;
TF_FALLTHROUGH_INTENDED;
case 1:
indexes_[1] = x_1;
TF_FALLTHROUGH_INTENDED;
case 2:
indexes_[2] = x_2;
TF_FALLTHROUGH_INTENDED;
case 3:
indexes_[3] = x_3;
break;
}
return new_indices_hand;
}
private:
int64_t indexes_[4];
};
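// Precomputes weights/indices for every output column and converts the
// indices into flat offsets by scaling them with the channel count.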
static void ComputeXWeightsAndIndices(const ImageResizerState& resizer_state,
const bool half_pixel_centers,
std::vector<WeightsAndIndices>* x_wais) {
CachedInterpolationCalculator calc;
if (half_pixel_centers) {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
GetWeightsAndIndices<HalfPixelScaler, true>(
resizer_state.width_scale, x, resizer_state.in_width, &(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
} else {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
GetWeightsAndIndices<LegacyScaler, false>(
resizer_state.width_scale, x, resizer_state.in_width, &(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
}
for (int x = 0; x < resizer_state.out_width; ++x) {
(*x_wais)[x].index_0 *= resizer_state.channels;
(*x_wais)[x].index_1 *= resizer_state.channels;
(*x_wais)[x].index_2 *= resizer_state.channels;
(*x_wais)[x].index_3 *= resizer_state.channels;
}
}
static void ComputeGradientXWeightsAndIndices(
const ImageResizerGradientState& resizer_state,
const bool half_pixel_centers, std::vector<WeightsAndIndices>* x_wais) {
CachedInterpolationCalculator calc;
if (half_pixel_centers) {
for (int64_t x = 0; x < resizer_state.resized_width; ++x) {
GetWeightsAndIndices<HalfPixelScaler, true>(resizer_state.width_scale, x,
resizer_state.original_width,
&(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
} else {
for (int64_t x = 0; x < resizer_state.resized_width; ++x) {
GetWeightsAndIndices<LegacyScaler, false>(resizer_state.width_scale, x,
resizer_state.original_width,
&(*x_wais)[x]);
auto& x_wai = (*x_wais)[x];
x_wai.advance = calc.Advance(x_wai.index_0, x_wai.index_1, x_wai.index_2,
x_wai.index_3);
}
}
}
template <typename T>
static EIGEN_ALWAYS_INLINE float ComputeYInterpolation(
int which, int channel_num, const WeightsAndIndices& y_wai,
const T* y_ptr_0, const T* y_ptr_1, const T* y_ptr_2, const T* y_ptr_3,
const WeightsAndIndices& x_wai) {
int x_index;
switch (which) {
case 0:
x_index = x_wai.index_0;
break;
case 1:
x_index = x_wai.index_1;
break;
case 2:
x_index = x_wai.index_2;
break;
default:
x_index = x_wai.index_3;
break;
}
const int64_t pt_index = x_index + channel_num;
return Interpolate1D<T>(y_wai.weight_0, y_wai.weight_1, y_wai.weight_2,
y_wai.weight_3, y_ptr_0[pt_index], y_ptr_1[pt_index],
y_ptr_2[pt_index], y_ptr_3[pt_index]);
}
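// Separable bicubic resize: for each output row, the four contributing input
// rows are first blended along y, then the cached per-column values are
// blended along x. The common three-channel case uses an unrolled fast path.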
template <typename T>
inline void interpolate_with_caching(
const typename TTypes<T, 4>::ConstTensor& input_data,
const ImageResizerState& resizer_state, const bool half_pixel_centers,
typename TTypes<float, 4>::Tensor output_data) {
std::vector<WeightsAndIndices> x_wais(resizer_state.out_width);
ComputeXWeightsAndIndices(resizer_state, half_pixel_centers, &x_wais);
const auto num_channels = resizer_state.channels;
const int64_t in_row_width = resizer_state.in_width * num_channels;
const int64_t in_batch_width = resizer_state.in_height * in_row_width;
const T* input_b_ptr = input_data.data();
float* output_y_ptr = output_data.data();
std::vector<float> cached_value(num_channels == 3 ? 0 : 4 * num_channels, 0);
for (int64_t b = 0; b < resizer_state.batch_size;
++b, input_b_ptr += in_batch_width) {
for (int64_t y = 0; y < resizer_state.out_height;
++y, output_y_ptr += resizer_state.out_width * num_channels) {
WeightsAndIndices y_wai;
if (half_pixel_centers) {
GetWeightsAndIndices<HalfPixelScaler, true>(
resizer_state.height_scale, y, resizer_state.in_height, &y_wai);
} else {
GetWeightsAndIndices<LegacyScaler, false>(
resizer_state.height_scale, y, resizer_state.in_height, &y_wai);
}
const T* y_ptr_0 = input_b_ptr + y_wai.index_0 * in_row_width;
const T* y_ptr_1 = input_b_ptr + y_wai.index_1 * in_row_width;
const T* y_ptr_2 = input_b_ptr + y_wai.index_2 * in_row_width;
const T* y_ptr_3 = input_b_ptr + y_wai.index_3 * in_row_width;
if (num_channels == 3) {
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
switch (x_wai.advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
switch (x_wai.advance) {
case 0:
cached_value_0[0] = ComputeYInterpolation(
0, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[0] = ComputeYInterpolation(
0, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[0] = ComputeYInterpolation(
0, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 1:
cached_value_0[1] = ComputeYInterpolation(
1, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[1] = ComputeYInterpolation(
1, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[1] = ComputeYInterpolation(
1, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 2:
cached_value_0[2] = ComputeYInterpolation(
2, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[2] = ComputeYInterpolation(
2, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[2] = ComputeYInterpolation(
2, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
TF_FALLTHROUGH_INTENDED;
case 3:
cached_value_0[3] = ComputeYInterpolation(
3, 0, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_1[3] = ComputeYInterpolation(
3, 1, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
cached_value_2[3] = ComputeYInterpolation(
3, 2, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
break;
}
output_y_ptr[x * num_channels + 0] =
Compute(cached_value_0, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
output_y_ptr[x * num_channels + 1] =
Compute(cached_value_1, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
output_y_ptr[x * num_channels + 2] =
Compute(cached_value_2, x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
}
} else {
for (int64_t x = 0; x < resizer_state.out_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
switch (x_wai.advance) {
case 3:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 1];
cached_value[4 * c + 1] = cached_value[4 * c + 2];
cached_value[4 * c + 2] = cached_value[4 * c + 3];
}
break;
case 2:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 2];
cached_value[4 * c + 1] = cached_value[4 * c + 3];
}
break;
case 1: {
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = cached_value[4 * c + 3];
}
break;
}
}
switch (x_wai.advance) {
case 0:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 0] = ComputeYInterpolation(
0, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 1:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 1] = ComputeYInterpolation(
1, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 2:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 2] = ComputeYInterpolation(
2, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
TF_FALLTHROUGH_INTENDED;
case 3:
for (int64_t c = 0; c < num_channels; ++c) {
cached_value[4 * c + 3] = ComputeYInterpolation(
3, c, y_wai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, x_wai);
}
break;
}
for (int64_t c = 0; c < num_channels; ++c) {
output_y_ptr[x * num_channels + c] =
Compute(&cached_value[4 * c], x_wai.weight_0, x_wai.weight_1,
x_wai.weight_2, x_wai.weight_3);
}
}
}
}
}
}
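// Backward pass: each output-gradient element is scattered back to its 4x4
// input taps, weighted by the product of the y and x bicubic weights.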
template <typename T>
inline void ResizeBicubicGrad(typename TTypes<float, 4>::ConstTensor input_grad,
const ImageResizerGradientState& resizer_state,
const bool half_pixel_centers,
typename TTypes<T, 4>::Tensor output_grad) {
const float height_scale = resizer_state.height_scale;
const int64_t original_height = resizer_state.original_height;
const int channels = resizer_state.channels;
const int64_t resized_width = resizer_state.resized_width;
const int64_t resized_height = resizer_state.resized_height;
output_grad.setZero();
std::vector<WeightsAndIndices> x_wais(resizer_state.resized_width);
ComputeGradientXWeightsAndIndices(resizer_state, half_pixel_centers, &x_wais);
for (int64_t b = 0; b < resizer_state.batch_size; ++b) {
for (int64_t y = 0; y < resized_height; ++y) {
WeightsAndIndices y_wai;
if (half_pixel_centers) {
GetWeightsAndIndices<HalfPixelScaler, true>(height_scale, y,
original_height, &y_wai);
} else {
GetWeightsAndIndices<LegacyScaler, false>(height_scale, y,
original_height, &y_wai);
}
for (int64_t x = 0; x < resized_width; ++x) {
const WeightsAndIndices& x_wai = x_wais[x];
for (int64_t c = 0; c < channels; ++c) {
T curr_input_grad = input_grad(b, y, x, c);
output_grad(b, y_wai.index_0, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_0);
output_grad(b, y_wai.index_0, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_1);
output_grad(b, y_wai.index_0, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_2);
output_grad(b, y_wai.index_0, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_0 * x_wai.weight_3);
output_grad(b, y_wai.index_1, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_0);
output_grad(b, y_wai.index_1, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_1);
output_grad(b, y_wai.index_1, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_2);
output_grad(b, y_wai.index_1, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_1 * x_wai.weight_3);
output_grad(b, y_wai.index_2, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_0);
output_grad(b, y_wai.index_2, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_1);
output_grad(b, y_wai.index_2, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_2);
output_grad(b, y_wai.index_2, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_2 * x_wai.weight_3);
output_grad(b, y_wai.index_3, x_wai.index_0, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_0);
output_grad(b, y_wai.index_3, x_wai.index_1, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_1);
output_grad(b, y_wai.index_3, x_wai.index_2, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_2);
output_grad(b, y_wai.index_3, x_wai.index_3, c) +=
T(curr_input_grad * y_wai.weight_3 * x_wai.weight_3);
}
}
}
}
}
}  // namespace
typedef Eigen::ThreadPoolDevice CPUDevice;
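// Forward kernel: validates the requested output size via ImageResizerState,
// then runs the cached separable interpolation defined above.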
template <typename Device, typename T>
class ResizeBicubicOp : public OpKernel {
public:
explicit ResizeBicubicOp(OpKernelConstruction* context) : OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
TTypes<float, 4>::Tensor output_data = st.output->tensor<float, 4>();
interpolate_with_caching<T>(input_data, st, half_pixel_centers_,
output_data);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
template <typename Device, typename T>
class ResizeBicubicOpGrad : public OpKernel {
public:
explicit ResizeBicubicOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerGradientState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
TTypes<float, 4>::ConstTensor input_grad =
context->input(0).tensor<float, 4>();
typename TTypes<T, 4>::Tensor output_grad(st.output->tensor<T, 4>());
ResizeBicubicGrad<T>(input_grad, st, half_pixel_centers_, output_grad);
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeBicubic") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeBicubicOp<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#define REGISTER_GRAD_KERNEL(T) \
REGISTER_KERNEL_BUILDER( \
Name("ResizeBicubicGrad").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
ResizeBicubicOpGrad<CPUDevice, T>);
TF_CALL_float(REGISTER_GRAD_KERNEL);
TF_CALL_double(REGISTER_GRAD_KERNEL);
#undef REGISTER_GRAD_KERNEL
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
class ResizeBicubicOpTest : public OpsTestBase {
protected:
ResizeBicubicOpTest() {
TF_EXPECT_OK(NodeDefBuilder("resize_bicubic_op", "ResizeBicubic")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", false)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
const Tensor* SetRandomImageInput(const TensorShape& shape) {
inputs_.clear();
CHECK_EQ(shape.dims(), 4) << "All images must have 4 dimensions.";
bool is_ref = IsRefType(input_types_[inputs_.size()]);
Tensor* input = new Tensor(device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(), shape);
input->flat<float>().setRandom();
tensors_.push_back(input);
if (is_ref) {
CHECK_EQ(RemoveRefType(input_types_[inputs_.size()]),
DataTypeToEnum<float>::v());
inputs_.push_back({&lock_for_refs_, input});
} else {
CHECK_EQ(input_types_[inputs_.size()], DataTypeToEnum<float>::v());
inputs_.push_back({nullptr, input});
}
return input;
}
private:
static constexpr int64_t kTableSize = (1 << 10);
const float* InitCoeffsTable() {
float* coeffs_tab = new float[(kTableSize + 1) * 2];
static const double A = -0.75;
for (int i = 0; i <= kTableSize; ++i) {
float x = i * 1.0 / kTableSize;
coeffs_tab[i * 2] = ((A + 2) * x - (A + 3)) * x * x + 1;
x += 1.0;
coeffs_tab[i * 2 + 1] = ((A * x - 5 * A) * x + 8 * A) * x - 4 * A;
}
return coeffs_tab;
}
const float* GetCoeffsTable() {
static const float* coeffs_tab = InitCoeffsTable();
return coeffs_tab;
}
inline int64_t Bound(int64_t val, int64_t limit) {
return std::min(limit - 1, std::max(int64_t{0}, val));
}
inline void GetWeightsAndIndices(float scale, int64_t out_loc, int64_t limit,
std::array<float, 4>* weights,
std::array<int64_t, 4>* indices) {
const int64_t in_loc = scale * out_loc;
const float in_loc_float = scale * out_loc;
const float delta = in_loc_float - in_loc;
const int64_t offset = lrintf(delta * kTableSize);
const float* coeffs_tab = GetCoeffsTable();
*weights = {{coeffs_tab[offset * 2 + 1], coeffs_tab[offset * 2],
coeffs_tab[(kTableSize - offset) * 2],
coeffs_tab[(kTableSize - offset) * 2 + 1]}};
*indices = {{Bound(in_loc - 1, limit), Bound(in_loc, limit),
Bound(in_loc + 1, limit), Bound(in_loc + 2, limit)}};
}
inline float Interpolate1D(const std::array<float, 4>& weights,
const std::array<float, 4>& values) {
return values[0] * weights[0] + values[1] * weights[1] +
values[2] * weights[2] + values[3] * weights[3];
}
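// Straightforward reference implementation of the legacy (non-half-pixel)
// bicubic resize, used to validate the cached fast path on random inputs.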
void ResizeBicubicBaseline(TTypes<float, 4>::ConstTensor images,
TTypes<float, 4>::Tensor output) {
const int batch_size = images.dimension(0);
const int64_t in_height = images.dimension(1);
const int64_t in_width = images.dimension(2);
const int channels = images.dimension(3);
ASSERT_EQ(batch_size, output.dimension(0));
ASSERT_EQ(channels, output.dimension(3));
const int64_t out_height = output.dimension(1);
const int64_t out_width = output.dimension(2);
const float height_scale = in_height / static_cast<float>(out_height);
const float width_scale = in_width / static_cast<float>(out_width);
std::array<float, 4> coeff = {{0.0, 0.0, 0.0, 0.0}};
for (int64_t b = 0; b < batch_size; ++b) {
for (int64_t y = 0; y < out_height; ++y) {
std::array<float, 4> y_weights;
std::array<int64_t, 4> y_indices;
GetWeightsAndIndices(height_scale, y, in_height, &y_weights,
&y_indices);
for (int64_t x = 0; x < out_width; ++x) {
std::array<float, 4> x_weights;
std::array<int64_t, 4> x_indices;
GetWeightsAndIndices(width_scale, x, in_width, &x_weights,
&x_indices);
for (int64_t c = 0; c < channels; ++c) {
for (int64_t i = 0; i < 4; ++i) {
const std::array<float, 4> values = {
{static_cast<float>(images(b, y_indices[i], x_indices[0], c)),
static_cast<float>(images(b, y_indices[i], x_indices[1], c)),
static_cast<float>(images(b, y_indices[i], x_indices[2], c)),
static_cast<float>(
images(b, y_indices[i], x_indices[3], c))}};
coeff[i] = Interpolate1D(x_weights, values);
}
output(b, y, x, c) = Interpolate1D(y_weights, coeff);
}
}
}
}
}
protected:
void RunRandomTest(const int batch_size, const int64_t in_height,
const int64_t in_width, const int target_height,
const int target_width, int channels) {
LOG(INFO) << "Running random test " << in_height << "x" << in_width << "x"
<< channels << " to " << target_height << "x" << target_width
<< "x" << channels;
const Tensor* input = SetRandomImageInput(
TensorShape({batch_size, in_height, in_width, channels}));
AddInputFromArray<int32>(TensorShape({2}), {target_height, target_width});
TF_ASSERT_OK(RunOpKernel());
std::unique_ptr<Tensor> expected(new Tensor(
device_->GetAllocator(AllocatorAttributes()),
DataTypeToEnum<float>::v(),
TensorShape({batch_size, target_height, target_width, channels})));
ResizeBicubicBaseline(input->tensor<float, 4>(),
expected->tensor<float, 4>());
test::ExpectTensorNear<float>(*expected, *GetOutput(0), 0.00001);
}
void RunManyRandomTests(int channels) {
for (int batch_size : {1, 2, 5}) {
for (int in_w : {2, 4, 7, 20, 165}) {
for (int in_h : {1, 3, 5, 8, 100, 233}) {
for (int target_height : {1, 2, 3, 50, 113}) {
for (int target_width : {target_height, target_height / 2 + 1}) {
RunRandomTest(batch_size, in_h, in_w, target_height, target_width,
channels);
}
}
}
}
}
}
};
TEST_F(ResizeBicubicOpTest, TestBicubic2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(ResizeBicubicOpTest, TestBicubic2x2To0x0) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {0, 0});
Status s = RunOpKernel();
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(), "output dimensions must be positive"))
<< s;
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom141x186) {
  RunRandomTest(2, 141, 186, 299, 299, /*channels=*/1);
  RunRandomTest(2, 141, 186, 299, 299, /*channels=*/3);
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom183x229) {
  RunRandomTest(2, 183, 229, 299, 299, /*channels=*/1);
  RunRandomTest(2, 183, 229, 299, 299, /*channels=*/3);
}
TEST_F(ResizeBicubicOpTest, TestBicubicRandom749x603) {
  RunRandomTest(2, 749, 603, 299, 299, /*channels=*/1);
  RunRandomTest(2, 749, 603, 299, 299, /*channels=*/3);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes1Channel) {
RunManyRandomTests(1);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes3Channels) {
RunManyRandomTests(3);
}
TEST_F(ResizeBicubicOpTest, TestAreaRandomDataSeveralInputsSizes4Channels) {
RunManyRandomTests(4);
}
static Graph* ResizeBicubic(int batch_size, int size, int channels,
float scale_y = 0.3, float scale_x = 0.7) {
Graph* g = new Graph(OpRegistry::Global());
Tensor input(DT_FLOAT, TensorShape({batch_size, size, size, channels}));
input.flat<float>().setRandom();
Tensor shape(DT_INT32, TensorShape({2}));
auto shape_t = shape.flat<int32>();
shape_t(0) = scale_y * size;
shape_t(1) = scale_x * size;
test::graph::Binary(g, "ResizeBicubic", test::graph::Constant(g, input),
test::graph::Constant(g, shape));
return g;
}
#define BM_ResizeBicubicDev(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS( \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS); \
} \
BENCHMARK(BM_ResizeBicubic##_##BATCH##_##SIZE##_##CHANNELS);
BM_ResizeBicubicDev(8, 32, 3);
BM_ResizeBicubicDev(8, 128, 3);
BM_ResizeBicubicDev(8, 512, 3);
BM_ResizeBicubicDev(8, 1024, 3);
BM_ResizeBicubicDev(16, 32, 3);
BM_ResizeBicubicDev(16, 128, 3);
BM_ResizeBicubicDev(16, 512, 3);
BM_ResizeBicubicDev(16, 1024, 3);
BM_ResizeBicubicDev(32, 32, 3);
BM_ResizeBicubicDev(32, 128, 3);
BM_ResizeBicubicDev(32, 512, 3);
BM_ResizeBicubicDev(32, 1024, 3);
#define BM_ResizeBicubicExpand(BATCH, SIZE, CHANNELS) \
static void BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS( \
::testing::benchmark::State& state) { \
test::Benchmark("cpu", ResizeBicubic(BATCH, SIZE, CHANNELS, 8, 8), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * BATCH * \
SIZE * SIZE * CHANNELS * 8 * 8); \
} \
BENCHMARK(BM_ResizeBicubicExpand##_##BATCH##_##SIZE##_##CHANNELS);
BM_ResizeBicubicExpand(12, 48, 1);
BM_ResizeBicubicExpand(12, 48, 3);
BM_ResizeBicubicExpand(12, 48, 40);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bicubic_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_bicubic_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
831eba5a-1e8a-4c10-b485-c8cc4caf6274 | cpp | tensorflow/tensorflow | task_internal | tensorflow/lite/core/async/task_internal.cc | tensorflow/lite/core/async/task_internal_test.cc | #include "tensorflow/lite/core/async/task_internal.h"
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include "tensorflow/lite/core/async/async_kernel_internal.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace async {
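// Resolves a signature I/O name to its tensor index using the name maps
// installed via SetInputNameMap/SetOutputNameMap; returns false if no map is
// set or the name is unknown.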
bool ExecutionTask::GetTensorIdx(TfLiteIoType io_type, const char* name,
int* idx) const {
const std::map<std::string, uint32_t>* map = nullptr;
if (io_type == kTfLiteIoTypeInput) {
map = input_name_to_idx_;
} else {
map = output_name_to_idx_;
}
if (!map) return false;
if (auto it_idx = map->find(name); it_idx != map->end()) {
*idx = it_idx->second;
return true;
}
return false;
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(TfLiteIoType io_type,
const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteNullBufferHandle;
}
return GetBufferHandle(index);
}
TfLiteBufferHandle ExecutionTask::GetBufferHandle(int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.buf;
}
return kTfLiteNullBufferHandle;
}
TfLiteStatus ExecutionTask::SetBufferHandle(TfLiteIoType io_type,
const char* name,
TfLiteBufferHandle handle) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetBufferHandle(index, handle);
}
TfLiteStatus ExecutionTask::SetBufferHandle(int tensor_index,
TfLiteBufferHandle handle) {
io_data_[tensor_index].buf = handle;
return kTfLiteOk;
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
TfLiteIoType io_type, const char* name) const {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return nullptr;
}
return GetSynchronization(index);
}
TfLiteSynchronization* ExecutionTask::GetSynchronization(
int tensor_index) const {
if (auto it = io_data_.find(tensor_index); it != io_data_.end()) {
return it->second.sync;
}
return nullptr;
}
TfLiteStatus ExecutionTask::SetSynchronization(TfLiteIoType io_type,
const char* name,
TfLiteSynchronization* sync) {
int index = 0;
if (!GetTensorIdx(io_type, name, &index)) {
return kTfLiteError;
}
return SetSynchronization(index, sync);
}
TfLiteStatus ExecutionTask::SetSynchronization(int tensor_index,
TfLiteSynchronization* sync) {
io_data_[tensor_index].sync = sync;
return kTfLiteOk;
}
}  // namespace async
}  // namespace tflite
TfLiteExecutionTask::TfLiteExecutionTask() {
task = std::make_unique<tflite::async::ExecutionTask>();
} | #include "tensorflow/lite/core/async/task_internal.h"
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/async/c/types.h"
#include "tensorflow/lite/core/async/interop/c/types.h"
namespace tflite::async {
TEST(TfLiteExecutionTaskTest, BasicTest) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "x", 42));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeInput, "y", 43));
EXPECT_EQ(kTfLiteOk, task.SetBufferHandle(kTfLiteIoTypeOutput, "a", 44));
EXPECT_EQ(kTfLiteOk, task.SetSynchronization(kTfLiteIoTypeInput, "x", sync));
EXPECT_EQ(42, task.GetBufferHandle(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(43, task.GetBufferHandle(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(44, task.GetBufferHandle(kTfLiteIoTypeOutput, "a"));
EXPECT_EQ(sync, task.GetSynchronization(kTfLiteIoTypeInput, "x"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "y"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "a"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, NameMapUninitialized) {
tflite::async::ExecutionTask task;
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "foo"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "foo"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "foo"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "foo"));
}
TEST(TfLiteExecutionTaskTest, NoMatchingName) {
tflite::async::ExecutionTask task;
tflite::async::ExecutionTask::TensorNameMapT input_names;
input_names["x"] = 1;
input_names["y"] = 2;
tflite::async::ExecutionTask::TensorNameMapT output_names;
output_names["a"] = 3;
task.SetInputNameMap(&input_names);
task.SetOutputNameMap(&output_names);
auto* sync = TfLiteSynchronizationCreate();
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeInput, "xx", 42));
EXPECT_EQ(kTfLiteError, task.SetBufferHandle(kTfLiteIoTypeOutput, "aa", 44));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeInput, "xx", sync));
EXPECT_EQ(kTfLiteError,
task.SetSynchronization(kTfLiteIoTypeOutput, "aa", sync));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(kTfLiteNullBufferHandle,
task.GetBufferHandle(kTfLiteIoTypeOutput, "aa"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeInput, "xx"));
EXPECT_EQ(nullptr, task.GetSynchronization(kTfLiteIoTypeOutput, "aa"));
TfLiteSynchronizationDelete(sync);
}
TEST(TfLiteExecutionTaskTest, DelegateData) {
TfLiteAsyncKernel kernel{};
int data = 0;
tflite::async::ExecutionTask task;
EXPECT_EQ(nullptr, task.GetDelegateExecutionData(&kernel));
task.SetDelegateExecutionData(&kernel, &data);
EXPECT_EQ(&data, task.GetDelegateExecutionData(&kernel));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/task_internal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/async/task_internal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
59593b95-a75a-4b03-b909-6c7098377862 | cpp | google/tensorstore | json_registry | tensorstore/internal/json_registry.h | tensorstore/internal/json_registry_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_REGISTRY_H_
#define TENSORSTORE_INTERNAL_JSON_REGISTRY_H_
#include <memory>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeindex>
#include <utility>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_registry_fwd.h"
#include "tensorstore/internal/json_registry_impl.h"
#include "tensorstore/json_serialization_options.h"
namespace tensorstore {
namespace internal {
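// Registry mapping string identifiers to JSON binders for concrete subclasses
// of `Base`. A hedged usage sketch (all names hypothetical):
//
//   JsonRegistry<MyBase, LoadOptions, SaveOptions> registry;
//   registry.Register<MyDerived>("my_id", my_binder);
//   // Bind objects of the form {"id": "my_id", ...}:
//   auto binder = jb::Object(registry.MemberBinder("id"));
//
// `BasePtr` is expected to behave like an owning smart pointer to `Base`.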
template <typename Base, typename LoadOptions, typename SaveOptions,
typename BasePtr>
class JsonRegistry {
static_assert(std::has_virtual_destructor_v<Base>);
public:
auto KeyBinder() const { return KeyBinderImpl{impl_}; }
constexpr auto RegisteredObjectBinder() const {
return RegisteredObjectBinderImpl{impl_};
}
template <typename MemberName>
auto MemberBinder(MemberName member_name) const {
namespace jb = tensorstore::internal_json_binding;
return jb::Sequence(jb::Member(member_name, this->KeyBinder()),
RegisteredObjectBinder());
}
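  // Registers subclass `T` of `Base` under `id`. `binder` is invoked with the
  // remaining JSON object members once the identifier key has been consumed.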
template <typename T, typename Binder>
void Register(std::string_view id, Binder binder) {
static_assert(std::is_base_of_v<Base, T>);
auto entry =
std::make_unique<internal_json_registry::JsonRegistryImpl::Entry>();
entry->id = std::string(id);
entry->type = &typeid(T);
entry->allocate =
+[](void* obj) { static_cast<BasePtr*>(obj)->reset(new T); };
entry->binder = [binder](
auto is_loading, const void* options, const void* obj,
::nlohmann::json::object_t* j_obj) -> absl::Status {
using Options = std::conditional_t<decltype(is_loading)::value,
LoadOptions, SaveOptions>;
using Obj = std::conditional_t<decltype(is_loading)::value, T, const T>;
return binder(is_loading, *static_cast<const Options*>(options),
const_cast<Obj*>(
static_cast<const Obj*>(static_cast<const Base*>(obj))),
j_obj);
};
impl_.Register(std::move(entry));
}
private:
struct KeyBinderImpl {
const internal_json_registry::JsonRegistryImpl& impl;
template <typename Options>
absl::Status operator()(std::true_type is_loading, const Options& options,
BasePtr* obj, ::nlohmann::json* j) const {
return impl.LoadKey(obj, j);
}
template <typename Ptr, typename Options>
absl::Status operator()(std::false_type is_loading, const Options& options,
const Ptr* obj, ::nlohmann::json* j) const {
static_assert(std::is_convertible_v<decltype(&**obj), const Base*>);
return impl.SaveKey(typeid(**obj), j);
}
};
struct RegisteredObjectBinderImpl {
const internal_json_registry::JsonRegistryImpl& impl;
absl::Status operator()(std::true_type is_loading,
const LoadOptions& options, BasePtr* obj,
::nlohmann::json::object_t* j_obj) const {
if (!*obj) return absl::OkStatus();
return impl.LoadRegisteredObject(typeid(*obj->get()), &options,
static_cast<const Base*>(&**obj), j_obj);
}
template <typename Ptr>
absl::Status operator()(std::false_type is_loading,
const SaveOptions& options, const Ptr* obj,
::nlohmann::json::object_t* j_obj) const {
static_assert(std::is_convertible_v<decltype(&**obj), const Base*>);
if (!*obj) return absl::OkStatus();
return impl.SaveRegisteredObject(typeid(**obj), &options,
static_cast<const Base*>(&**obj), j_obj);
}
};
internal_json_registry::JsonRegistryImpl impl_;
};
}
}
#endif | #include "tensorstore/internal/json_registry.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/no_destructor.h"
#include "absl/status/status.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::internal::JsonRegistry;
class MyInterface
: public tensorstore::internal::AtomicReferenceCount<MyInterface> {
public:
virtual int Whatever() const = 0;
virtual ~MyInterface() = default;
};
class MyInterfacePtr : public IntrusivePtr<MyInterface> {
public:
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(MyInterfacePtr,
tensorstore::JsonSerializationOptions,
tensorstore::JsonSerializationOptions)
};
using Registry =
JsonRegistry<MyInterface, tensorstore::JsonSerializationOptions,
tensorstore::JsonSerializationOptions>;
Registry& GetRegistry() {
static absl::NoDestructor<Registry> registry;
return *registry;
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(MyInterfacePtr, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(GetRegistry().MemberBinder("id"))(is_loading, options, obj,
j);
})
class FooImpl : public MyInterface {
public:
int x;
int Whatever() const override { return x; }
};
class BarImpl : public MyInterface {
public:
float y;
int Whatever() const override { return static_cast<int>(y); }
};
struct FooRegistration {
FooRegistration() {
namespace jb = tensorstore::internal_json_binding;
GetRegistry().Register<FooImpl>(
"foo", jb::Object(jb::Member("x", jb::Projection(&FooImpl::x))));
}
} foo_registration;
struct BarRegistration {
BarRegistration() {
namespace jb = tensorstore::internal_json_binding;
GetRegistry().Register<BarImpl>(
"bar", jb::Object(jb::Member("y", jb::Projection(&BarImpl::y))));
}
} bar_registration;
TEST(RegistryTest, Foo) {
const ::nlohmann::json j{{"id", "foo"}, {"x", 10}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto obj, MyInterfacePtr::FromJson(j));
EXPECT_EQ(10, obj->Whatever());
EXPECT_EQ(j, obj.ToJson());
}
TEST(RegistryTest, Bar) {
const ::nlohmann::json j{{"id", "bar"}, {"y", 42.5}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto obj, MyInterfacePtr::FromJson(j));
EXPECT_EQ(42, obj->Whatever());
EXPECT_EQ(j, obj.ToJson());
}
TEST(RegistryTest, Unknown) {
EXPECT_THAT(MyInterfacePtr::FromJson({{"id", "baz"}, {"y", 42.5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Error parsing object member \"id\": "
"\"baz\" is not registered"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_registry.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_registry_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6563a9cb-dea9-48ee-bc96-d7524605a677 | cpp | tensorflow/tensorflow | replicate_per_replica_nodes | tensorflow/core/common_runtime/replicate_per_replica_nodes.cc | tensorflow/core/common_runtime/replicate_per_replica_nodes_test.cc | #include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <algorithm>
#include <queue>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/optimize_cross_host_control_deps.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace {
constexpr int kOptimizeCrossHostEdgesThreshold = 8;
constexpr int kOptimizeCrossHostDataEdgesThreshold = 2;
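// Tracks, for each node assigned to a composite device, its lazily created
// per-replica copies, and rewires edges between replicated nodes and nodes on
// regular devices.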
class ReplicateHelper {
public:
Status InitializeNode(const Node* node, int num_allowed_devices) {
if (replicated_nodes_map_.find(node) != replicated_nodes_map_.end()) {
return errors::InvalidArgument("Node ", node->name(),
" has been replicated.");
}
std::vector<Node*> replicated_nodes(num_allowed_devices, nullptr);
replicated_nodes_map_.emplace(node, std::move(replicated_nodes));
return absl::OkStatus();
}
Status ReplicateNode(const Node* node,
const std::vector<string>& allowed_devices,
int allowed_device_index, Graph* graph) {
auto& replicated_nodes = replicated_nodes_map_.at(node);
if (replicated_nodes[allowed_device_index] != nullptr) {
return absl::OkStatus();
}
const auto& device = allowed_devices.at(allowed_device_index);
NodeDef node_def = node->def();
const string suffix = strings::StrCat("/R", allowed_device_index);
node_def.set_name(graph->NewName(strings::StrCat(node_def.name(), suffix)));
TF_ASSIGN_OR_RETURN(Node * replicated_node, graph->AddNode(node_def));
replicated_node->set_assigned_device_name(device);
if (replicated_node->IsArg()) {
replicated_node->AddAttr("sub_index", allowed_device_index);
}
replicated_nodes[allowed_device_index] = replicated_node;
return absl::OkStatus();
}
void ReplicateFromRegularDeviceToCompositeDevice(const Edge* edge,
Graph* graph) const {
Node* src = edge->src();
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
for (Node* dst : dst_replicated_nodes) {
if (dst == nullptr) {
continue;
}
graph->AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
}
Status ReplicateFromCompositeDeviceToCompositeDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
const std::vector<Node*>& dst_replicated_nodes =
replicated_nodes_map_.at(edge->dst());
if (src_replicated_nodes.size() != dst_replicated_nodes.size()) {
return errors::InvalidArgument(
"Nodes assigned to the same composite device should have the "
"same number of replicated nodes. Found an edge from node ",
edge->src()->name(), " (", src_replicated_nodes.size(),
" replicated nodes) to node ", edge->dst()->name(), " (",
dst_replicated_nodes.size(), " replicated nodes).");
}
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
Node* dst = dst_replicated_nodes.at(i);
if (dst == nullptr) {
continue;
}
TF_RETURN_IF_ERROR(ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
}
return absl::OkStatus();
}
Status ReplicateFromCompositeDeviceToRegularDevice(
const Edge* edge, const std::vector<string>& allowed_devices,
Graph* graph) {
const std::vector<Node*>& src_replicated_nodes =
replicated_nodes_map_.at(edge->src());
Node* dst = edge->dst();
const string& dst_device = dst->assigned_device_name();
bool found_src_node = false;
for (int i = 0; i < allowed_devices.size(); ++i) {
if (allowed_devices.at(i) == dst_device) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
graph->AddEdge(src_replicated_nodes.at(i), edge->src_output(), dst,
edge->dst_input());
found_src_node = true;
break;
}
}
if (!found_src_node) {
for (int i = 0; i < allowed_devices.size(); ++i) {
TF_RETURN_IF_ERROR(
ReplicateNode(edge->src(), allowed_devices, i, graph));
}
if (edge->IsControlEdge()) {
for (Node* replicated_node : src_replicated_nodes) {
graph->AddControlEdge(replicated_node, dst,
                                /*allow_duplicates=*/true);
}
return absl::OkStatus();
}
if (edge->src()->type_string() == "_Arg") {
NodeDefBuilder pack_builder(
graph->NewName(absl::StrCat(edge->src()->name(), "/Packed")),
"Pack");
const int num_replicas = src_replicated_nodes.size();
pack_builder.Attr("N", num_replicas);
const DataType dtype = edge->src()->output_type(edge->src_output());
pack_builder.Attr("T", dtype);
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.reserve(src_replicated_nodes.size());
for (Node* replicated_node : src_replicated_nodes) {
inputs.emplace_back(NodeDefBuilder::NodeOut{
replicated_node->name(), edge->src_output(), dtype});
}
pack_builder.Input(inputs);
NodeDef pack_def;
TF_RETURN_IF_ERROR(pack_builder.Finalize(&pack_def));
TF_ASSIGN_OR_RETURN(Node * pack_node, graph->AddNode(pack_def));
pack_node->set_assigned_device_name(dst->assigned_device_name());
for (int i = 0; i < src_replicated_nodes.size(); ++i) {
graph->AddEdge(src_replicated_nodes[i], edge->src_output(), pack_node,
i);
}
graph->AddEdge(pack_node, 0, dst, edge->dst_input());
} else {
return errors::InvalidArgument(
"Dst node should be assigned to an allowed device. Found an "
"edge from node ",
edge->src()->name(), " assigned to ",
edge->src()->assigned_device_name(), " to node ", dst->name(),
" assigned to ", dst_device);
}
}
return absl::OkStatus();
}
private:
absl::flat_hash_map<const Node*, std::vector<Node*>> replicated_nodes_map_;
};
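// Replicates nodes assigned to a composite device and rewrites their edges.
// `cluster_nodes` maps each such node to its remaining out-edge count; a node
// is removed from the graph once all of its out-edges have been rewritten.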
Status ReplicateNodesAndEdges(const std::vector<string>& allowed_devices,
absl::flat_hash_map<Node*, int>* cluster_nodes,
ReplicateHelper* helper, Graph* graph) {
std::queue<Node*> nodes_ready_to_delete;
for (auto& pair : *cluster_nodes) {
Node* node = pair.first;
for (const Edge* edge : node->out_edges()) {
Node* dst = edge->dst();
if (dst->assigned_device_name() != node->assigned_device_name()) {
TF_RETURN_IF_ERROR(helper->ReplicateFromCompositeDeviceToRegularDevice(
edge, allowed_devices, graph));
--pair.second;
}
}
if (cluster_nodes->at(node) == 0) {
nodes_ready_to_delete.push(node);
}
}
while (!nodes_ready_to_delete.empty()) {
Node* node = nodes_ready_to_delete.front();
nodes_ready_to_delete.pop();
for (const Edge* edge : node->in_edges()) {
Node* src = edge->src();
if (src->assigned_device_name() != node->assigned_device_name()) {
helper->ReplicateFromRegularDeviceToCompositeDevice(edge, graph);
} else {
TF_RETURN_IF_ERROR(
helper->ReplicateFromCompositeDeviceToCompositeDevice(
edge, allowed_devices, graph));
if (--(*cluster_nodes)[src] == 0) {
nodes_ready_to_delete.push(src);
}
}
}
cluster_nodes->erase(node);
graph->RemoveNode(node);
}
return absl::OkStatus();
}
}
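// A minimal call sketch (device names hypothetical):
//
//   std::vector<string> replicas = {"/device:TPU:0", "/device:TPU:1"};
//   absl::flat_hash_map<string, const std::vector<string>*> composites =
//       {{"/device:TPU_COMPOSITE:0", &replicas}};
//   TF_RETURN_IF_ERROR(
//       ReplicatePerReplicaNodesInFunctionGraph(composites, &graph));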
Status ReplicatePerReplicaNodesInFunctionGraph(
const absl::flat_hash_map<string, const std::vector<string>*>&
composite_devices,
Graph* graph) {
VLOG(1) << "Starting ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
std::set<string> composite_device_names;
for (const auto& it : composite_devices) {
composite_device_names.insert(it.first);
}
absl::flat_hash_map<string, absl::flat_hash_map<Node*, int>>
composite_device_to_cluster_nodes;
for (Node* n : graph->op_nodes()) {
if (composite_device_names.find(n->assigned_device_name()) !=
composite_device_names.end()) {
composite_device_to_cluster_nodes[n->assigned_device_name()].emplace(
n, n->out_edges().size());
}
}
if (composite_device_to_cluster_nodes.empty()) {
VLOG(1) << "No nodes with composiste device found.";
return absl::OkStatus();
}
for (auto& it : composite_device_to_cluster_nodes) {
const std::vector<string>& allowed_devices =
*composite_devices.at(it.first);
if (allowed_devices.empty()) {
return errors::InvalidArgument("No allowed device of composite device: ",
it.first);
}
absl::flat_hash_map<Node*, int>& cluster_nodes = it.second;
if (allowed_devices.size() == 1) {
for (const auto& pair : it.second) {
Node* n = pair.first;
n->set_assigned_device_name(allowed_devices.at(0));
if (n->IsArg()) {
n->AddAttr("sub_index", 0);
}
}
continue;
}
ReplicateHelper helper;
for (const auto& pair : cluster_nodes) {
TF_RETURN_IF_ERROR(
helper.InitializeNode(pair.first, allowed_devices.size()));
}
TF_RETURN_IF_ERROR(ReplicateNodesAndEdges(allowed_devices, &cluster_nodes,
&helper, graph));
if (!cluster_nodes.empty()) {
return errors::InvalidArgument(
"There are still ", cluster_nodes.size(),
" nodes on CompositiveDevice ",
cluster_nodes.begin()->first->assigned_device_name());
}
}
  TF_RETURN_IF_ERROR(OptimizeCrossHostControlOutputEdges(
      graph, kOptimizeCrossHostEdgesThreshold));
  TF_RETURN_IF_ERROR(OptimizeCrossHostControlInputEdges(
      graph, kOptimizeCrossHostEdgesThreshold));
  TF_RETURN_IF_ERROR(OptimizeCrossHostDataOutputEdges(
      graph, kOptimizeCrossHostDataEdgesThreshold));
VLOG(1) << "Finished ReplicatePerReplicaNodesInFunctionGraph";
VLOG(1) << "Graph #nodes " << graph->num_nodes() << " #edges "
<< graph->num_edges();
return absl::OkStatus();
}
} | #include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include <map>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class GraphHelper {
public:
explicit GraphHelper(const Graph& graph) : graph_(graph) {
for (Node* node : graph.nodes()) {
nodes_by_name_[node->name()] = node;
}
}
Node* GetNodeByName(const string& name) {
const auto it = nodes_by_name_.find(name);
if (it != nodes_by_name_.end()) {
return it->second;
}
for (const auto& entry : nodes_by_name_) {
if (absl::StartsWith(entry.first, name)) {
return entry.second;
}
}
return nullptr;
}
void SetAssignedDevice(const string& node_name, const string& device_name) {
CHECK_NOTNULL(GetNodeByName(node_name))
->set_assigned_device_name(device_name);
}
void CheckArgNum(const int expected_num) {
int arg_num = 0;
for (Node* node : graph_.op_nodes()) {
if (node->IsArg()) {
arg_num++;
}
}
EXPECT_EQ(arg_num, expected_num);
}
void CheckAssignedDevice(const string& node_name,
const string& expected_device_name) {
EXPECT_EQ(expected_device_name,
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name());
}
void CheckAssignedDevicePrefix(const string& node_name,
const string& expected_device_name) {
auto assigned =
CHECK_NOTNULL(GetNodeByName(node_name))->assigned_device_name();
EXPECT_EQ(assigned.rfind(expected_device_name, 0), 0);
}
private:
const Graph& graph_;
std::map<string, Node*> nodes_by_name_;
};
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto one = ops::Const<int32>(scope.WithOpName("one"), 1);
auto write = ops::AssignVariableOp(scope.WithOpName("write"), arg, one);
auto ret = ops::_Retval(
scope.WithOpName("ret").WithControlDependencies({write}), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("one", "/device:CPU:0");
helper.SetAssignedDevice("write", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 9);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevicePrefix("arg/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("arg/R1", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R0", "/device:TPU");
helper.CheckAssignedDevicePrefix("write/R1", "/device:TPU");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("one", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, SingleCompositeDeviceToSingleDevice) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, MultipleCompositeDevices) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg0 = ops::_Arg(scope.WithOpName("arg0"), DT_RESOURCE, 0);
Output arg1 = ops::_Arg(scope.WithOpName("arg1"), DT_RESOURCE, 0);
auto read0 = ops::ReadVariableOp(scope.WithOpName("read0"), arg0, DT_INT32);
auto read1 = ops::ReadVariableOp(scope.WithOpName("read1"), arg1, DT_INT32);
auto identity0 = ops::Identity(scope.WithOpName("identity0"), read0);
auto identity1 = ops::Identity(scope.WithOpName("identity1"), read1);
auto add = ops::Add(scope.WithOpName("add"), identity0, identity1);
auto ret = ops::_Retval(scope.WithOpName("ret"), add, 0);
const std::vector<string> underlying_devices_0 = {"/device:TPU:0",
"/device:TPU:1"};
const std::vector<string> underlying_devices_1 = {"/device:TPU:2",
"/device:TPU:3"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices_0},
{"/device:TPU_COMPOSITE:1", &underlying_devices_1}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read0", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("identity0", "/device:TPU:1");
helper.SetAssignedDevice("arg1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("read1", "/device:TPU_COMPOSITE:1");
helper.SetAssignedDevice("identity1", "/device:TPU:3");
helper.SetAssignedDevice("add", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 8);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("read0/R1", "/device:TPU:1");
helper.CheckAssignedDevice("read1/R1", "/device:TPU:3");
helper.CheckAssignedDevice("identity0", "/device:TPU:1");
helper.CheckAssignedDevice("identity1", "/device:TPU:3");
helper.CheckAssignedDevice("add", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
TEST(ReplicatePerReplicaNodesTest, NestedFunctions) {
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
FunctionDefLibrary fdef_lib;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib);
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
FunctionDef fdef;
TF_ASSERT_OK(GraphToFunctionDef(graph, "Func", &fdef));
*fdef_lib.add_function() = fdef;
TF_ASSERT_OK(flib_def.AddFunctionDef(fdef));
}
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(fdef_lib));
NodeDef def;
TF_ASSERT_OK(NodeDefBuilder("func", "Func", &flib_def)
.Input(arg.name(), 0, DT_RESOURCE)
.Finalize(&def));
Status status;
Node* func = scope.graph()->AddNode(def, &status);
TF_ASSERT_OK(status);
scope.graph()->AddEdge(arg.node(), 0, func, 0);
auto ret = ops::_Retval(scope.WithOpName("ret"), Output(func), 0);
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
GraphHelper helper(graph);
EXPECT_EQ(graph.num_op_nodes(), 3);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("func", "/device:CPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 5);
GraphHelper helper(graph);
helper.CheckArgNum(2);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("arg/R1", "/device:TPU:1");
helper.CheckAssignedDevice("arg/Packed", "/device:CPU:0");
helper.CheckAssignedDevice("func", "/device:CPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
const EdgeSet& packed_in_edges =
helper.GetNodeByName("arg/Packed")->in_edges();
EXPECT_EQ(packed_in_edges.size(), 2);
auto it = packed_in_edges.begin();
EXPECT_EQ(helper.GetNodeByName("arg/R0"), (*it++)->src());
EXPECT_EQ(helper.GetNodeByName("arg/R1"), (*it)->src());
const EdgeSet& func_in_edges = helper.GetNodeByName("func")->in_edges();
EXPECT_EQ(func_in_edges.size(), 1);
EXPECT_EQ(helper.GetNodeByName("arg/Packed"),
(*func_in_edges.begin())->src());
}
}
TEST(ReplicatePerReplicaNodesTest, DeadArgNodes) {
tensorflow::Scope scope = tensorflow::Scope::NewRootScope();
Output arg = ops::_Arg(scope.WithOpName("arg"), DT_RESOURCE, 0);
auto read = ops::ReadVariableOp(scope.WithOpName("read"), arg, DT_INT32);
auto ret = ops::_Retval(scope.WithOpName("ret"), read, 0);
const std::vector<string> underlying_devices = {"/device:TPU:0",
"/device:TPU:1"};
const absl::flat_hash_map<string, const std::vector<string>*>
composite_devices = {{"/device:TPU_COMPOSITE:0", &underlying_devices}};
Graph graph(OpRegistry::Global());
TF_ASSERT_OK(scope.ToGraph(&graph));
{
ASSERT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.SetAssignedDevice("arg", "/device:TPU_COMPOSITE:0");
helper.SetAssignedDevice("read", "/device:TPU:0");
helper.SetAssignedDevice("ret", "/device:CPU:0");
}
TF_EXPECT_OK(
ReplicatePerReplicaNodesInFunctionGraph(composite_devices, &graph));
{
EXPECT_EQ(graph.num_op_nodes(), 3);
GraphHelper helper(graph);
helper.CheckArgNum(1);
helper.CheckAssignedDevice("arg/R0", "/device:TPU:0");
helper.CheckAssignedDevice("read", "/device:TPU:0");
helper.CheckAssignedDevice("ret", "/device:CPU:0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_per_replica_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/replicate_per_replica_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f1fc3e6-a0f2-4449-b1a7-b42f92f2e61b | cpp | tensorflow/tensorflow | xent_op | tensorflow/core/kernels/xent_op.cc | tensorflow/core/kernels/xent_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/xent_op.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
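// Computes per-example softmax cross-entropy loss and backprop gradients,
// first broadcasting logits and labels to a common 2-D shape.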
template <typename Device, typename T>
class SoftmaxXentWithLogitsOp : public OpKernel {
public:
explicit SoftmaxXentWithLogitsOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& logits_in = context->input(0);
const Tensor& labels_in = context->input(1);
TensorShape shape_in = logits_in.shape();
BCast bcast(BCast::FromShape(logits_in.shape()),
BCast::FromShape(labels_in.shape()),
                /*fewer_dims_optimization=*/false);
if (!logits_in.IsSameSize(labels_in)) {
OP_REQUIRES(context, bcast.IsValid(),
errors::InvalidArgument(
"logits and labels must be broadcastable: logits_size=",
logits_in.shape().DebugString(),
" labels_size=", labels_in.shape().DebugString()));
shape_in = BCast::ToShape(bcast.output_shape());
}
OP_REQUIRES(context, TensorShapeUtils::IsMatrix(shape_in),
errors::InvalidArgument("logits and labels must be either "
"2-dimensional, or broadcasted to be "
"2-dimensional"));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(context, !OpDeterminismRequired(),
errors::Unimplemented(
"The GPU implementation of SoftmaxCrossEntropyWithLogits"
" that would have been executed is not deterministic."
" Note that the Python API uses an alternative,"
" deterministic, GPU-accelerated path when determinism is"
" enabled."));
}
Tensor scratch;
OP_REQUIRES_OK(
context, context->allocate_temp(DataTypeToEnum<T>::value,
TensorShape({shape_in.dim_size(0), 1}),
&scratch));
Tensor* loss_out = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(
0, TensorShape({shape_in.dim_size(0)}), &loss_out));
Tensor* back_out = nullptr;
OP_REQUIRES_OK(context, context->forward_input_or_allocate_output(
{0}, 1, shape_in, &back_out));
if (shape_in.dim_size(0) > 0) {
functor::XentFunctor<Device, T> functor;
functor(context->eigen_device<Device>(), shape_in.AsEigenDSizes<2>(),
BCast::ToIndexArray<2>(bcast.x_bcast()),
BCast::ToIndexArray<2>(bcast.y_bcast()),
logits_in.template shaped<T, 2>(bcast.x_reshape()),
labels_in.template shaped<T, 2>(bcast.y_reshape()),
scratch.matrix<T>(), loss_out->vec<T>(), back_out->matrix<T>());
}
}
};
namespace functor {
template <typename Device, typename T>
struct XentFunctorBase {
void operator()(const Device& d,
const Eigen::DSizes<Eigen::DenseIndex, 2>& shape,
const Eigen::array<Eigen::DenseIndex, 2>& logits_bcast,
const Eigen::array<Eigen::DenseIndex, 2>& labels_bcast,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<T>::ConstMatrix labels,
typename TTypes<T>::Matrix scratch,
typename TTypes<T>::Vec loss,
typename TTypes<T>::Matrix backprop) {
if (shape[0] > 0) {
XentEigenImpl<Device, T>::Compute(d, shape, logits_bcast, labels_bcast,
logits, labels, scratch, loss,
backprop);
}
}
};
template <typename T>
struct XentFunctor<CPUDevice, T> : XentFunctorBase<CPUDevice, T> {};
}
#define REGISTER_CPU(T) \
REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T"), \
SoftmaxXentWithLogitsOp<CPUDevice, T>);
TF_CALL_half(REGISTER_CPU);
TF_CALL_float(REGISTER_CPU);
TF_CALL_double(REGISTER_CPU);
TF_CALL_bfloat16(REGISTER_CPU);
#if (defined(GOOGLE_CUDA) && GOOGLE_CUDA) || \
(defined(TENSORFLOW_USE_ROCM) && TENSORFLOW_USE_ROCM)
#define REGISTER_GPU(T) \
REGISTER_KERNEL_BUILDER(Name("SoftmaxCrossEntropyWithLogits") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T"), \
SoftmaxXentWithLogitsOp<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_GPU);
#endif
} | #include "tensorflow/core/kernels/xent_op.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
template <class T>
static Graph* Xent(int batch_size, int num_classes, DataType type) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits(type, TensorShape({batch_size, num_classes}));
logits.flat<T>().setRandom();
Tensor labels(type, TensorShape({batch_size, num_classes}));
labels.flat<T>().setRandom();
test::graph::Binary(g, "SoftmaxCrossEntropyWithLogits",
test::graph::Constant(g, logits),
test::graph::Constant(g, labels));
return g;
}
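// Benchmarks a BATCH x CLASS cross-entropy graph on DEVICE; items and bytes
// processed are reported per element of the logits matrix.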
#define BM_XentDev(BATCH, CLASS, DEVICE, C_TYPE, TF_TYPE) \
static void BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Xent<C_TYPE>(BATCH, CLASS, TF_TYPE), \
                    /*old_benchmark_api=*/false)                          \
.Run(state); \
const int64_t tot = \
static_cast<int64_t>(state.iterations()) * BATCH * CLASS; \
state.SetItemsProcessed(tot); \
state.SetBytesProcessed(tot * sizeof(C_TYPE)); \
} \
BENCHMARK(BM_Xent##_##BATCH##_##CLASS##_##DEVICE##_##C_TYPE)->UseRealTime()
#ifdef GOOGLE_CUDA
BM_XentDev(16, 10000, gpu, float, DT_FLOAT);
BM_XentDev(16, 30000, gpu, float, DT_FLOAT);
BM_XentDev(16, 100000, gpu, float, DT_FLOAT);
BM_XentDev(32, 10000, gpu, float, DT_FLOAT);
BM_XentDev(32, 30000, gpu, float, DT_FLOAT);
BM_XentDev(32, 100000, gpu, float, DT_FLOAT);
BM_XentDev(64, 10000, gpu, float, DT_FLOAT);
BM_XentDev(64, 30000, gpu, float, DT_FLOAT);
BM_XentDev(64, 100000, gpu, float, DT_FLOAT);
#endif
#define BM_XentDev_CPU(C_TYPE, TF_TYPE) \
BM_XentDev(1, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(2, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(4, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(8, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(16, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(32, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(64, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(128, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(256, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(512, 10000, cpu, C_TYPE, TF_TYPE); \
BM_XentDev(1024, 10000, cpu, C_TYPE, TF_TYPE)
BM_XentDev_CPU(float, DT_FLOAT);
BM_XentDev_CPU(bfloat16, DT_BFLOAT16);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/xent_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/xent_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
40deacba-c32f-4c52-97bb-a950b5905922 | cpp | tensorflow/tensorflow | shape_util | tensorflow/compiler/tf2xla/kernels/shape_util.cc | third_party/xla/xla/shape_util_test.cc | #include "tensorflow/compiler/tf2xla/kernels/shape_util.h"
#include <limits>
#include "absl/status/status.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
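// Fills `shape_constant` (a preallocated DT_INT32 or DT_INT64 vector with
// input_shape.dims() elements) with the dimension sizes, failing if a
// dimension would overflow the int32 output type.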
Status TensorShapeToConstant(const TensorShape& input_shape,
Tensor* shape_constant) {
const int dims = input_shape.dims();
if (shape_constant->dtype() == DT_INT32) {
auto vec = shape_constant->vec<int32>();
for (int i = 0; i < dims; ++i) {
int64_t dim_size = input_shape.dim_size(i);
if (!FastBoundsCheck(dim_size, std::numeric_limits<int32>::max())) {
return errors::InvalidArgument(
"Shape with out_type=int32 does not support tensors > int32max",
" but dim ", i, " is ", dim_size);
}
vec(i) = static_cast<int32>(dim_size);
}
} else {
auto vec = shape_constant->vec<int64_t>();
for (int i = 0; i < dims; ++i) {
int64_t dim_size = input_shape.dim_size(i);
vec(i) = dim_size;
}
}
return absl::OkStatus();
}
} | #include "xla/shape_util.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
TEST(ShapeUtilTest, GetDimensionHelperCanNegativeIndex) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
EXPECT_EQ(3, ShapeUtil::GetDimension(matrix, -1));
EXPECT_EQ(2, ShapeUtil::GetDimension(matrix, -2));
}
TEST(ShapeUtilTest, GetDimensionHelperExampleInDocumentationTest) {
auto shape = ShapeUtil::MakeShape(F32, {1, 2, 3, 4});
ASSERT_EQ(4, ShapeUtil::GetDimension(shape, -1));
}
TEST(ShapeUtilTest, NegativeIndexOobFails) {
Shape matrix = ShapeUtil::MakeShape(F32, {2, 3});
ASSERT_DEATH(ShapeUtil::GetDimension(matrix, -3), "dimension_number >= 0");
}
TEST(ShapeUtilTest, CreateRank3DimensionVectorFromShape) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
DimensionVector dimensions = ShapeUtil::CreateDimensionVectorFromShape(shape);
EXPECT_THAT(dimensions, ElementsAre(3, 2, 7));
}
TEST(ShapeUtilTest, Rank1DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3});
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank2DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank3DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7});
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, Rank4DimensionIndexing) {
Shape shape = ShapeUtil::MakeShape(F32, {3, 2, 7, 8});
ASSERT_EQ(8, shape.dimensions(3));
ASSERT_EQ(7, shape.dimensions(2));
ASSERT_EQ(2, shape.dimensions(1));
ASSERT_EQ(3, shape.dimensions(0));
}
TEST(ShapeUtilTest, CompatibleIdenticalShapes) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::Compatible(shape1, shape2));
}
TEST(ShapeUtilTest, TokenCompatibility) {
EXPECT_TRUE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Compatible(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Compatible(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTokenShape()})));
}
TEST(ShapeUtilTest, TokensEqualShapes) {
EXPECT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeTokenShape()));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShape(F32, {})));
EXPECT_FALSE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTokenShape()));
EXPECT_TRUE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {0, 1})}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTokenShape(),
ShapeUtil::MakeShapeWithDenseLayout(S32, {3, 4}, {1, 0})})));
}
TEST(ShapeUtilTest, CompatibleNotIdenticalShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_1 = shape_1.mutable_layout();
layout_1->clear_minor_to_major();
layout_1->add_minor_to_major(0);
layout_1->add_minor_to_major(1);
Shape shape_2 = ShapeUtil::MakeShape(F32, {3, 2});
auto layout_2 = shape_2.mutable_layout();
layout_2->clear_minor_to_major();
layout_2->add_minor_to_major(1);
layout_2->add_minor_to_major(0);
EXPECT_FALSE(ShapeUtil::Equal(shape_1, shape_2));
EXPECT_TRUE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, CompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {3, 2});
ASSERT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleIgnoringFpPrecision) {
Shape shape1 = ShapeUtil::MakeShape(BF16, {3, 2});
Shape shape2 = ShapeUtil::MakeShape(F32, {2, 2});
ASSERT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
}
TEST(ShapeUtilTest, IncompatibleDifferentElementShapes) {
Shape shape_1 = ShapeUtil::MakeShape(F32, {3, 2});
Shape shape_2 = ShapeUtil::MakeShape(PRED, {3, 2});
EXPECT_FALSE(ShapeUtil::Compatible(shape_1, shape_2));
}
TEST(ShapeUtilTest, EqualIgnoringFpPrecision) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringFpPrecision) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringFpPrecision(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, EqualIgnoringElementType) {
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(S32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {4, 3}, {0, 1})));
EXPECT_TRUE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(PRED, {4, 3}, {0, 1})));
}
TEST(ShapeUtilTest, UnequalIgnoringElementType) {
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {4, 3}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {0, 1})));
EXPECT_FALSE(ShapeUtil::EqualIgnoringElementType(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 4}, {0, 1}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {3, 4}, {1, 0})));
}
TEST(ShapeUtilTest, EqualDynamicShapes) {
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {true, false})));
EXPECT_FALSE(
ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 3}, {true, false}),
ShapeUtil::MakeShape(F32, {4, 3}, {false, false})));
EXPECT_FALSE(ShapeUtil::Equal(
ShapeUtil::MakeShape(F32, {Shape::kUnboundedSize}, {true}),
ShapeUtil::MakeShape(F32, {2}, {true})));
}
TEST(ShapeUtilTest, CompatibleDynamicShapes) {
Shape shape_a = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_a.mutable_layout() = Layout({1, 0});
Shape shape_b = ShapeUtil::MakeShape(F32, {4, 3}, {true, false});
*shape_b.mutable_layout() = Layout({0, 1});
Shape shape_c = ShapeUtil::MakeShape(F32, {4, 3}, {false, true});
*shape_c.mutable_layout() = Layout({0, 1});
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_a));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_b));
EXPECT_TRUE(ShapeUtil::Compatible(shape_a, shape_c));
}
TEST(ShapeUtilTest, CompatibleTuples) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_TRUE(ShapeUtil::Compatible(tuple1, tuple2));
}
TEST(ShapeUtilTest, MakeMaybeTupleShape) {
Shape s1 =
ShapeUtil::MakeMaybeTupleShape({ShapeUtil::MakeShape(F32, {3, 2})});
EXPECT_TRUE(ShapeUtil::Compatible(s1, ShapeUtil::MakeShape(F32, {3, 2})));
}
TEST(ShapeUtilTest, CompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {3, 2}), ShapeUtil::MakeShape(F32, {4, 5})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F64, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithSwappedElements) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(PRED, {4, 5})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesIgnoringFpPrecision) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(BF16, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(BF16, {4, 5})});
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentPrimitiveType) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(S32, {3, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
EXPECT_TRUE(ShapeUtil::CompatibleIgnoringElementType(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleTuplesWithDifferentDimensions) {
Shape tuple1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {3, 2})});
Shape tuple2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(PRED, {4, 5}), ShapeUtil::MakeShape(F32, {4, 2})});
EXPECT_FALSE(ShapeUtil::Compatible(tuple1, tuple2));
}
TEST(ShapeUtilTest, IncompatibleScalarVsTuple) {
Shape shape1 = ShapeUtil::MakeShape(F32, {});
Shape shape2 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 2}), ShapeUtil::MakeShape(U32, {})});
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
}
TEST(ShapeUtilTest, OpaqueVsArray) {
Shape shape1 = ShapeUtil::MakeShape(F32, {5, 7});
Shape shape2 = ShapeUtil::MakeOpaqueShape();
EXPECT_FALSE(ShapeUtil::Compatible(shape1, shape2));
EXPECT_FALSE(ShapeUtil::Compatible(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringFpPrecision(shape2, shape1));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape1, shape2));
EXPECT_FALSE(ShapeUtil::CompatibleIgnoringElementType(shape2, shape1));
}
TEST(ShapeUtilTest, ScalarDefaultLayoutEqualsScalarEmptyMin2Maj) {
Shape scalar_default_layout = ShapeUtil::MakeShape(F32, {});
ASSERT_TRUE(scalar_default_layout.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_default_layout);
const Shape scalar_empty_min2maj =
ShapeUtil::MakeShapeWithDenseLayout(F32, {}, {});
ASSERT_TRUE(scalar_empty_min2maj.has_layout())
<< ShapeUtil::HumanStringWithLayout(scalar_empty_min2maj);
EXPECT_TRUE(ShapeUtil::Equal(scalar_default_layout, scalar_empty_min2maj));
}
TEST(ShapeUtilTest, ByteSizeOfWithoutPadding) {
EXPECT_EQ(4, ShapeUtil::ByteSizeOfPrimitiveType(F32));
EXPECT_EQ(4, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {})));
EXPECT_EQ(800, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F32, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(F64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(F64, {10, 20})));
EXPECT_EQ(8, ShapeUtil::ByteSizeOfPrimitiveType(C64));
EXPECT_EQ(8, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {})));
EXPECT_EQ(1600, ShapeUtil::ByteSizeOf(ShapeUtil::MakeShape(C64, {10, 20})));
}
TEST(ShapeUtilTest, ByteStrides) {
Shape shape1 = ShapeUtil::MakeShape(F32, {3, 5, 7});
Shape shape2 = ShapeUtil::MakeShape(F16, {5, 7, 9});
EXPECT_THAT(*ShapeUtil::ByteStrides(shape1), ElementsAre(140, 28, 4));
EXPECT_THAT(*ShapeUtil::ByteStrides(shape2), ElementsAre(126, 18, 2));
}
TEST(ShapeUtilTest, NilShape) {
EXPECT_TRUE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {1, 2, 3})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(ShapeUtil::MakeShape(F32, {0, 1})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_FALSE(ShapeUtil::IsEmptyTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {0})})));
}
TEST(ShapeUtilTest, NestedTuple) {
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeTupleShape({})})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeTupleShape({})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeShape(S32, {})})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({}), ShapeUtil::MakeTupleShape({})})));
}
TEST(ShapeUtilTest, NestedTupleWithPtrs) {
const Shape nil = ShapeUtil::MakeNil();
const Shape s32 = ShapeUtil::MakeShape(S32, {});
EXPECT_FALSE(ShapeUtil::IsNestedTuple(nil));
EXPECT_FALSE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&s32})));
EXPECT_TRUE(
ShapeUtil::IsNestedTuple(ShapeUtil::MakeTupleShapeWithPtrs({&nil})));
EXPECT_FALSE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&s32, &nil})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &s32})));
EXPECT_TRUE(ShapeUtil::IsNestedTuple(
ShapeUtil::MakeTupleShapeWithPtrs({&nil, &nil})));
}
TEST(ShapeUtilTest, ElementsIn) {
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1})));
EXPECT_EQ(1, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2})));
EXPECT_EQ(2, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_EQ(0, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_EQ(15, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_EQ(221, ShapeUtil::ElementsIn(ShapeUtil::MakeShape(S32, {13, 17})));
}
TEST(ShapeUtilTest, HasPrimitiveType) {
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {}), S16));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeShape(S32, {0}), S32));
EXPECT_FALSE(ShapeUtil::HasPrimitiveType(ShapeUtil::MakeTupleShape({}), S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(S32, {})}),
S32));
EXPECT_TRUE(ShapeUtil::HasPrimitiveType(
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S16, {})})}),
S16));
}
TEST(ShapeUtilTest, IsZeroElementArray) {
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 1})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {2, 1})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {3, 0, 5})));
EXPECT_TRUE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {0, 3, 0})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {1, 3, 5})));
EXPECT_FALSE(
ShapeUtil::IsZeroElementArray(ShapeUtil::MakeShape(S32, {13, 17})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeNil()));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(ShapeUtil::MakeTupleShape({})));
EXPECT_FALSE(ShapeUtil::IsZeroElementArray(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(S32, {0, 3, 0})})));
}
TEST(ShapeUtilTest, SameDimensions) {
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(S32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(S32, {1})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0}),
ShapeUtil::MakeShape(S32, {0})));
EXPECT_TRUE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::MakeShape(S32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {2})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {0, 0}),
ShapeUtil::MakeShape(F32, {0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {}),
ShapeUtil::MakeShape(F32, {1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 1})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1}),
ShapeUtil::MakeShape(F32, {1, 0})));
EXPECT_FALSE(ShapeUtil::SameDimensions(ShapeUtil::MakeShape(S32, {1, 1}),
ShapeUtil::MakeShape(F32, {1, 2})));
}
TEST(ShapeUtilTest, GetSubshape) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(array_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, *ShapeUtil::GetMutableSubshape(&array_shape, {})));
Shape tuple_shape =
ShapeUtil::MakeTupleShape({array_shape, array_shape, array_shape});
EXPECT_TRUE(
ShapeUtil::Equal(tuple_shape, ShapeUtil::GetSubshape(tuple_shape, {})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(array_shape, ShapeUtil::GetSubshape(tuple_shape, {2})));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_TRUE(ShapeUtil::Equal(nested_tuple_shape,
ShapeUtil::GetSubshape(nested_tuple_shape, {})));
EXPECT_TRUE(ShapeUtil::Equal(
array_shape, ShapeUtil::GetSubshape(nested_tuple_shape, {0})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {1})));
EXPECT_TRUE(
ShapeUtil::Equal(ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::GetSubshape(nested_tuple_shape, {2, 0})));
}
TEST(ShapeUtilTest, IsLeafIndex) {
Shape array_shape = ShapeUtil::MakeShape(F32, {42, 42, 123});
EXPECT_TRUE(ShapeUtil::IsLeafIndex(array_shape, {}));
Shape tuple_shape = ShapeUtil::MakeTupleShape({array_shape, array_shape});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(tuple_shape, {1}));
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape(
{array_shape, ShapeUtil::MakeTupleShape({array_shape, array_shape}),
ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({array_shape, array_shape}),
array_shape})});
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {0}));
EXPECT_FALSE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 0}));
EXPECT_TRUE(ShapeUtil::IsLeafIndex(nested_tuple_shape, {1, 1}));
}
TEST(ShapeUtilTest, ForEachSubshapeArray) {
const Shape shape = ShapeUtil::MakeShape(F32, {2, 3});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_EQ(&shape, &subshape);
EXPECT_TRUE(index.empty());
++calls;
});
EXPECT_EQ(1, calls);
}
TEST(ShapeUtilTest, ForEachSubshapeNestedTuple) {
const Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachSubshape(
shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {
EXPECT_TRUE(
ShapeUtil::Equal(subshape, ShapeUtil::GetSubshape(shape, index)));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, ForEachMutableSubshapeNestedTuple) {
Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachMutableSubshape(
&shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {
EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));
if (calls == 0) {
EXPECT_TRUE(index.empty());
} else if (calls == 4) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));
}
++calls;
});
EXPECT_EQ(5, calls);
}
TEST(ShapeUtilTest, ForEachMutableLeafShapeTest) {
Shape shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {42}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {101}),
ShapeUtil::MakeShape(PRED, {33})})});
int calls = 0;
ShapeUtil::ForEachMutableLeafShape(
&shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {
EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));
if (calls == 0) {
EXPECT_EQ(42, ShapeUtil::ElementsIn(*subshape));
} else if (calls == 1) {
EXPECT_EQ(101, ShapeUtil::ElementsIn(*subshape));
} else if (calls == 2) {
EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));
}
++calls;
});
EXPECT_EQ(3, calls);
}
TEST(ShapeUtilTest, InsertedOrDeleted1SizedDimensions) {
Shape shape0 = ShapeUtil::MakeShape(S32, {9, 1, 4});
Shape shape1 = ShapeUtil::MakeShape(S32, {1, 9, 4, 1});
Shape shape2 = ShapeUtil::MakeShape(S32, {3, 1, 12});
EXPECT_TRUE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape1).has_value());
EXPECT_FALSE(
ShapeUtil::InsertedOrDeleted1SizedDimensions(shape0, shape2).has_value());
}
TEST(ShapeUtilTest, ForEachIndex) {
struct ShapeDimensionAndNumberInvocations {
std::vector<int64_t> dimensions;
int invocations;
} test_data[] = {
{{}, 1}, {{0}, 0}, {{16}, 16}, {{3, 0}, 0},
{{0, 2}, 0}, {{4, 16}, 64}, {{6, 11, 17}, 1122}, {{6, 11, 5, 17}, 5610},
};
for (const auto& data : test_data) {
Shape shape = ShapeUtil::MakeShape(F32, data.dimensions);
int invocations = 0;
auto increment_func = [&invocations](absl::Span<const int64_t> indexes) {
invocations++;
return true;
};
std::vector<int64_t> zero_base(data.dimensions.size(), 0);
std::vector<int64_t> step(data.dimensions.size(), 1);
ShapeUtil::ForEachIndex(shape, zero_base, data.dimensions, step,
increment_func);
EXPECT_EQ(invocations, data.invocations);
}
}
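// The invocation count is the product of the extents: e.g. 6 * 11 * 17 =
// 1122 and 6 * 11 * 5 * 17 = 5610. A rank-0 shape is visited exactly once,
// and any zero extent yields zero visits.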
TEST(ShapeUtilTest, ForEachIndexWithStatus) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int invocations = 0;
auto increment_func =
[&invocations](
absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
if (++invocations == 5) {
return Unimplemented("Cannot increment beyond 5.");
}
return true;
};
absl::Status error_status = ShapeUtil::ForEachIndexWithStatus(
shape, {0, 0}, {10, 10}, {0, 1},
increment_func);
EXPECT_FALSE(error_status.ok());
EXPECT_THAT(error_status.message(),
::testing::HasSubstr("Cannot increment beyond 5."));
EXPECT_EQ(invocations, 5);
}
TEST(ShapeUtilTest, GetForEachIndexParallelThreadCount) {
const int kThreadCount = ShapeUtil::GetForEachIndexParallelThreadCount();
Shape shape = ShapeUtil::MakeShape(F32, {10, 100});
auto check_func = [kThreadCount](absl::Span<const int64_t> ,
int thread_id) -> absl::StatusOr<bool> {
EXPECT_GE(thread_id, -1);
EXPECT_LT(thread_id, kThreadCount);
return true;
};
for (int i = 0; i < 10; ++i) {
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 100},
{1, 1}, check_func);
}
}
TEST(ShapeUtilTest, ForEachIndexParallel) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
  ShapeUtil::ForEachIndexParallel(shape, /*base=*/{0, 0}, /*count=*/{10, 10},
                                  /*incr=*/{1, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_Rank0) {
Shape shape = ShapeUtil::MakeShape(F32, {});
int64_t output = -1;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output = indexes.size();
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {}, {},
{}, set_func);
EXPECT_EQ(output, 0);
}
TEST(ShapeUtilTest, ForEachIndexParallel_Empty) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 0});
bool called = false;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
called = true;
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {2, 0},
{1, 1}, set_func);
EXPECT_FALSE(called);
}
TEST(ShapeUtilTest, ForEachIndexParallel_DimensionPinnedWithZeros) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
int64_t output[2][2] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {1, 0}, {0, 2},
{0, 1}, set_func);
for (int i = 0; i < 2; ++i) {
for (int j = 0; j < 2; ++j) {
if (i == 1) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_WithSkips) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10] = {};
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {2, 3}, {3, 1},
{2, 1}, set_func);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
if ((i == 2 || i == 4) && j == 3) {
EXPECT_EQ(output[i][j], init + i + j);
} else {
EXPECT_EQ(output[i][j], 0);
}
}
}
}
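// The (base, count, incr) triple selects a strided window: count is an
// extent, not an iteration count. Here dim 0 visits indices {2, 4} (offsets
// {0, 2} within [2, 2 + 3)) and dim 1 visits only {3}, matching the checks
// above.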
TEST(ShapeUtilTest, ForEachIndexParallel_CalledTwice) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10});
int64_t output[10][10];
int init = 5;
auto set_func = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init + indexes[0] + indexes[1];
return true;
};
int init2 = 15;
auto set_func2 = [&](absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[indexes[0]][indexes[1]] = init2 + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func);
ShapeUtil::ForEachIndexParallel(shape, {0, 0}, {10, 10},
{1, 1}, set_func2);
for (int i = 0; i < 10; ++i) {
for (int j = 0; j < 10; ++j) {
EXPECT_EQ(output[i][j], init2 + i + j);
}
}
}
TEST(ShapeUtilTest, ForEachIndexParallel_CalledFromMultipleThreads) {
constexpr int kCallingThreads = 10;
constexpr int kDim0 = 10;
constexpr int kDim1 = 10;
constexpr int kInit = 5;
const Shape kShape = ShapeUtil::MakeShape(F32, {kDim0, kDim1});
int64_t output[kCallingThreads][kDim0][kDim1];
{
tsl::thread::ThreadPool pool(tsl::Env::Default(), "foreach",
kCallingThreads);
for (int t = 0; t < kCallingThreads; ++t) {
pool.Schedule([&output, &kShape, t] {
auto set_func = [&output, t](
absl::Span<const int64_t> indexes,
int ) -> absl::StatusOr<bool> {
output[t][indexes[0]][indexes[1]] = kInit + indexes[0] + indexes[1];
return true;
};
ShapeUtil::ForEachIndexParallel(kShape, {0, 0},
{kDim0, kDim1},
{1, 1}, set_func);
});
}
}
for (int t = 0; t < kCallingThreads; ++t) {
for (int i = 0; i < kDim0; ++i) {
for (int j = 0; j < kDim1; ++j) {
EXPECT_EQ(output[t][i][j], kInit + i + j);
}
}
}
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1x1_to_1x1x1) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {1, 1, 1, 1}),
ShapeUtil::MakeShape(S32, {1, 1, 1})),
ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_1x1x1_to_1x1x1x1) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {1, 1, 1}),
ShapeUtil::MakeShape(S32, {1, 1, 1, 1})),
ElementsAre(std::make_pair(0, 0), std::make_pair(1, 1),
std::make_pair(2, 2)));
}
TEST(ShapeUtilTest, DimensionsUnmodifiedByReshape_4x1x3x5x6x7_to_2x6x1x5x1x42) {
EXPECT_THAT(ShapeUtil::DimensionsUnmodifiedByReshape(
ShapeUtil::MakeShape(S32, {4, 1, 3, 5, 6, 7}),
ShapeUtil::MakeShape(S32, {2, 6, 1, 5, 1, 42})),
ElementsAre(std::make_pair(3, 3)));
}
TEST(ShapeUtilTest, ReshapeIsBitcast_3x4_6x2) {
for (bool input_is_row_major : {true, false}) {
for (bool output_is_row_major : {true, false}) {
Layout input_layout = input_is_row_major ? LayoutUtil::MakeLayout({1, 0})
: LayoutUtil::MakeLayout({0, 1});
Layout output_layout = output_is_row_major
? LayoutUtil::MakeLayout({1, 0})
: LayoutUtil::MakeLayout({0, 1});
EXPECT_EQ(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(
F32, {3, 4}, input_layout.minor_to_major()),
ShapeUtil::MakeShapeWithDenseLayout(
F32, {6, 2}, output_layout.minor_to_major())),
input_is_row_major && output_is_row_major);
}
}
}
TEST(ShapeUtilTest, ReshapeIsBitcast_3x2x2_6x2_Dim1IsMostMinor) {
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {6, 2}, {0, 1})));
}
TEST(ShapeUtilTest, ReshapeIsBitcastIgnoreElementType) {
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
true));
EXPECT_FALSE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
false));
}
TEST(ShapeUtilTest, TransposeIsBitcastIgnoreElementType) {
EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {5, 10}, {0, 1}), {1, 0},
true));
EXPECT_FALSE(ShapeUtil::TransposeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {5, 10}, {0, 1}), {1, 0},
false));
}
TEST(ShapeUtilTest, IsReshapeOrTransposeBitcast) {
EXPECT_TRUE(ShapeUtil::IsReshapeOrTransposeBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 5}, {1, 0}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 10}, {0, 1})));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {1, 0, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F16, {6, 2}, {0, 1}),
true));
}
TEST(ShapeUtilTest, HasDegenerateDimensions) {
EXPECT_TRUE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 1, 2})));
EXPECT_TRUE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 1, 1})));
EXPECT_FALSE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 3, 5})));
EXPECT_FALSE(
ShapeUtil::HasDegenerateDimensions(ShapeUtil::MakeShape(F32, {3, 0, 5})));
}
TEST(ShapeUtilTest, PermuteDimensionsLayout) {
std::vector<int64_t> layout(3);
std::iota(layout.begin(), layout.end(), 0);
do {
Shape s = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 100, 1000}, layout);
SCOPED_TRACE(absl::StrCat("s=", ShapeUtil::HumanString(s)));
std::vector<int64_t> permutation(3);
std::iota(permutation.begin(), permutation.end(), 0);
do {
SCOPED_TRACE(
absl::StrCat("permutation=", absl::StrJoin(permutation, ",")));
EXPECT_TRUE(ShapeUtil::TransposeIsBitcast(
s, ShapeUtil::PermuteDimensions(permutation, s), permutation));
} while (std::next_permutation(permutation.begin(), permutation.end()));
} while (std::next_permutation(layout.begin(), layout.end()));
}
TEST(ShapeUtilTest, UpdateDynamicDimensions) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape});
ShapeUtil::UpdateDynamicDimension(&tuple_shape, {0}, 1, true);
EXPECT_TRUE(ShapeUtil::GetSubshape(tuple_shape, {0}).is_dynamic_dimension(1));
}
TEST(ShapeUtilTest, InvalidDynamicDimension) {
absl::StatusOr<Shape> error_status = ShapeUtil::MakeValidatedShape(
F32, {Shape::kUnboundedSize, Shape::kUnboundedSize}, {true, false});
EXPECT_FALSE(error_status.ok());
EXPECT_THAT(error_status.status().message(),
::testing::HasSubstr(
"Cannot mark a dynamic dimension at dim=1 as static"));
}
TEST(ShapeUtilTest, PermuteDynamicDimensions) {
Shape shape =
ShapeUtil::MakeShape(F32, {10, 100, 1000},
{false, true, true});
SCOPED_TRACE(absl::StrCat("shape=", shape.ToString()));
std::vector<int64_t> permutation(3);
std::iota(permutation.begin(), permutation.end(), 0);
do {
SCOPED_TRACE(absl::StrCat("permutation=", absl::StrJoin(permutation, ",")));
auto permuted = ShapeUtil::PermuteDimensions(permutation, shape);
for (int i = 0; i < shape.rank(); i++) {
EXPECT_EQ(permuted.dimensions(i), shape.dimensions(permutation[i]));
EXPECT_EQ(permuted.is_dynamic_dimension(i),
shape.is_dynamic_dimension(permutation[i]));
}
} while (std::next_permutation(permutation.begin(), permutation.end()));
}
TEST(ShapeUtilTest, PrependMajorDimension) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30});
EXPECT_EQ(ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShape(F32, {40, 10, 20, 30}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {0, 2, 1});
EXPECT_EQ(
ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShapeWithDenseLayout(F32, {40, 10, 20, 30}, {1, 3, 2, 0}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {2, 1, 0});
EXPECT_EQ(
ShapeUtil::PrependMajorDimension(40, shape),
ShapeUtil::MakeShapeWithDenseLayout(F32, {40, 10, 20, 30}, {3, 2, 1, 0}));
}
TEST(ShapeUtilTest, AppendMinorDimension) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 20, 30});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShape(F32, {10, 20, 30, 40}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {2, 1, 0});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30, 40},
{3, 2, 1, 0}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30}, {0, 2, 1});
ShapeUtil::AppendMinorDimension(40, &shape);
EXPECT_EQ(shape, ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 20, 30, 40},
{3, 0, 2, 1}));
}
TEST(ShapeUtilTest, MoveDimToMajor) {
Shape shape = ShapeUtil::MakeShape(F32, {10, 10, 10});
Shape new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(shape, new_shape);
new_shape = ShapeUtil::MoveDimToMajor(shape, 1);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {2, 0, 1}));
shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {0, 2, 1});
new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {2, 1, 0}));
shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {10, 10, 10}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10, 10}, {0, 2, 1})});
new_shape = ShapeUtil::MoveDimToMajor(shape, 0);
EXPECT_EQ(new_shape,
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {10, 10, 10}),
ShapeUtil::MakeShapeWithDenseLayout(
F32, {10, 10, 10}, {2, 1, 0})}));
}
TEST(ShapeUtilTest, DeleteDimensions) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2}, {2, 0, 1});
Shape new_shape = ShapeUtil::DeleteDimensions({1}, shape);
EXPECT_EQ(new_shape,
ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 2}, {1, 0}));
}
TEST(ShapeUtilTest, MakeShapeWithDescendingLayoutAndSamePhysicalLayout) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {128, 24, 4, 48, 48},
{2, 4, 3, 1, 0});
Shape new_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape);
EXPECT_EQ(new_shape, ShapeUtil::MakeShapeWithDenseLayout(
F32, {128, 24, 48, 48, 4}, {4, 3, 2, 1, 0}));
}
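// The physical (major-to-minor) dimension order of the input is
// 128, 24, 48, 48, 4, so the result keeps the same byte layout while using
// a plain descending {4, 3, 2, 1, 0} minor_to_major.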
TEST(ShapeUtilTest, DeduceTransposeDimensionsForBitcast) {
Shape input_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
Shape output_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 5}, {0, 1});
std::vector<int64_t> expected_permutation = {1, 0};
EXPECT_EQ(std::make_optional(expected_permutation),
ShapeUtil::DeduceTransposeDimensionsForBitcast(input_shape,
output_shape));
}
TEST(ShapeUtilTest, DeduceTransposeDimensionsForBitcastNegative) {
Shape input_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3}, {1, 0});
Shape output_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 5}, {1, 0});
EXPECT_EQ(std::nullopt, ShapeUtil::DeduceTransposeDimensionsForBitcast(
input_shape, output_shape));
}
TEST(ShapeUtilTest, DeleteDimensionsUnsorted) {
Shape shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 3, 2, 7, 9},
{2, 0, 1, 4, 3});
Shape a = ShapeUtil::DeleteDimensions({1, 2, 3}, shape);
Shape b = ShapeUtil::DeleteDimensions({3, 2, 1}, shape);
EXPECT_EQ(a, b);
EXPECT_EQ(a, ShapeUtil::MakeShapeWithDenseLayout(F32, {5, 9}, {0, 1}));
}
TEST(ShapeUtilTest, IsEffectivelyMostMajorDimension) {
Shape shape0 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{4, 0, 1, 2, 3});
EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape0, 2));
Shape shape1 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{4, 1, 2, 3, 0});
EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape1, 2));
Shape shape2 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 279},
{0, 1, 2, 3, 4});
EXPECT_FALSE(ShapeUtil::IsEffectivelyMostMajorDimension(shape2, 2));
Shape shape3 = ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 1, 16, 1, 1},
{0, 1, 2, 3, 4});
  EXPECT_TRUE(ShapeUtil::IsEffectivelyMostMajorDimension(shape3, 4));
}
TEST(ShapeUtilTest, B_250640044) {
ShapeProto proto;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(element_type: TUPLE
tuple_shapes {
element_type: S8
dimensions: 137438953472
layout {
minor_to_major: 0
dim_level_types: DIM_COMPRESSED
physical_shape {
element_type: TUPLE
tuple_shapes {}
}
}
is_dynamic_dimension: false
})pb",
&proto));
Shape shape(proto);
EXPECT_FALSE(ShapeUtil::ValidateShape(shape).ok());
}
TEST(ShapeUtilTest, B_251055887) {
ShapeProto proto;
EXPECT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
element_type: S8
dimensions: 0
dimensions: 8
dimensions: 0
dimensions: 0
dimensions: 4
dimensions: 1
dimensions: 1
dimensions: 6
dimensions: 281474976710657
dimensions: 1
layout {
minor_to_major: 1
minor_to_major: 3
minor_to_major: 0
minor_to_major: 5
minor_to_major: 4
minor_to_major: 6
minor_to_major: 8
minor_to_major: 7
minor_to_major: 6
minor_to_major: 9
physical_shape { element_type: -562 }
})pb",
&proto));
Shape shape(proto);
EXPECT_FALSE(ShapeUtil::ValidateShape(shape).ok());
}
TEST(ShapeUtilTest, Int4ShapeSize) {
Shape int4_shape = ShapeUtil::MakeShape(S4, {64, 128});
int4_shape.mutable_layout()->set_element_size_in_bits(4);
EXPECT_EQ(ShapeUtil::ArrayDataSize(int4_shape), 64 * 128 / 2);
EXPECT_EQ(ShapeUtil::ArraySize(int4_shape), 64 * 128 / 2);
Shape int4_shape2 = ShapeUtil::MakeShape(S4, {9216, 6144});
auto* layout = int4_shape2.mutable_layout();
layout->clear_tiles();
layout->add_tiles();
layout->add_tiles();
*layout->mutable_tiles(0) = Tile({8 * (32 / 4), 128});
*layout->mutable_tiles(1) = Tile({32 / 4, 1});
layout->set_element_size_in_bits(4);
EXPECT_EQ(ShapeUtil::ArrayDataSize(int4_shape2), 9216 * 6144 / 2);
EXPECT_EQ(ShapeUtil::ArraySize(int4_shape2), 9216 * 6144 / 2);
Shape pred_shape = ShapeUtil::ChangeElementType(int4_shape, PRED);
EXPECT_EQ(pred_shape.layout().element_size_in_bits(), 0);
Shape u4_shape = ShapeUtil::ChangeElementType(int4_shape, U4);
EXPECT_EQ(u4_shape.layout().element_size_in_bits(), 4);
}
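// Packing arithmetic: 64 * 128 = 8192 4-bit elements pack two per byte,
// i.e. 8192 / 2 = 4096 bytes, which is what ArrayDataSize and ArraySize
// report above.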
TEST(XlaShapeUtilTest, ZeroSize) {
std::vector<std::vector<int64_t>> test_cases = {
{0, 64, 128}, {128, 0, 64}, {64, 128, 0},
{0, 63, 127}, {127, 0, 63}, {63, 127, 0},
};
for (const auto& dimensions : test_cases) {
xla::Shape int4_shape = xla::ShapeUtil::MakeShape(xla::S4, dimensions);
int4_shape.mutable_layout()->set_element_size_in_bits(4);
EXPECT_EQ(xla::ShapeUtil::ArrayDataSize(int4_shape), 0);
EXPECT_EQ(xla::ShapeUtil::ArraySize(int4_shape), 0);
}
}
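// With any extent equal to zero there are no elements at all, so the packed
// size is 0 bytes regardless of element_size_in_bits.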
TEST(ShapeUtilTest, DecomposeBitcastToReshape) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {1, 16, 17, 3}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {1, 0});
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
EXPECT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition));
}
TEST(ShapeUtilTest, DecomposeBitcastToReshape2) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {17, 3, 1, 16}, {1, 0, 3, 2});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {0, 1});
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
EXPECT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
decomposition));
}
TEST(ShapeUtilTest, DecomposeBitcastToTranspose) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 7, 6, 4}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 6, 4, 7}, {2, 1, 3, 0});
const std::vector<int64_t> kExpectedTransposeDims = {0, 2, 3, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
decomposition));
ShapeUtil::BitcastDecompositionTranspose decomposition_transpose =
std::get<ShapeUtil::BitcastDecompositionTranspose>(decomposition);
EXPECT_EQ(decomposition_transpose.transpose_dims, kExpectedTransposeDims);
}
TEST(ShapeUtilTest, DecomposeBitcastToReshapeAndTranspose) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {0, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 1, 2};
const Shape kExpectedTranspose1Shape = kInputShape;
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {1, 0};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToReshapeAndTranspose2) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3, 7}, {3, 2, 1, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {7, 16, 51}, {0, 2, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 1, 2, 3};
const Shape kExpectedTranspose1Shape = kInputShape;
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51, 7}, {2, 1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {2, 0, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToTransposeAndReshape) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 3, 17}, {1, 2, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {1, 0});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 2, 1};
const Shape kExpectedTranspose1Shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kExpectedReshapeShape = kOutputShape;
const std::vector<int64_t> kExpectedTranspose2Dims = {0, 1};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_TRUE(decomposition_trt.IsTranspose2Identity());
}
TEST(ShapeUtilTest, DecomposeBitcastToTrt) {
const Shape kInputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 3, 17}, {1, 2, 0});
const Shape kOutputShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 51}, {0, 1});
const std::vector<int64_t> kExpectedTranspose1Dims = {0, 2, 1};
const Shape kExpectedTranspose1Shape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {16, 17, 3}, {2, 1, 0});
const Shape kExpectedReshapeShape =
ShapeUtil::MakeShapeWithDenseLayout(F32, {51, 16}, {1, 0});
const std::vector<int64_t> kExpectedTranspose2Dims = {1, 0};
ShapeUtil::BitcastDecomposition decomposition =
ShapeUtil::DecomposeBitcast(kInputShape, kOutputShape);
ASSERT_TRUE(std::holds_alternative<ShapeUtil::BitcastDecompositionTrt>(
decomposition));
ShapeUtil::BitcastDecompositionTrt decomposition_trt =
std::get<ShapeUtil::BitcastDecompositionTrt>(decomposition);
EXPECT_EQ(decomposition_trt.transpose1_dims, kExpectedTranspose1Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose1Identity());
EXPECT_EQ(decomposition_trt.transpose1_shape, kExpectedTranspose1Shape);
EXPECT_EQ(decomposition_trt.reshape_shape, kExpectedReshapeShape);
EXPECT_EQ(decomposition_trt.transpose2_dims, kExpectedTranspose2Dims);
EXPECT_FALSE(decomposition_trt.IsTranspose2Identity());
}
TEST(AlgebraicSimplifierTest, ReshapeIsBitcast_3x2x2_6x2_Dim0IsMostMinor) {
EXPECT_FALSE(ShapeUtil::ReshapeIsBitcast(
ShapeUtil::MakeShapeWithDenseLayout(F32, {3, 2, 2}, {0, 1, 2}),
ShapeUtil::MakeShapeWithDenseLayout(F32, {6, 2}, {0, 1})));
}
TEST(AlignmentTest, AlignLayoutsWithoutTrivialDimensions) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{3, 2, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 7, 5, 11}));
EXPECT_TRUE(aligned_shape);
EXPECT_THAT(aligned_shape.value().layout().minor_to_major(),
ElementsAre(4, 3, 2, 1, 0, 5));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {3, 2, 4, 35, 11}));
EXPECT_TRUE(aligned_shape);
EXPECT_THAT(aligned_shape.value().layout().minor_to_major(),
ElementsAre(3, 2, 1, 0, 4));
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithTrivialDimensions) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(
xla::F32, {1, 3, 8, 1, 5, 7, 1, 11, 1, 1},
{5, 0, 4, 2, 1, 3, 6, 7, 9, 8});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {1, 4, 1, 3, 2, 7, 5, 11, 1}));
EXPECT_TRUE(aligned_shape);
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithAllTrivialDimensions) {
Shape input =
ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {1, 1, 1, 1}, {0, 1, 3, 2});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {1, 1, 1, 1, 1}));
EXPECT_TRUE(aligned_shape);
EXPECT_TRUE(ShapeUtil::ReshapeIsBitcast(input, aligned_shape.value()));
}
TEST(AlignmentTest, AlignLayoutsWithoutTrivialDimensionsWrongInputLayout) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{2, 3, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 7, 5, 11}));
EXPECT_FALSE(aligned_shape);
}
TEST(AlignmentTest,
AlignLayoutsWithoutTrivialDimensionsNonConsecutiveAlignmentPart) {
Shape input = ShapeUtil::MakeShapeWithDenseLayout(xla::F32, {3, 8, 5, 7, 11},
{3, 2, 1, 0, 4});
auto aligned_shape = ShapeUtil::AlignLayouts(
input, ShapeUtil::MakeShape(xla::F32, {4, 3, 2, 5, 77}));
EXPECT_FALSE(aligned_shape);
}
void BM_MakeShape(::testing::benchmark::State& state) {
for (auto s : state) {
ShapeUtil::MakeShape(F32, {2});
}
}
BENCHMARK(BM_MakeShape);
void BM_MakeValidatedShape(::testing::benchmark::State& state) {
for (auto s : state) {
ShapeUtil::MakeValidatedShape(F32, {2}).value();
}
}
BENCHMARK(BM_MakeValidatedShape);
Shape ShapeForBenchmark(::testing::benchmark::State& state) {
Shape shape;
switch (state.range(0)) {
case 0: {
shape = ShapeUtil::MakeShape(xla::F32, {1});
break;
}
case 1: {
shape = ShapeUtil::MakeShape(xla::F32, {4, 1});
break;
}
case 2: {
shape = ShapeUtil::MakeShape(xla::F32, {256, 1, 1024});
break;
}
}
state.SetLabel(shape.ToString());
return shape;
}
void BM_ForEachIndex(::testing::benchmark::State& state) {
Shape shape = ShapeForBenchmark(state);
for (auto s : state) {
int count = 0;
auto increment_func =
[&count](absl::Span<const int64_t> indexes) -> absl::StatusOr<bool> {
count++;
return true;
};
ShapeUtil::ForEachIndex(shape, increment_func);
}
}
BENCHMARK(BM_ForEachIndex)->Arg(0)->Arg(1)->Arg(2);
void BM_ForEachIndexNoStatus(::testing::benchmark::State& state) {
Shape shape = ShapeForBenchmark(state);
for (auto s : state) {
int count = 0;
auto increment_func = [&count](absl::Span<const int64_t> indexes) -> bool {
count++;
return true;
};
ShapeUtil::ForEachIndexNoStatus(shape, increment_func);
}
}
BENCHMARK(BM_ForEachIndexNoStatus)->Arg(0)->Arg(1)->Arg(2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/shape_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/shape_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
472804a5-8f92-4078-b679-b52c4d682391 | cpp | tensorflow/tensorflow | mlir_fusion_emitter | third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter.cc | third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter_test.cc | #include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/Casting.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ComplexToStandard/ComplexToStandard.h"
#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "mlir/Transforms/Passes.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/mlir/tools/mlir_replay/public/compiler_trace.pb.h"
#include "xla/mlir/tools/mlir_replay/public/compiler_trace_instrumentation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/fusions/transforms/passes.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tsl/framework/mlir/status_scoped_diagnostic_handler.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::Value;
using mlir::ValueRange;
using mlir::func::FuncOp;
void AddRanges(llvm::Function* func, const LaunchDimensions& launch_dims,
llvm::Module* module) {
for (auto& block : *func) {
for (auto& instr : block) {
if (auto* call = llvm::dyn_cast<llvm::CallInst>(&instr)) {
if (auto* callee = call->getCalledFunction()) {
switch (callee->getIntrinsicID()) {
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().x, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().y, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().z, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().x, call,
module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().y, call,
module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().z, call,
module);
break;
}
}
}
}
}
}
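// Illustrative effect (sketch): with, say, 128 threads per block in x,
// every nvvm_read_ptx_sreg_tid_x call site gets range metadata [0, 128),
// letting LLVM assume the thread id stays in bounds when simplifying the
// index math that depends on it.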
bool Needs64Bits(const Shape& shape) {
return shape.IsArray() ? !IsInt32(ShapeUtil::ElementsIn(shape))
: absl::c_any_of(shape.tuple_shapes(), Needs64Bits);
}
bool Is64BitIndex(const HloInstruction* instr, int operand) {
const auto& shape = instr->operand(operand)->shape();
return shape.element_type() == PrimitiveType::S64 ||
shape.element_type() == PrimitiveType::U64;
}
bool Needs64BitIndices(const HloComputation* computation) {
for (auto* instr : computation->instructions()) {
switch (instr->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
for (int i = 1; i < instr->operand_count(); ++i) {
if (Is64BitIndex(instr, i)) return true;
}
break;
case HloOpcode::kGather:
case HloOpcode::kScatter:
CHECK(instr->shape().IsArray()) << "Variadic scatter is unsupported.";
if (Is64BitIndex(instr, 1)) return true;
break;
default:
break;
}
if (Needs64Bits(instr->shape()) ||
absl::c_any_of(instr->called_computations(), Needs64BitIndices)) {
return true;
}
}
return false;
}
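// 64-bit indices are needed when any array in the fusion has more elements
// than fit in an int32 (Needs64Bits) or when dynamic-slice/-update-slice,
// gather, or scatter take S64/U64 index operands; everything else can use
// cheaper 32-bit index arithmetic.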
}
Value MlirFusionEmitterBase::EmitBlockId(mlir::ImplicitLocOpBuilder& builder,
int dim) const {
const auto& counts = launch_dimensions().block_counts();
int64_t count = dim == 0 ? counts.x : dim == 1 ? counts.y : counts.z;
auto block_id = builder.create<mlir::gpu::BlockIdOp>(
static_cast<mlir::gpu::Dimension>(dim));
block_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1}));
return block_id;
}
Value MlirFusionEmitterBase::EmitThreadId(mlir::ImplicitLocOpBuilder& builder,
int dim) const {
const auto& counts = launch_dimensions().thread_counts_per_block();
int64_t count = dim == 0 ? counts.x : dim == 1 ? counts.y : counts.z;
auto thread_id = builder.create<mlir::gpu::ThreadIdOp>(
static_cast<mlir::gpu::Dimension>(dim));
thread_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1}));
return thread_id;
}
llvm::SmallVector<Value> MlirFusionEmitterBase::EmitThreadAndBlockIds(
mlir::ImplicitLocOpBuilder& builder) const {
auto& b = builder;
return {EmitThreadId(b, 0), EmitThreadId(b, 1), EmitThreadId(b, 2),
EmitBlockId(b, 0), EmitBlockId(b, 1), EmitBlockId(b, 2)};
}
absl::StatusOr<FusionEmissionResult> MlirFusionEmitterBase::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
VLOG(4) << "Fusion: " << fusion.fused_instructions_computation()->ToString();
TF_ASSIGN_OR_RETURN(
auto args,
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion));
auto launch_dims = launch_dimensions();
auto [status_or_entry, cached] =
ir_emitter_context.kernel_cache().GetWithStatus(
fusion.fused_instructions_computation(), args.args(),
"",
[&]() -> absl::StatusOr<KernelReuseCache::Entry> {
std::string kernel_name =
ir_emitter_context.name_uniquer()->GetUniqueName(
llvm_ir::SanitizeFunctionName(std::string(fusion.name())));
if (ir_emitter_context.emit_kernels()) {
TF_ASSIGN_OR_RETURN(
auto module,
CreateLLVMModule(
*ir_emitter_context.mlir_context(),
ir_emitter_context.llvm_module()->getContext(),
ir_emitter_context.gpu_device_info(), fusion, kernel_name,
&ir_emitter_context.buffer_assignment()));
auto* kernel_func = module->getFunction(kernel_name);
AddRanges(kernel_func, launch_dims, module.get());
auto* target = ir_emitter_context.llvm_module();
module->setDataLayout(target->getDataLayout());
module->setTargetTriple(target->getTargetTriple());
llvm::IRBuilder<> builder(module->getContext());
AnnotateFunctionAsGpuKernel(module.get(), kernel_func, &builder);
TF_RETURN_IF_ERROR(AnnotateKernelLaunchDimensions(
ir_emitter_context.gpu_device_info(), launch_dims,
kernel_name, module.get()));
CHECK(!llvm::Linker::linkModules(
*target, std::move(module),
llvm::Linker::Flags::OverrideFromSrc));
} else {
VLOG(3) << "Skipped kernel compilation.";
}
            return KernelReuseCache::Entry{kernel_name, launch_dims,
                                           /*cluster_dim=*/std::nullopt,
                                           /*shmem_bytes=*/0};
});
TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry);
if (cached) {
VLOG(3) << "Reuse: " << fusion.name() << " -> " << entry->kernel_name;
}
FusionEmissionResult result;
result.thunks.emplace_back(std::make_unique<KernelThunk>(
&fusion, entry->kernel_name, args.args(), launch_dims, entry->cluster_dim,
entry->shmem_bytes));
return result;
}
absl::StatusOr<std::unique_ptr<llvm::Module>>
MlirFusionEmitterBase::CreateLLVMModule(
mlir::MLIRContext& mlir_context, llvm::LLVMContext& llvm_context,
const se::DeviceDescription& device, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment) const {
HloModule* hlo_module = fusion.GetModule();
std::unique_ptr<mlir::interpreter::MlirCompilationTrace> trace = nullptr;
if (DumpingEnabledForHloModule(*hlo_module) &&
DumpingEnabledForHloPass("mlir-fusion-emitter",
hlo_module->config().debug_options())) {
trace = std::make_unique<mlir::interpreter::MlirCompilationTrace>();
}
TF_ASSIGN_OR_RETURN(
auto module, CreateMLIRModule(mlir_context, fusion, entry_function_name,
buffer_assignment));
mlir::PassManager pm(&mlir_context);
AddXlaGpuOpsOptimizationPasses(pm);
AddLoopTransformationPasses(pm);
AddLoweringPasses(pm, device);
auto pipeline_status = RunPassPipeline(module.get(), pm, trace.get());
if (trace) {
DumpPerModuleProtobufToFile(
*hlo_module, *trace, hlo_module->config().debug_options(),
absl::StrCat(entry_function_name, ".mlir-trace"));
}
TF_RETURN_IF_ERROR(pipeline_status);
auto llvm_module = mlir::translateModuleToLLVMIR(module.get(), llvm_context);
TF_RET_CHECK(llvm_module != nullptr)
<< "Failed to translate module to LLVM IR.";
return llvm_module;
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>>
MlirFusionEmitterBase::CreateMLIRModule(
mlir::MLIRContext& context, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment,
mlir::interpreter::MlirCompilationTrace* trace) const {
context.loadDialect<mlir::DLTIDialect, mlir::NVVM::NVVMDialect,
mlir::ROCDL::ROCDLDialect, mlir::affine::AffineDialect,
mlir::arith::ArithDialect, mlir::cf::ControlFlowDialect,
mlir::func::FuncDialect, mlir::gpu::GPUDialect,
mlir::math::MathDialect, mlir::mhlo::MhloDialect,
mlir::scf::SCFDialect, mlir::tensor::TensorDialect,
mlir::vector::VectorDialect, xla::gpu::XlaGpuDialect>();
mlir::DialectRegistry registry;
mlir::LLVM::registerInlinerInterface(registry);
mlir::func::registerInlinerExtension(registry);
mlir::registerBuiltinDialectTranslation(registry);
mlir::registerLLVMDialectTranslation(registry);
mlir::registerNVVMDialectTranslation(registry);
mlir::registerROCDLDialectTranslation(registry);
context.appendDialectRegistry(registry);
mlir::OpBuilder builder(&context);
auto loc = mlir::NameLoc::get(builder.getStringAttr(fusion.name()));
mlir::OwningOpRef<mlir::ModuleOp> module = llvm_ir::CreateMlirModuleOp(loc);
SmallVector<mlir::Type> param_types;
std::optional<KernelArguments> args;
if (buffer_assignment != nullptr) {
TF_ASSIGN_OR_RETURN(args,
KernelArguments::Create(*buffer_assignment, &fusion));
}
int next_slice_index = 0;
absl::flat_hash_map<BufferAllocation::Slice, std::optional<int>>
slice_indices;
auto get_arg_attrs = [&](int index) -> absl::StatusOr<mlir::Attribute> {
if (!args) {
return builder.getDictionaryAttr({builder.getNamedAttr(
"xla.slice_index", builder.getIndexAttr(next_slice_index++))});
}
const auto& arg = args->args()[index];
SmallVector<mlir::NamedAttribute> attrs;
attrs.push_back(builder.getNamedAttr(
"xla.slice_index", builder.getIndexAttr(arg.llvm_arg_index())));
attrs.push_back(
builder.getNamedAttr(mlir::LLVM::LLVMDialect::getAlignAttrName(),
builder.getIndexAttr(arg.alignment())));
attrs.push_back(builder.getNamedAttr(
mlir::LLVM::LLVMDialect::getDereferenceableAttrName(),
builder.getIndexAttr(arg.slice().size())));
if (!arg.written()) {
attrs.push_back(
builder.getNamedAttr("xla.invariant", builder.getUnitAttr()));
}
return builder.getDictionaryAttr(attrs);
};
SmallVector<mlir::Attribute> arg_attrs;
int arg_index = 0;
for (auto* param : fusion.operands()) {
param_types.push_back(
mlir_converter::TensorShapeToMlirType(param->shape(), builder));
TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(), get_arg_attrs(arg_index++));
}
auto result_types = mlir_converter::ShapeToMlirTypes(fusion.shape(), builder);
param_types.append(result_types.begin(), result_types.end());
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
fusion.shape(), [&](const auto& shape, const ShapeIndex& index) {
if (shape.IsArray()) {
TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(),
get_arg_attrs(arg_index++));
}
return absl::OkStatus();
}));
builder.setInsertionPointToStart(module->getBody());
auto entry_func = builder.create<FuncOp>(
loc, entry_function_name,
mlir::FunctionType::get(&context, param_types, result_types),
      /*sym_visibility=*/mlir::StringAttr{},
      /*arg_attrs=*/mlir::ArrayAttr::get(&context, arg_attrs),
      /*res_attrs=*/mlir::ArrayAttr{});
entry_func->setAttr("xla.entry", mlir::UnitAttr::get(&context));
TF_RETURN_IF_ERROR(EmitMlir(module.get(), entry_func, fusion));
return module;
}
absl::Status MlirFusionEmitterBase::EmitMlir(
mlir::ModuleOp module, FuncOp entry_function,
const HloFusionInstruction& fusion) const {
std::vector<mlir_converter::EpilogueSpecification> epilogues =
GetEpilogues(fusion, module->getContext());
mlir_converter::PartitionedComputations computations(
fusion.fused_instructions_computation(), module->getContext(), epilogues);
auto subgraph_to_mlir_fn = computations.DeclareFunctions(module);
for (const auto& epilogue : epilogues) {
for (auto* custom : epilogue.heroes) {
if (custom->user_count() == 0) {
subgraph_to_mlir_fn.extract(&computations.FindSubgraph(custom))
.mapped()
.erase();
}
}
}
auto* root = fusion.fused_instructions_computation()->root_instruction();
if (root->opcode() == HloOpcode::kTuple && !epilogues.empty()) {
subgraph_to_mlir_fn.extract(&computations.FindSubgraph(root))
.mapped()
.erase();
}
auto call_targets =
computations.CreateCallTargetProvider(subgraph_to_mlir_fn);
for (const auto& comp : computations.partitioned_computations()) {
for (const auto& subgraph : comp.subgraphs()) {
if (subgraph_to_mlir_fn.contains(&subgraph)) {
TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction(
comp, subgraph, subgraph_to_mlir_fn[&subgraph], call_targets));
}
}
}
for (const auto& epilogue : computations.epilogues()) {
if (epilogue.roots.empty()) continue;
TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction(
computations.FindPartitionedComputation(
fusion.fused_instructions_computation()),
epilogue, subgraph_to_mlir_fn[&epilogue], call_targets));
}
int index_bitwidth =
Needs64BitIndices(fusion.fused_instructions_computation()) ? 64 : 32;
mlir::OpBuilder b(module->getContext());
auto index_layout = mlir::DataLayoutEntryAttr::get(
b.getIndexType(), b.getI32IntegerAttr(index_bitwidth));
module->setAttr(
mlir::DLTIDialect::kDataLayoutAttrName,
mlir::DataLayoutSpecAttr::get(module->getContext(), {index_layout}));
return EmitEntryFunction(computations, call_targets, entry_function, fusion);
}
absl::flat_hash_map<const HloInstruction*, ValueRange>
MlirFusionEmitterBase::EmitEpilogue(
int epilogue_index,
const mlir_converter::PartitionedComputations& computations,
FuncOp entry_fn,
const absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<Value>>&
injected,
ValueRange output_indices, mlir::ImplicitLocOpBuilder& builder) const {
const auto& epilogue = computations.epilogues().at(epilogue_index);
if (epilogue.roots.empty()) {
return {};
}
auto epilogue_fn = mlir::cast<FuncOp>(
entry_fn->getParentOfType<mlir::ModuleOp>().lookupSymbol(epilogue.name));
SmallVector<Value> operands = ValueRange(entry_fn.getArguments().take_front(
computations.fusion()->num_parameters()));
absl::c_copy(output_indices, std::back_inserter(operands));
int injected_offset = operands.size();
operands.resize(injected_offset + epilogue.num_injected_values);
for (auto [injected_instruction, start] : epilogue.injected_value_starts) {
absl::c_copy(injected.at(injected_instruction),
operands.begin() + injected_offset + start);
}
ValueRange results =
builder.create<PureCallOp>(epilogue_fn, operands).getResults();
absl::flat_hash_map<const HloInstruction*, ValueRange> results_per_root;
for (auto* root : epilogue.roots) {
int arity =
root->shape().IsTuple() ? root->shape().tuple_shapes().size() : 1;
results_per_root[root] = results.take_front(arity);
results = results.drop_front(arity);
}
CHECK_EQ(results.size(), 0);
return results_per_root;
}
absl::Status MlirFusionEmitterBase::RunPassPipeline(
mlir::ModuleOp module, mlir::PassManager& pm,
mlir::interpreter::MlirCompilationTrace* trace) const {
if (VLOG_IS_ON(5)) {
module.getContext()->disableMultithreading();
pm.enableIRPrinting();
}
if (trace) {
module.getContext()->disableMultithreading();
pm.addInstrumentation(
std::make_unique<mlir::interpreter::MlirCompilerTraceInstrumentation>(
*trace));
}
tsl::StatusScopedDiagnosticHandler diagnostic_handler(module.getContext());
(void)pm.run(module);
return diagnostic_handler.consumeStatus();
}
void AddXlaGpuOpsOptimizationPasses(mlir::OpPassManager& pm) {
pm.addNestedPass<FuncOp>(CreateSimplifyArithPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
pm.addPass(CreateEraseDeadFunctionsPass());
pm.addPass(mlir::createCSEPass());
}
void AddLoopTransformationPasses(mlir::OpPassManager& pm) {
pm.addNestedPass<FuncOp>(CreateLowerXlaGpuToScfPass());
pm.addPass(mlir::createInlinerPass({}, [&](mlir::OpPassManager& pm) {
pm.addPass(mlir::createCSEPass());
}));
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
pm.addNestedPass<FuncOp>(CreateFuseLoopsPass());
pm.addNestedPass<FuncOp>(CreatePeelLoopsPass());
pm.addNestedPass<FuncOp>(CreateLowerXlaGpuLoopsToScfPass());
pm.addPass(mlir::mhlo::createConvertToSignlessPass());
pm.addPass(CreatePropagateSliceIndicesPass());
pm.addPass(CreateFlattenTensorsPass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addNestedPass<FuncOp>(CreateUnswitchLoopsPass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addNestedPass<FuncOp>(CreateVectorizeLoadsAndStoresPass());
pm.addNestedPass<FuncOp>(CreateOptimizeLoopsPass());
}
void AddLoweringPasses(mlir::OpPassManager& pm,
const se::DeviceDescription& device) {
bool is_amd = std::holds_alternative<se::RocmComputeCapability>(
device.gpu_compute_capability());
pm.addNestedPass<FuncOp>(CreateConvertPureCallOpsPass());
pm.addPass(CreateLowerTensorsPass(
is_amd, is_amd ? device.rocm_compute_capability().gcn_arch_name()
: device.cuda_compute_capability().ToString()));
pm.addPass(mlir::createConvertComplexToStandardPass());
pm.addPass(CreateMergePointersToSameSlicePass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
pm.addNestedPass<FuncOp>(CreateSimplifyArithPass());
pm.addPass(CreateSimplifyAffinePass());
pm.addPass(mlir::createLowerAffinePass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addPass(mlir::createCSEPass());
auto maybe_convert_fp8 = MaybeCreateConvertFloatNvidiaPass(device);
if (maybe_convert_fp8.has_value()) {
pm.addPass(std::move(*maybe_convert_fp8));
}
pm.addPass(CreateExpandFloatOpsPass());
pm.addPass(mlir::createLowerAffinePass());
pm.addPass(mlir::createConvertSCFToCFPass());
pm.addPass(CreateLowerToLLVMPass(is_amd));
pm.addPass(mlir::createReconcileUnrealizedCastsPass());
}
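// Illustrative use (a sketch with assumed variable names; mirrors the
// sequence in CreateLLVMModule above):
//   mlir::PassManager pm(&mlir_context);
//   AddXlaGpuOpsOptimizationPasses(pm);
//   AddLoopTransformationPasses(pm);
//   AddLoweringPasses(pm, device);
//   if (mlir::failed(pm.run(module.get()))) { /* consume diagnostics */ }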
}
} | #include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include <cstdint>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class DummyCopyFusionEmitter : public MlirFusionEmitterBase {
public:
LaunchDimensions launch_dimensions() const final { return {1, 100}; }
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t, mlir::MLIRContext*) const final {
return std::nullopt;
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t, int64_t, mlir::MLIRContext*) const final {
return std::nullopt;
}
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override {
mlir::ImplicitLocOpBuilder b(entry_function.getLoc(), entry_function);
b.setInsertionPointToStart(entry_function.addEntryBlock());
auto thread_id = EmitThreadId(b, 0);
auto value = b.create<mlir::tensor::ExtractOp>(
entry_function.getArgument(0), mlir::ValueRange{thread_id});
auto result = b.create<mlir::tensor::InsertOp>(
value, entry_function.getArgument(1), mlir::ValueRange{thread_id});
b.create<mlir::func::ReturnOp>(result->getResults());
return absl::OkStatus();
}
};
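// The dummy emitter above lowers to a kernel body equivalent to
//   output[tid.x] = input[tid.x]
// for the 100 threads of the single block declared in launch_dimensions().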
class MlirFusionEmitterTest : public HloTestBase {
protected:
MlirFusionEmitterTest() {
context_.loadDialect<mlir::tensor::TensorDialect, mlir::func::FuncDialect,
mlir::affine::AffineDialect, mlir::arith::ArithDialect,
mlir::complex::ComplexDialect, mlir::math::MathDialect,
mlir::scf::SCFDialect, mlir::mhlo::MhloDialect,
mlir::gpu::GPUDialect, mlir::NVVM::NVVMDialect,
mlir::ROCDL::ROCDLDialect>();
mlir::DialectRegistry registry;
mlir::func::registerInlinerExtension(registry);
mlir::registerBuiltinDialectTranslation(registry);
mlir::registerLLVMDialectTranslation(registry);
mlir::registerNVVMDialectTranslation(registry);
mlir::registerROCDLDialectTranslation(registry);
context_.appendDialectRegistry(registry);
}
mlir::MLIRContext context_;
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::CudaOrRocmDeviceInfo();
};
constexpr absl::string_view kModule = R"(
fused_computation {
ROOT %p0 = f32[100] parameter(0)
}
ENTRY main {
%p0 = f32[100] parameter(0)
ROOT fusion = f32[100] fusion(%p0), kind=kLoop, calls=fused_computation
})";
TEST_F(MlirFusionEmitterTest, CreateMlirModule) {
auto module = ParseAndReturnVerifiedModule(kModule).value();
DummyCopyFusionEmitter emitter;
TF_ASSERT_OK_AND_ASSIGN(
auto mlir_module,
emitter.CreateMLIRModule(
context_,
*Cast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
"fusion",
          /*buffer_assignment=*/nullptr));
std::string out;
llvm::raw_string_ostream stream(out);
stream << *mlir_module;
TF_ASSERT_OK_AND_ASSIGN(auto filecheck_result, RunFileCheck(out, R"(
)"));
EXPECT_TRUE(filecheck_result);
}
TEST_F(MlirFusionEmitterTest, CreateLLVMModule) {
llvm::LLVMContext llvm_context;
auto module = ParseAndReturnVerifiedModule(kModule).value();
DummyCopyFusionEmitter emitter;
TF_ASSERT_OK_AND_ASSIGN(
auto llvm_module,
emitter.CreateLLVMModule(
context_, llvm_context, device_info_,
*Cast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
"fusion",
          /*buffer_assignment=*/nullptr));
std::string out;
llvm::raw_string_ostream stream(out);
stream << *llvm_module;
TF_ASSERT_OK_AND_ASSIGN(
auto filecheck_result,
RunFileCheck(
out, absl::StrReplaceAll(
R"(
)",
{{"TIDX", device_info_.cuda_compute_capability().major == -1
? "@llvm.amdgcn.workitem.id.x"
: "@llvm.nvvm.read.ptx.sreg.tid.x"}})));
EXPECT_TRUE(filecheck_result);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
321c5dd0-c284-4e83-8f76-cc62c4ee08b6 | cpp | google/tensorstore | http_response | tensorstore/internal/http/http_response.cc | tensorstore/internal/http/http_response_test.cc | #include "tensorstore/internal/http/http_response.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "re2/re2.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_http {
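// Returns the standard reason phrase for the 4xx/5xx status codes handled
// below, or nullptr for unrecognized codes.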
const char* HttpResponseCodeToMessage(const HttpResponse& response) {
switch (response.status_code) {
case 400:
return "Bad Request";
case 401:
return "Unauthorized";
case 402:
return "Payment Required";
case 403:
return "Forbidden";
case 404:
return "Not Found";
case 405:
return "Method Not Allowed";
case 406:
return "Not Acceptable";
case 407:
return "Proxy Authentication Required";
case 408:
return "Request Timeout";
case 409:
return "Conflict";
case 410:
return "Gone";
case 411:
return "Length Required";
case 412:
return "Precondition Failed";
case 413:
return "Payload Too Large";
case 414:
return "URI Too Long";
case 415:
return "Unsupported Media Type";
case 416:
return "Range Not Satisfiable";
case 417:
return "Expectation Failed";
case 418:
return "I'm a teapot";
case 421:
return "Misdirected Request";
case 422:
return "Unprocessable Content";
case 423:
return "Locked";
case 424:
return "Failed Dependency";
case 425:
return "Too Early";
case 426:
return "Upgrade Required";
case 428:
return "Precondition Required";
case 429:
return "Too Many Requests";
case 431:
return "Request Header Fields Too Large";
case 451:
return "Unavailable For Legal Reasons";
case 500:
return "Internal Server Error";
case 501:
return "Not Implemented";
case 502:
return "Bad Gateway";
case 503:
return "Service Unavailable";
case 504:
return "Gateway Timeout";
case 505:
return "HTTP Version Not Supported";
case 506:
return "Variant Also Negotiates";
case 507:
return "Insufficient Storage";
case 508:
return "Loop Detected";
case 510:
return "Not Extended";
case 511:
return "Network Authentication Required";
default:
return nullptr;
}
}
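// Maps an HTTP status code to the closest absl::StatusCode; unlisted codes
// below 300 are kOk and everything else is kUnknown.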
absl::StatusCode HttpResponseCodeToStatusCode(const HttpResponse& response) {
switch (response.status_code) {
case 200:
case 201:
case 202:
case 204:
case 206:
return absl::StatusCode::kOk;
case 400:
case 411:
return absl::StatusCode::kInvalidArgument;
case 401:
case 403:
return absl::StatusCode::kPermissionDenied;
case 404:
case 410:
return absl::StatusCode::kNotFound;
case 302:
case 303:
case 304:
case 307:
case 412:
case 413:
return absl::StatusCode::kFailedPrecondition;
case 416:
return absl::StatusCode::kOutOfRange;
case 308:
case 408:
case 409:
case 429:
case 500:
case 502:
case 503:
case 504:
return absl::StatusCode::kUnavailable;
}
if (response.status_code < 300) {
return absl::StatusCode::kOk;
}
return absl::StatusCode::kUnknown;
}
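// Converts a failed response into an absl::Status, attaching up to 256 bytes
// of the payload body and the numeric status code as status payloads.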
absl::Status HttpResponseCodeToStatus(const HttpResponse& response,
SourceLocation loc) {
auto code = HttpResponseCodeToStatusCode(response);
if (code == absl::StatusCode::kOk) {
return absl::OkStatus();
}
auto status_message = HttpResponseCodeToMessage(response);
if (!status_message) status_message = "Unknown";
absl::Status status(code, status_message);
if (!response.payload.empty()) {
status.SetPayload(
"http_response_body",
response.payload.Subcord(
0, response.payload.size() < 256 ? response.payload.size() : 256));
}
MaybeAddSourceLocation(status, loc);
status.SetPayload("http_response_code",
absl::Cord(tensorstore::StrCat(response.status_code)));
return status;
}
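// Parses a "Content-Range: bytes <first>-<last>/<total|*>" header into a
// half-open byte range; such a header is expected exactly for HTTP 206
// responses.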
Result<ParsedContentRange> ParseContentRangeHeader(
const HttpResponse& response) {
auto it = response.headers.find("content-range");
if (it == response.headers.end()) {
if (response.status_code != 206) {
return absl::FailedPreconditionError(
tensorstore::StrCat("No Content-Range header expected with HTTP ",
response.status_code, " response"));
}
return absl::FailedPreconditionError(
"Expected Content-Range header with HTTP 206 response");
}
static const RE2 kContentRangeRegex(R"(^bytes (\d+)-(\d+)/(?:(\d+)|\*))");
int64_t a, b;
std::optional<int64_t> total_size;
if (!RE2::FullMatch(it->second, kContentRangeRegex, &a, &b, &total_size) ||
a > b || (total_size && b >= *total_size) ||
b == std::numeric_limits<int64_t>::max()) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Unexpected Content-Range header received: ", QuoteString(it->second)));
}
return ParsedContentRange{a, b + 1, total_size.value_or(-1)};
}
}
} | #include "tensorstore/internal/http/http_response.h"
#include <set>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::internal_http::HttpResponse;
TEST(HttpResponseCodeToStatusTest, AllCodes) {
using ::tensorstore::internal_http::HttpResponseCodeToStatus;
absl::flat_hash_set<int> seen;
for (auto code : {200, 201, 204, 206}) {
seen.insert(code);
EXPECT_TRUE(HttpResponseCodeToStatus({code, {}, {}}).ok()) << code;
}
for (auto code : {400, 411}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kInvalidArgument,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {401, 403}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kPermissionDenied,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {404, 410}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kNotFound,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {302, 303, 304, 307, 412, 413}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kFailedPrecondition,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {416}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kOutOfRange,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (auto code : {308, 408, 409, 429, 500, 502, 503, 504}) {
seen.insert(code);
EXPECT_EQ(absl::StatusCode::kUnavailable,
HttpResponseCodeToStatus({code, {}, {}}).code())
<< code;
}
for (int i = 300; i < 600; i++) {
if (seen.count(i) > 0) continue;
EXPECT_EQ(absl::StatusCode::kUnknown,
HttpResponseCodeToStatus({i, {}, {}}).code())
<< i;
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_response.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_response_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
a9e0d084-df21-48ff-913e-7966d428b431 | cpp | google/cel-cpp | cel_function_registry | eval/public/cel_function_registry.cc | eval/public/cel_function_registry_test.cc | #include "eval/public/cel_function_registry.h"
#include <algorithm>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "base/type_provider.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/internal/interop.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "runtime/function_overload_reference.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::extensions::ProtoMemoryManagerRef;
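// Adapter that exposes a modern cel::Function through the legacy CelFunction
// interface, converting arguments and results between the two value
// representations.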
class ProxyToModernCelFunction : public CelFunction {
public:
ProxyToModernCelFunction(const cel::FunctionDescriptor& descriptor,
const cel::Function& implementation)
: CelFunction(descriptor), implementation_(&implementation) {}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
google::protobuf::Arena* arena) const override {
auto memory_manager = ProtoMemoryManagerRef(arena);
cel::common_internal::LegacyValueManager manager(
memory_manager, cel::TypeProvider::Builtin());
cel::FunctionEvaluationContext context(manager);
std::vector<cel::Value> modern_args =
cel::interop_internal::LegacyValueToModernValueOrDie(arena, args);
CEL_ASSIGN_OR_RETURN(auto modern_result,
implementation_->Invoke(context, modern_args));
*result = cel::interop_internal::ModernValueToLegacyValueOrDie(
arena, modern_result);
return absl::OkStatus();
}
private:
const cel::Function* implementation_;
};
}
absl::Status CelFunctionRegistry::RegisterAll(
std::initializer_list<Registrar> registrars,
const InterpreterOptions& opts) {
for (Registrar registrar : registrars) {
CEL_RETURN_IF_ERROR(registrar(this, opts));
}
return absl::OkStatus();
}
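// Finds matching static overloads in the modern registry and lazily creates
// (and caches, guarded by mu_) a legacy proxy for each implementation.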
std::vector<const CelFunction*> CelFunctionRegistry::FindOverloads(
absl::string_view name, bool receiver_style,
const std::vector<CelValue::Type>& types) const {
std::vector<cel::FunctionOverloadReference> matched_funcs =
modern_registry_.FindStaticOverloads(name, receiver_style, types);
std::vector<const CelFunction*> results;
results.reserve(matched_funcs.size());
{
absl::MutexLock lock(&mu_);
for (cel::FunctionOverloadReference entry : matched_funcs) {
std::unique_ptr<CelFunction>& legacy_impl =
functions_[&entry.implementation];
if (legacy_impl == nullptr) {
legacy_impl = std::make_unique<ProxyToModernCelFunction>(
entry.descriptor, entry.implementation);
}
results.push_back(legacy_impl.get());
}
}
return results;
}
std::vector<const CelFunctionDescriptor*>
CelFunctionRegistry::FindLazyOverloads(
absl::string_view name, bool receiver_style,
const std::vector<CelValue::Type>& types) const {
std::vector<LazyOverload> lazy_overloads =
modern_registry_.FindLazyOverloads(name, receiver_style, types);
std::vector<const CelFunctionDescriptor*> result;
result.reserve(lazy_overloads.size());
for (const LazyOverload& overload : lazy_overloads) {
result.push_back(&overload.descriptor);
}
return result;
}
} | #include "eval/public/cel_function_registry.h"
#include <memory>
#include <tuple>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "base/kind.h"
#include "eval/internal/adapter_activation_impl.h"
#include "eval/public/activation.h"
#include "eval/public/cel_function.h"
#include "internal/testing.h"
#include "runtime/function_overload_reference.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Property;
using ::testing::SizeIs;
using ::testing::Truly;
class ConstCelFunction : public CelFunction {
public:
ConstCelFunction() : CelFunction(MakeDescriptor()) {}
explicit ConstCelFunction(const CelFunctionDescriptor& desc)
: CelFunction(desc) {}
static CelFunctionDescriptor MakeDescriptor() {
return {"ConstFunction", false, {}};
}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* output,
google::protobuf::Arena* arena) const override {
*output = CelValue::CreateInt64(42);
return absl::OkStatus();
}
};
TEST(CelFunctionRegistryTest, InsertAndRetrieveLazyFunction) {
CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
CelFunctionRegistry registry;
Activation activation;
ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
const auto descriptors =
registry.FindLazyOverloads("LazyFunction", false, {});
EXPECT_THAT(descriptors, testing::SizeIs(1));
}
TEST(CelFunctionRegistryTest, LazyAndStaticFunctionShareDescriptorSpace) {
CelFunctionRegistry registry;
CelFunctionDescriptor desc = ConstCelFunction::MakeDescriptor();
ASSERT_OK(registry.RegisterLazyFunction(desc));
absl::Status status = registry.Register(ConstCelFunction::MakeDescriptor(),
std::make_unique<ConstCelFunction>());
EXPECT_FALSE(status.ok());
}
TEST(CelFunctionRegistryTest, FindStaticOverloadsReturns) {
CelFunctionRegistry registry;
CelFunctionDescriptor desc = ConstCelFunction::MakeDescriptor();
ASSERT_OK(registry.Register(desc, std::make_unique<ConstCelFunction>(desc)));
std::vector<cel::FunctionOverloadReference> overloads =
registry.FindStaticOverloads(desc.name(), false, {});
EXPECT_THAT(overloads,
ElementsAre(Truly(
[](const cel::FunctionOverloadReference& overload) -> bool {
return overload.descriptor.name() == "ConstFunction";
})))
<< "Expected single ConstFunction()";
}
TEST(CelFunctionRegistryTest, ListFunctions) {
CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
CelFunctionRegistry registry;
ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
EXPECT_OK(registry.Register(ConstCelFunction::MakeDescriptor(),
std::make_unique<ConstCelFunction>()));
auto registered_functions = registry.ListFunctions();
EXPECT_THAT(registered_functions, SizeIs(2));
EXPECT_THAT(registered_functions["LazyFunction"], SizeIs(1));
EXPECT_THAT(registered_functions["ConstFunction"], SizeIs(1));
}
TEST(CelFunctionRegistryTest, LegacyFindLazyOverloads) {
CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
CelFunctionRegistry registry;
ASSERT_OK(registry.RegisterLazyFunction(lazy_function_desc));
ASSERT_OK(registry.Register(ConstCelFunction::MakeDescriptor(),
std::make_unique<ConstCelFunction>()));
EXPECT_THAT(registry.FindLazyOverloads("LazyFunction", false, {}),
ElementsAre(Truly([](const CelFunctionDescriptor* descriptor) {
return descriptor->name() == "LazyFunction";
})))
<< "Expected single lazy overload for LazyFunction()";
}
TEST(CelFunctionRegistryTest, DefaultLazyProvider) {
CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
CelFunctionRegistry registry;
Activation activation;
cel::interop_internal::AdapterActivationImpl modern_activation(activation);
EXPECT_OK(registry.RegisterLazyFunction(lazy_function_desc));
EXPECT_OK(activation.InsertFunction(
std::make_unique<ConstCelFunction>(lazy_function_desc)));
auto providers = registry.ModernFindLazyOverloads("LazyFunction", false, {});
EXPECT_THAT(providers, testing::SizeIs(1));
ASSERT_OK_AND_ASSIGN(auto func, providers[0].provider.GetFunction(
lazy_function_desc, modern_activation));
ASSERT_TRUE(func.has_value());
EXPECT_THAT(func->descriptor,
Property(&cel::FunctionDescriptor::name, Eq("LazyFunction")));
}
TEST(CelFunctionRegistryTest, DefaultLazyProviderNoOverloadFound) {
CelFunctionRegistry registry;
Activation legacy_activation;
cel::interop_internal::AdapterActivationImpl activation(legacy_activation);
CelFunctionDescriptor lazy_function_desc{"LazyFunction", false, {}};
EXPECT_OK(registry.RegisterLazyFunction(lazy_function_desc));
EXPECT_OK(legacy_activation.InsertFunction(
std::make_unique<ConstCelFunction>(lazy_function_desc)));
const auto providers =
registry.ModernFindLazyOverloads("LazyFunction", false, {});
ASSERT_THAT(providers, testing::SizeIs(1));
const auto& provider = providers[0].provider;
auto func = provider.GetFunction({"LazyFunc", false, {cel::Kind::kInt64}},
activation);
ASSERT_OK(func.status());
EXPECT_EQ(*func, absl::nullopt);
}
TEST(CelFunctionRegistryTest, DefaultLazyProviderAmbiguousLookup) {
CelFunctionRegistry registry;
Activation legacy_activation;
cel::interop_internal::AdapterActivationImpl activation(legacy_activation);
CelFunctionDescriptor desc1{"LazyFunc", false, {CelValue::Type::kInt64}};
CelFunctionDescriptor desc2{"LazyFunc", false, {CelValue::Type::kUint64}};
CelFunctionDescriptor match_desc{"LazyFunc", false, {CelValue::Type::kAny}};
ASSERT_OK(registry.RegisterLazyFunction(match_desc));
ASSERT_OK(legacy_activation.InsertFunction(
std::make_unique<ConstCelFunction>(desc1)));
ASSERT_OK(legacy_activation.InsertFunction(
std::make_unique<ConstCelFunction>(desc2)));
auto providers =
registry.ModernFindLazyOverloads("LazyFunc", false, {cel::Kind::kAny});
ASSERT_THAT(providers, testing::SizeIs(1));
const auto& provider = providers[0].provider;
auto func = provider.GetFunction(match_desc, activation);
EXPECT_THAT(std::string(func.status().message()),
HasSubstr("Couldn't resolve function"));
}
TEST(CelFunctionRegistryTest, CanRegisterNonStrictFunction) {
{
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("NonStrictFunction",
false,
{CelValue::Type::kAny},
false);
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
EXPECT_THAT(registry.FindStaticOverloads("NonStrictFunction", false,
{CelValue::Type::kAny}),
SizeIs(1));
}
{
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("NonStrictLazyFunction",
false,
{CelValue::Type::kAny},
false);
EXPECT_OK(registry.RegisterLazyFunction(descriptor));
EXPECT_THAT(registry.FindLazyOverloads("NonStrictLazyFunction", false,
{CelValue::Type::kAny}),
SizeIs(1));
}
}
using NonStrictTestCase = std::tuple<bool, bool>;
using NonStrictRegistrationFailTest = testing::TestWithParam<NonStrictTestCase>;
TEST_P(NonStrictRegistrationFailTest,
IfOtherOverloadExistsRegisteringNonStrictFails) {
bool existing_function_is_lazy, new_function_is_lazy;
std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("OverloadedFunction",
false,
{CelValue::Type::kAny},
true);
if (existing_function_is_lazy) {
ASSERT_OK(registry.RegisterLazyFunction(descriptor));
} else {
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
}
CelFunctionDescriptor new_descriptor(
"OverloadedFunction",
false, {CelValue::Type::kAny, CelValue::Type::kAny},
false);
absl::Status status;
if (new_function_is_lazy) {
status = registry.RegisterLazyFunction(new_descriptor);
} else {
status = registry.Register(
new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
}
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("Only one overload")));
}
TEST_P(NonStrictRegistrationFailTest,
IfOtherNonStrictExistsRegisteringStrictFails) {
bool existing_function_is_lazy, new_function_is_lazy;
std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("OverloadedFunction",
false,
{CelValue::Type::kAny},
false);
if (existing_function_is_lazy) {
ASSERT_OK(registry.RegisterLazyFunction(descriptor));
} else {
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
}
CelFunctionDescriptor new_descriptor(
"OverloadedFunction",
false, {CelValue::Type::kAny, CelValue::Type::kAny},
true);
absl::Status status;
if (new_function_is_lazy) {
status = registry.RegisterLazyFunction(new_descriptor);
} else {
status = registry.Register(
new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
}
EXPECT_THAT(status, StatusIs(absl::StatusCode::kAlreadyExists,
HasSubstr("Only one overload")));
}
TEST_P(NonStrictRegistrationFailTest, CanRegisterStrictFunctionsWithoutLimit) {
bool existing_function_is_lazy, new_function_is_lazy;
std::tie(existing_function_is_lazy, new_function_is_lazy) = GetParam();
CelFunctionRegistry registry;
CelFunctionDescriptor descriptor("OverloadedFunction",
false,
{CelValue::Type::kAny},
true);
if (existing_function_is_lazy) {
ASSERT_OK(registry.RegisterLazyFunction(descriptor));
} else {
ASSERT_OK(registry.Register(
descriptor, std::make_unique<ConstCelFunction>(descriptor)));
}
CelFunctionDescriptor new_descriptor(
"OverloadedFunction",
false, {CelValue::Type::kAny, CelValue::Type::kAny},
true);
absl::Status status;
if (new_function_is_lazy) {
status = registry.RegisterLazyFunction(new_descriptor);
} else {
status = registry.Register(
new_descriptor, std::make_unique<ConstCelFunction>(new_descriptor));
}
EXPECT_OK(status);
}
INSTANTIATE_TEST_SUITE_P(NonStrictRegistrationFailTest,
NonStrictRegistrationFailTest,
testing::Combine(testing::Bool(), testing::Bool()));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/cel_function_registry.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/cel_function_registry_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
d8539108-120a-419d-853b-1421467cce6e | cpp | abseil/abseil-cpp | has_absl_stringify | absl/strings/has_absl_stringify.h | absl/strings/has_absl_stringify_test.cc | #ifndef ABSL_STRINGS_HAS_ABSL_STRINGIFY_H_
#define ABSL_STRINGS_HAS_ABSL_STRINGIFY_H_
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
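// Dummy sink used purely for overload detection in unevaluated contexts; its
// members are declared but never defined.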
class UnimplementedSink {
public:
void Append(size_t count, char ch);
void Append(string_view v);
friend void AbslFormatFlush(UnimplementedSink* sink, absl::string_view v);
};
}
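// HasAbslStringify<T>::value is true iff an ADL-findable
// AbslStringify(Sink&, const T&) overload returning void exists for T.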
template <typename T, typename = void>
struct HasAbslStringify : std::false_type {};
template <typename T>
struct HasAbslStringify<
T, std::enable_if_t<std::is_void<decltype(AbslStringify(
std::declval<strings_internal::UnimplementedSink&>(),
std::declval<const T&>()))>::value>> : std::true_type {};
ABSL_NAMESPACE_END
}
#endif | #include "absl/strings/has_absl_stringify.h"
#include <string>
#include "gtest/gtest.h"
#include "absl/types/optional.h"
namespace {
struct TypeWithoutAbslStringify {};
struct TypeWithAbslStringify {
template <typename Sink>
friend void AbslStringify(Sink&, const TypeWithAbslStringify&) {}
};
TEST(HasAbslStringifyTest, Works) {
EXPECT_FALSE(absl::HasAbslStringify<int>::value);
EXPECT_FALSE(absl::HasAbslStringify<std::string>::value);
EXPECT_FALSE(absl::HasAbslStringify<TypeWithoutAbslStringify>::value);
EXPECT_TRUE(absl::HasAbslStringify<TypeWithAbslStringify>::value);
EXPECT_FALSE(
absl::HasAbslStringify<absl::optional<TypeWithAbslStringify>>::value);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/has_absl_stringify.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/has_absl_stringify_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
fc5aac15-bf0a-45c4-8f6a-280c6aa505ab | cpp | google/arolla | common_qtype | arolla/qtype/standard_type_properties/common_qtype.cc | arolla/qtype/standard_type_properties/common_qtype_test.cc | #include "arolla/qtype/standard_type_properties/common_qtype.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
namespace arolla {
namespace {
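// Weak floats decay to float32; otherwise two distinct numeric types unify to
// the earlier entry in {double, float, int64, int32}, and all other pairs
// have no common scalar type.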
const QType* CommonScalarQType(const QType* lhs_qtype, const QType* rhs_qtype) {
if (lhs_qtype == rhs_qtype) {
return lhs_qtype;
}
if (lhs_qtype == GetWeakFloatQType()) {
lhs_qtype = GetQType<float>();
}
if (rhs_qtype == GetWeakFloatQType()) {
rhs_qtype = GetQType<float>();
}
static const std::array numeric_types = {
GetQType<double>(), GetQType<float>(), GetQType<int64_t>(),
GetQType<int32_t>()};
auto lhs_it = absl::c_find(numeric_types, lhs_qtype);
auto rhs_it = absl::c_find(numeric_types, rhs_qtype);
if (lhs_it != numeric_types.end() && rhs_it != numeric_types.end()) {
return *std::min(lhs_it, rhs_it);
}
return nullptr;
}
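// Unifies shape kinds: scalar and optional shapes always combine; they
// combine with array-like shapes only when broadcasting is enabled; two
// distinct array-like kinds never combine.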
const ShapeQType* CommonShapeQType(const ShapeQType* lhs_qtype,
const ShapeQType* rhs_qtype,
bool enable_broadcasting) {
if (lhs_qtype == rhs_qtype) {
return rhs_qtype;
}
if (!enable_broadcasting &&
(IsArrayLikeShapeQType(lhs_qtype) || IsArrayLikeShapeQType(rhs_qtype))) {
return nullptr;
}
if (lhs_qtype == GetQType<ScalarShape>()) {
return rhs_qtype;
}
if (rhs_qtype == GetQType<ScalarShape>()) {
return lhs_qtype;
}
if (lhs_qtype == GetQType<OptionalScalarShape>()) {
return rhs_qtype;
}
if (rhs_qtype == GetQType<OptionalScalarShape>()) {
return lhs_qtype;
}
return nullptr;
}
}
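// The common qtype pairs the common scalar type with the common shape;
// returns nullptr whenever either component cannot be unified.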
const QType* CommonQType(const QType* lhs_qtype, const QType* rhs_qtype,
bool enable_broadcasting) {
if (lhs_qtype == nullptr || rhs_qtype == nullptr) {
return nullptr;
}
if (lhs_qtype == rhs_qtype) {
return lhs_qtype;
}
const QType* scalar_qtype;
{
auto lhs_scalar_qtype = GetScalarQTypeOrNull(lhs_qtype);
if (lhs_scalar_qtype == nullptr) {
return nullptr;
}
auto rhs_scalar_qtype = GetScalarQTypeOrNull(rhs_qtype);
if (rhs_scalar_qtype == nullptr) {
return nullptr;
}
scalar_qtype = CommonScalarQType(lhs_scalar_qtype, rhs_scalar_qtype);
if (scalar_qtype == nullptr) {
return nullptr;
}
}
const ShapeQType* shape_qtype =
CommonShapeQType(GetShapeQTypeOrNull(lhs_qtype),
GetShapeQTypeOrNull(rhs_qtype), enable_broadcasting);
if (shape_qtype == nullptr) {
return nullptr;
}
return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr);
}
bool CanCastImplicitly(QTypePtr from_qtype, QTypePtr to_qtype,
bool enable_broadcasting) {
return to_qtype != nullptr &&
CommonQType(from_qtype, to_qtype, enable_broadcasting) == to_qtype;
}
const QType* CommonQType(absl::Span<const QType* const> qtypes,
bool enable_broadcasting) {
if (qtypes.empty()) {
return nullptr;
}
const QType* result = qtypes[0];
for (const QType* qtype : qtypes.subspan(1)) {
result = CommonQType(result, qtype, enable_broadcasting);
}
return result;
}
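// Broadcasts `qtype` to the widest shape found among `target_qtypes` while
// preserving its scalar value type.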
const QType* BroadcastQType(absl::Span<QType const* const> target_qtypes,
const QType* qtype) {
if (absl::c_any_of(target_qtypes,
[](auto* qtype) { return qtype == nullptr; }) ||
qtype == nullptr) {
return nullptr;
}
const ShapeQType* shape_qtype = GetShapeQTypeOrNull(qtype);
for (const auto* target_qtype : target_qtypes) {
shape_qtype =
CommonShapeQType(shape_qtype, GetShapeQTypeOrNull(target_qtype),
true);
}
if (shape_qtype == nullptr) {
return nullptr;
}
auto* scalar_qtype = GetScalarQTypeOrNull(qtype);
if (scalar_qtype == nullptr) {
return nullptr;
}
return shape_qtype->WithValueQType(scalar_qtype).value_or(nullptr);
}
} | #include "arolla/qtype/standard_type_properties/common_qtype.h"
#include <algorithm>
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/meta.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::testing::IsFalse;
using ::testing::IsNull;
using ::testing::IsTrue;
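// Brute-force reference implementation used to cross-check CommonQType over
// all generated pairs of scalar/optional/array qtypes.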
const QType* ReferenceCommonQType(const QType* arg0, const QType* arg1,
bool enable_broadcasting_) {
if (arg0 == arg1) {
return arg0;
}
const QType* result = nullptr;
const auto gen_result = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) {
if (a0 == arg0 && a1 == arg1) {
result = r;
}
};
const auto gen_results = [&](QTypePtr a0, QTypePtr a1, QTypePtr r) {
ASSERT_OK_AND_ASSIGN(auto a0_optional, ToOptionalQType(a0));
ASSERT_OK_AND_ASSIGN(auto a0_dense_array,
GetDenseArrayQTypeByValueQType(a0));
ASSERT_OK_AND_ASSIGN(auto a0_array, GetArrayQTypeByValueQType(a0));
ASSERT_OK_AND_ASSIGN(auto a1_optional, ToOptionalQType(a1));
ASSERT_OK_AND_ASSIGN(auto a1_dense_array,
GetDenseArrayQTypeByValueQType(a1));
ASSERT_OK_AND_ASSIGN(auto a1_array, GetArrayQTypeByValueQType(a1));
ASSERT_OK_AND_ASSIGN(auto r_optional, ToOptionalQType(r));
ASSERT_OK_AND_ASSIGN(auto r_dense_array, GetDenseArrayQTypeByValueQType(r));
ASSERT_OK_AND_ASSIGN(auto r_array, GetArrayQTypeByValueQType(r));
gen_result(a0, a1, r);
gen_result(a0, a1_optional, r_optional);
gen_result(a0_optional, a1_optional, r_optional);
gen_result(a0_optional, a1, r_optional);
gen_result(a0_dense_array, a1_dense_array, r_dense_array);
gen_result(a0_array, a1_array, r_array);
if (enable_broadcasting_) {
gen_result(a0, a1_dense_array, r_dense_array);
gen_result(a0_optional, a1_dense_array, r_dense_array);
gen_result(a0, a1_array, r_array);
gen_result(a0_optional, a1_array, r_array);
gen_result(a0_dense_array, a1_optional, r_dense_array);
gen_result(a0_dense_array, a1, r_dense_array);
gen_result(a0_array, a1_optional, r_array);
gen_result(a0_array, a1, r_array);
}
};
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
auto x = GetQType<typename decltype(meta_type)::type>();
gen_results(x, x, x);
});
static const auto numeric_qtypes = {
GetQType<int32_t>(),
GetQType<int64_t>(),
GetQType<float>(),
GetQType<double>(),
};
for (auto it = numeric_qtypes.begin();
result == nullptr && it != numeric_qtypes.end(); ++it) {
for (auto jt = numeric_qtypes.begin();
result == nullptr && jt != numeric_qtypes.end(); ++jt) {
gen_results(*it, *jt, *std::max(it, jt));
}
}
gen_results(GetWeakFloatQType(), GetWeakFloatQType(), GetWeakFloatQType());
gen_results(GetWeakFloatQType(), GetQType<int32_t>(), GetQType<float>());
gen_results(GetQType<int32_t>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<int64_t>(), GetQType<float>());
gen_results(GetQType<int64_t>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<float>(), GetQType<float>());
gen_results(GetQType<float>(), GetWeakFloatQType(), GetQType<float>());
gen_results(GetWeakFloatQType(), GetQType<double>(), GetQType<double>());
gen_results(GetQType<double>(), GetWeakFloatQType(), GetQType<double>());
return result;
}
class CommonQTypeMultipleParametersTests
: public ::testing::TestWithParam<bool> {
protected:
CommonQTypeMultipleParametersTests() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
using T = typename decltype(meta_type)::type;
known_qtypes_.push_back(GetQType<T>());
known_qtypes_.push_back(GetOptionalQType<T>());
known_qtypes_.push_back(GetDenseArrayQType<T>());
known_qtypes_.push_back(GetArrayQType<T>());
});
known_qtypes_.push_back(nullptr);
known_qtypes_.push_back(GetDenseArrayWeakFloatQType());
known_qtypes_.push_back(GetArrayWeakFloatQType());
known_qtypes_.push_back(MakeTupleQType({}));
enable_broadcasting_ = GetParam();
}
std::vector<const QType*> known_qtypes_;
bool enable_broadcasting_;
};
TEST_P(CommonQTypeMultipleParametersTests, VsReferenceImplementation) {
for (auto lhs : known_qtypes_) {
for (auto rhs : known_qtypes_) {
EXPECT_EQ(CommonQType(lhs, rhs, enable_broadcasting_),
ReferenceCommonQType(lhs, rhs, enable_broadcasting_))
<< "lhs=" << (lhs ? lhs->name() : "nullptr")
<< ", rhs=" << (rhs ? rhs->name() : "nullptr");
}
}
}
TEST_P(CommonQTypeMultipleParametersTests, SemiLatticeProperties) {
for (auto arg_0 : known_qtypes_) {
EXPECT_EQ(
CommonQType(arg_0, arg_0, enable_broadcasting_), arg_0);
for (auto arg_1 : known_qtypes_) {
EXPECT_EQ(
CommonQType(arg_0, arg_1, enable_broadcasting_),
CommonQType(arg_1, arg_0, enable_broadcasting_));
for (auto arg_2 : known_qtypes_) {
EXPECT_EQ(
CommonQType(CommonQType(arg_0, arg_1, enable_broadcasting_), arg_2,
enable_broadcasting_),
CommonQType(arg_0, CommonQType(arg_1, arg_2, enable_broadcasting_),
enable_broadcasting_))
<< arg_0->name() << " " << arg_1->name() << " " << arg_2->name();
}
}
}
}
INSTANTIATE_TEST_SUITE_P(CommonQTypeTests, CommonQTypeMultipleParametersTests,
::testing::Values(false, true));
class CommonQTypeTest : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
GetQType<typename decltype(meta_type)::type>();
GetOptionalQType<typename decltype(meta_type)::type>();
GetDenseArrayQType<typename decltype(meta_type)::type>();
});
}
};
TEST_F(CommonQTypeTest, OnSpans) {
EXPECT_THAT(CommonQType({}, true), IsNull());
EXPECT_EQ(CommonQType({GetQType<int64_t>()}, true),
GetQType<int64_t>());
EXPECT_THAT(
CommonQType({nullptr, GetQType<int64_t>()}, true),
IsNull());
EXPECT_THAT(
CommonQType({GetQType<int64_t>(), nullptr}, true),
IsNull());
EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>()},
true),
GetOptionalQType<int64_t>());
EXPECT_EQ(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>()},
true),
GetDenseArrayQType<int64_t>());
EXPECT_EQ(
CommonQType(GetDenseArrayQType<int32_t>(), GetOptionalQType<int64_t>(),
true),
GetDenseArrayQType<int64_t>());
EXPECT_THAT(CommonQType({GetQType<int64_t>(), GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>()},
false),
IsNull());
}
TEST_F(CommonQTypeTest, WeakQType) {
EXPECT_EQ(CommonQType(GetQType<double>(), GetWeakFloatQType(),
true),
GetQType<double>());
EXPECT_EQ(CommonQType(GetQType<float>(), GetWeakFloatQType(),
true),
GetQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetWeakFloatQType(),
true),
GetWeakFloatQType());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetWeakFloatQType(),
true),
GetOptionalWeakFloatQType());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetOptionalQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<double>(),
true),
GetOptionalQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetOptionalQType<float>(),
true),
GetOptionalQType<float>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetArrayQType<double>(),
true),
GetArrayQType<double>());
EXPECT_EQ(CommonQType(GetWeakFloatQType(), GetArrayQType<float>(),
true),
GetArrayQType<float>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<double>(),
true),
GetArrayQType<double>());
EXPECT_EQ(CommonQType(GetOptionalWeakFloatQType(), GetArrayQType<float>(),
true),
GetArrayQType<float>());
}
class CanCastImplicitlyTest : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
GetQType<typename decltype(meta_type)::type>();
GetOptionalQType<typename decltype(meta_type)::type>();
GetDenseArrayQType<typename decltype(meta_type)::type>();
});
}
};
TEST_F(CanCastImplicitlyTest, OnScalars) {
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<int64_t>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetQType<double>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<float>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetOptionalQType<double>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<int64_t>(), GetQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<float>(),
false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), GetQType<uint64_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<int32_t>(), nullptr,
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(nullptr, GetQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(nullptr, nullptr,
false),
IsFalse());
}
TEST_F(CanCastImplicitlyTest, WithBroadcasting) {
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetDenseArrayQType<int32_t>(),
true),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
true),
IsTrue());
}
TEST_F(CanCastImplicitlyTest, WeakQType) {
EXPECT_THAT(CanCastImplicitly(GetQType<float>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetQType<double>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetQType<int32_t>(), GetWeakFloatQType(), false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<float>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetQType<double>(), false),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetQType<int32_t>(), false),
IsFalse());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetOptionalQType<float>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetOptionalWeakFloatQType(),
GetOptionalQType<double>(), false),
IsTrue());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(),
false),
IsFalse());
EXPECT_THAT(CanCastImplicitly(GetWeakFloatQType(), GetArrayWeakFloatQType(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetOptionalWeakFloatQType(), GetArrayWeakFloatQType(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<float>(),
true),
IsTrue());
EXPECT_THAT(
CanCastImplicitly(GetWeakFloatQType(), GetDenseArrayQType<double>(),
true),
IsTrue());
}
class BroadcastQTypeTests : public ::testing::Test {
protected:
static void SetUpTestCase() {
meta::foreach_type<ScalarTypes>([&](auto meta_type) {
using T = typename decltype(meta_type)::type;
GetQType<T>();
GetOptionalQType<T>();
GetDenseArrayQType<T>();
GetArrayQType<T>();
});
GetDenseArrayWeakFloatQType();
GetArrayWeakFloatQType();
}
};
TEST_F(BroadcastQTypeTests, Empty) {
ASSERT_THAT(BroadcastQType({}, nullptr), IsNull());
}
TEST_F(BroadcastQTypeTests, SingleScalarType) {
ASSERT_EQ(BroadcastQType({}, GetQType<int32_t>()), GetQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, NullHandling) {
ASSERT_THAT(BroadcastQType({nullptr}, GetQType<int32_t>()), IsNull());
ASSERT_THAT(BroadcastQType({GetQType<int32_t>()}, nullptr), IsNull());
ASSERT_THAT(
BroadcastQType({GetQType<int32_t>(), nullptr}, GetQType<int32_t>()),
IsNull());
}
TEST_F(BroadcastQTypeTests, ScalarAndOptional) {
ASSERT_EQ(BroadcastQType({GetOptionalQType<int32_t>()}, GetQType<int64_t>()),
GetOptionalQType<int64_t>());
ASSERT_EQ(BroadcastQType({GetQType<int64_t>()}, GetOptionalQType<int32_t>()),
GetOptionalQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, ArrayAndDenseArray) {
EXPECT_THAT(
BroadcastQType({GetArrayQType<float>()}, GetDenseArrayQType<float>()),
IsNull());
EXPECT_THAT(
BroadcastQType({GetArrayQType<float>(), GetDenseArrayQType<float>()},
GetQType<float>()),
IsNull());
}
TEST_F(BroadcastQTypeTests, Basic) {
ASSERT_EQ(
BroadcastQType({GetOptionalQType<float>(), GetDenseArrayQType<Bytes>()},
GetQType<int32_t>()),
GetDenseArrayQType<int32_t>());
}
TEST_F(BroadcastQTypeTests, WeakFloat) {
ASSERT_EQ(BroadcastQType({GetDenseArrayQType<Unit>()}, GetWeakFloatQType()),
GetDenseArrayWeakFloatQType());
ASSERT_EQ(
BroadcastQType({GetDenseArrayQType<Unit>()}, GetOptionalWeakFloatQType()),
GetDenseArrayWeakFloatQType());
ASSERT_EQ(BroadcastQType({GetArrayQType<Unit>()}, GetWeakFloatQType()),
GetArrayWeakFloatQType());
ASSERT_EQ(
BroadcastQType({GetArrayQType<Unit>()}, GetOptionalWeakFloatQType()),
GetArrayWeakFloatQType());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/common_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/standard_type_properties/common_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9c586ff2-998f-46bb-8cd1-365898f411cf | cpp | tensorflow/tensorflow | triton_fusion_analysis | third_party/xla/xla/service/gpu/triton_fusion_analysis.cc | third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/cudnn_support_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/triton_tiling_propagation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using triton_fusion::DimOrdersAndReqs;
using triton_fusion::DimOrdersAndReqsOrError;
using triton_fusion::DotRequirements;
using triton_fusion::FusionContext;
using triton_fusion::GetPropagatedDimOrdersAndRequirements;
using triton_fusion::kNoSplitRequirement;
using triton_fusion::TransformDirection;
}
namespace triton_fusion {
absl::StatusOr<FusionContext> FusionContext::FromDotOperand(
const HloInstruction& dot, const int operand_number, const int split_k) {
const int num_split_k_batch_dims = split_k > 1;
int split_k_dimension_index = kNoDimensionIndex;
TF_ASSIGN_OR_RETURN(int contracting_dimension_index,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(int non_contracting_dimension_index,
NonContractingDimensionIndex(dot, operand_number));
if (split_k > 1) {
split_k_dimension_index = contracting_dimension_index - 1;
}
int splittable_dimension_index = kNoDimensionIndex;
if (operand_number == 0 &&
dot.dot_dimension_numbers().lhs_batch_dimensions_size() -
num_split_k_batch_dims ==
0) {
splittable_dimension_index = non_contracting_dimension_index;
}
FusionContext context(DotProperties{non_contracting_dimension_index,
splittable_dimension_index},
DotRequirements(kNoSplitRequirement));
context.dim_orders_[dot.operand(operand_number)] =
DimensionOrder::FromDotOperandOrOutput(*dot.operand(operand_number),
split_k_dimension_index);
return context;
}
FusionContext FusionContext::FromDotOutput(
const HloInstruction& dot, const int split_k,
DotRequirements requirements) {
int splittable_dimension_index = kNoDimensionIndex;
if (requirements.splittable_dimension_major_part_size > 1) {
splittable_dimension_index = (split_k > 1) ? 1 : 0;
}
FusionContext context(DotProperties{-1,
splittable_dimension_index},
std::move(requirements));
context.dim_orders_[&dot] = DimensionOrder::FromDotOperandOrOutput(dot);
return context;
}
namespace {
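// Number of parameters that fusing `hlo` would add (its operand count minus
// one); non-scalar constants are materialized inside the fusion and add none.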
int64_t NumAddedParameters(const HloInstruction& hlo) {
if (hlo.opcode() == HloOpcode::kConstant &&
!ShapeUtil::IsScalar(hlo.shape())) {
return 0;
}
return hlo.operand_count() - 1;
}
}
bool FusionContext::CombineDimOrdersAndReqs(const DimOrdersAndReqs& update) {
for (const auto& [key, value] : update.dim_orders) {
auto it = dim_orders_.find(key);
if (it != dim_orders_.cend() && !it->second.IsPhysicallyEquivalent(value)) {
return false;
}
}
DotRequirementsOrError requirements_or_error =
CombineDotRequirements(requirements_, update.requirements);
if (std::holds_alternative<FusionDecision>(requirements_or_error)) {
return false;
}
requirements_ = std::move(std::get<DotRequirements>(requirements_or_error));
dim_orders_.insert(update.dim_orders.begin(), update.dim_orders.end());
return true;
}
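// Breadth-first walk from `origin` toward the fusion parameters, propagating
// dimension orders output-to-input and recording an iteration spec for every
// visited instruction.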
absl::Status FusionContext::PropagateDimensionOrdersToParameters(
const HloInstruction& origin, ConstHloInstructionSet& parameters,
ConstHloInstructionMap<TensorIterationSpec>& iter_specs) {
absl::flat_hash_set<const HloInstruction*> visited;
std::queue<const HloInstruction*> to_process;
visited.insert(&origin);
to_process.push(&origin);
while (!to_process.empty()) {
const HloInstruction* hlo = to_process.front();
to_process.pop();
if (hlo->opcode() == HloOpcode::kParameter) {
if (!parameters.insert(hlo).second) {
return FailedPrecondition(
"A parameter is read differently by different users. hlo: %s",
hlo->ToString());
}
VLOG(5) << hlo->ToString();
}
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*hlo, dim_orders_.at(hlo), TransformDirection::kOutputToInput,
properties_);
if (!std::holds_alternative<DimOrdersAndReqs>(result)) {
return FailedPrecondition(
"Can not propagate dim orders and requirements.");
}
if (!CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result))) {
return FailedPrecondition("Can not combine dim orders and requirements.");
}
iter_specs[hlo] = dim_orders_.at(hlo).ToTensorIterationSpec();
for (const HloInstruction* operand : hlo->operands()) {
if (!visited.insert(operand).second) {
continue;
}
if (operand->opcode() == HloOpcode::kDot) {
continue;
}
to_process.push(operand);
}
}
return absl::OkStatus();
}
}
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloComputation& computation, const int split_k) {
VLOG(5) << computation.ToString(HloPrintOptions::ShortParsable());
TritonFusionAnalysis analysis;
const HloInstruction* dot =
hlo_query::GetFirstInstructionWithOpcode(computation, HloOpcode::kDot);
TF_RET_CHECK(dot != nullptr);
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(*dot, split_k));
return analysis;
}
absl::StatusOr<TritonFusionAnalysis> TritonFusionAnalysis::Execute(
const HloDotInstruction& dot, int split_k) {
TritonFusionAnalysis analysis;
TF_RETURN_IF_ERROR(analysis.ExecuteForDotFusion(dot, split_k));
return analysis;
}
absl::Status TritonFusionAnalysis::ExecuteForProducerConsumer(
const HloInstruction& producer, const HloInstruction& consumer,
int split_k) {
std::unique_ptr<HloModule> new_module =
ExtractProducerConsumerIntoNewModule(producer, consumer);
auto* new_producer =
new_module->entry_computation()->GetInstructionWithName(producer.name());
auto* new_consumer =
new_module->entry_computation()->GetInstructionWithName(consumer.name());
std::unique_ptr<HloInstruction> fusion_instruction_holder;
HloInstruction* fusion_instruction;
if (new_consumer->opcode() == HloOpcode::kFusion) {
fusion_instruction = new_consumer;
} else {
fusion_instruction_holder = HloInstruction::CreateFusion(
new_consumer->shape(), new_producer->fusion_kind(), new_consumer);
fusion_instruction = fusion_instruction_holder.get();
}
if (new_producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(new_producer);
} else {
fusion_instruction->FuseInstruction(new_producer);
}
auto* fused_computation =
fusion_instruction->fused_instructions_computation();
return Execute(*fused_computation, split_k).status();
}
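// Returns false if any S4 parameter in `scope` stores the dot's (single)
// batch dimension with stride 1, i.e. as its minor-most dimension; true
// otherwise.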
bool TritonFusionAnalysis::IsBatchDimMinorForInt4Parameter(
const HloInstruction& dot, Scope scope) const {
CHECK(scope == Scope::LHS || scope == Scope::RHS);
const auto& dims = dot.dot_dimension_numbers();
const auto& batch_dims = (scope == Scope::LHS) ? dims.lhs_batch_dimensions()
: dims.rhs_batch_dimensions();
if (batch_dims.empty()) return true;
int32_t batch_dim = batch_dims.Get(0);
CHECK_EQ(batch_dims.size(), 1);
const auto& params = parameters_.at(scope);
for (const auto& param : params) {
if (param->shape().element_type() != S4) continue;
const auto* strides = IterSpec(scope, param, batch_dim);
if (strides == nullptr) continue;
if (strides->front().stride == 1) return false;
}
return true;
}
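// Propagates tiling from each present dot operand scope (LHS, RHS, optional
// META) down to the fusion parameters, then from the dot output toward the
// fusion root.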
absl::Status TritonFusionAnalysis::ExecuteForDotFusion(
const HloInstruction& dot, const int split_k) {
DotRequirements lhs_requirements(kNoSplitRequirement);
for (const Scope scope : {Scope::LHS, Scope::RHS, Scope::META}) {
const int operand_number = static_cast<int>(scope);
if (dot.operand_count() < operand_number + 1) {
continue;
}
TF_ASSIGN_OR_RETURN(auto context, FusionContext::FromDotOperand(
dot, operand_number, split_k));
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*dot.operand(operand_number), parameters_[scope], iter_specs_[scope]));
if (scope == Scope::LHS) {
lhs_requirements = context.requirements();
}
}
auto context = FusionContext::FromDotOutput(dot, split_k, lhs_requirements);
const HloInstruction* output = &dot;
while (!output->IsRoot()) {
TF_RET_CHECK(output->user_count() == 1);
const HloInstruction* input = output;
if (IsWorkspaceAllocationRoot(*output->users()[0])) {
break;
}
output = output->users()[0];
DimOrdersAndReqsOrError result = GetPropagatedDimOrdersAndRequirements(
*output, context.dim_orders().at(input),
TransformDirection::kInputToOutput, context.dot_properties());
if (std::holds_alternative<FusionDecision>(result)) {
auto decision = std::get<FusionDecision>(result);
return FailedPrecondition("Failed to propagate tiling with error: %s",
decision.Explain());
}
TF_RET_CHECK(
context.CombineDimOrdersAndReqs(std::get<DimOrdersAndReqs>(result)));
}
TF_RET_CHECK(
iter_specs_[Scope::OUTPUT]
.insert(
{output, context.dim_orders().at(output).ToTensorIterationSpec()})
.second);
parameters_[Scope::OUTPUT] = {};
if (output != &dot) {
TF_RETURN_IF_ERROR(context.PropagateDimensionOrdersToParameters(
*output, parameters_[Scope::OUTPUT], iter_specs_[Scope::OUTPUT]));
}
return absl::OkStatus();
}
std::optional<TritonFusionAnalysis::Scope>
TritonFusionAnalysis::QueryInstructionScope(const HloInstruction& hlo) const {
for (const Scope& scope : {Scope::LHS, Scope::RHS, Scope::OUTPUT}) {
if (iter_specs_.at(scope).count(&hlo) > 0) {
return scope;
}
}
LOG(WARNING) << "No scope for hlo: " << hlo.ToString();
return std::nullopt;
}
const TensorIterationSpec::DimIterationSpec* TritonFusionAnalysis::IterSpec(
const TritonFusionAnalysis::Scope scope, const HloInstruction* hlo,
const int dimension) const {
auto hlo_spec = iter_specs_.at(scope).find(hlo);
if (hlo_spec != iter_specs_.at(scope).cend()) {
return hlo_spec->second.Find(dimension);
}
return nullptr;
}
namespace {
std::string IterationSpecByInstructionMapToString(
const TritonFusionAnalysis::IterationSpecByInstructionMap& m) {
return absl::StrCat("IterSpec{",
absl::StrJoin(m, ", ",
[&](std::string* s, const auto& kv) {
absl::StrAppend(s, kv.first->name(), ": ",
kv.second.ToString());
}),
"}");
}
std::string ScopeToString(TritonFusionAnalysis::Scope s) {
switch (s) {
case TritonFusionAnalysis::Scope::LHS:
return "LHS";
case TritonFusionAnalysis::Scope::RHS:
return "RHS";
case TritonFusionAnalysis::Scope::META:
return "META";
case TritonFusionAnalysis::Scope::OUTPUT:
return "OUTPUT";
}
}
}
std::string TritonFusionAnalysis::ToString() const {
return absl::StrCat(
"TritonFusionAnalysis{\n",
absl::StrJoin(iter_specs_, ",\n",
[&](std::string* s, const auto& kv) {
absl::StrAppend(
s, ScopeToString(kv.first), ": ",
IterationSpecByInstructionMapToString(kv.second));
}),
"\n}");
}
}
} | #include "xla/service/gpu/triton_fusion_analysis.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::FieldsAre;
using TritonDotAnalysisTest = HloTestBase;
TEST_F(TritonDotAnalysisTest, QueryingOutputScopeParametersAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_dot {
p0 = f32[8,8] parameter(0)
ROOT dot = f32[8,8] dot(p0, p0),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[8,8] parameter(0)
ROOT r = f32[8,8] fusion(p0), kind=kCustom, calls=triton_dot
})"));
TF_ASSERT_OK_AND_ASSIGN(
const auto analysis,
TritonFusionAnalysis::Execute(*module->entry_computation()
->root_instruction()
->called_computations()[0]));
EXPECT_TRUE(
analysis.ScopeParameters(TritonFusionAnalysis::Scope::OUTPUT).empty());
}
TEST_F(TritonDotAnalysisTest, NopBitcasts) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[48,4]{1,0} parameter(0)
bitcast.18 = s8[1,48,4]{2,1,0} bitcast(param_0.1)
bitcast.19 = s8[48,4]{1,0} bitcast(bitcast.18)
convert.4 = bf16[48,4]{1,0} convert(bitcast.19)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[48,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 48, 0,
48, ElementsAre(48))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4, 0,
4, ElementsAre(4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3, 0,
3, ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, DoNotRemoveTrivialDimensionForDot) {
const std::string hlo_text = R"(
HloModule t, is_scheduled=true
triton_dot {
param_0.1 = f32[137,115]{1,0} parameter(0)
param_1.1 = f32[1,115]{1,0} parameter(1)
ROOT dot = f32[137,1]{1,0} dot(param_0.1, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[137,115]{1,0} parameter(0)
p1 = f32[1,115]{1,0} parameter(1)
ROOT custom-call = f32[137,1]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot,
backend_config={"fusion_backend_config": {kind: "__triton_gemm",
triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,
"split_k":1,"num_stages":1,"num_warps":2,
"num_ctas":1}}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(115, 137, 0,
137, ElementsAre(137))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(115, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 115, 0,
115, ElementsAre(115))));
}
TEST_F(TritonDotAnalysisTest, Merge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,8,6,4]{3,2,1,0} parameter(0)
bitcast.18 = s8[48,4]{1,0} bitcast(param_0.1)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,8,6,4]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(4, 6 * 8,
0, 6 * 8,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, Split) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
%parameter_1 = f32[24000,2]{1,0} parameter(1)
%convert.15 = f16[24000,2]{1,0} convert(%parameter_1)
%parameter_0 = f16[4]{0} parameter(0)
%bitcast.45 = f16[2,2]{1,0} bitcast(%parameter_0)
ROOT %dot.26 = f16[24000,2]{1,0} dot(%convert.15, %bitcast.45),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[4]{0} parameter(0)
p1 = f32[24000,2]{1,0} parameter(1)
ROOT r = f16[24000,2]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p0);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 0),
ElementsAre(FieldsAre(2, 24000,
0, 24000,
ElementsAre(24000))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 0),
ElementsAre(FieldsAre(2, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p0, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
TEST_F(TritonDotAnalysisTest, TransposeMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
transpose.3 = s8[1,8,6,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = s8[48,4]{1,0} bitcast(transpose.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, CopyMerge) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = s8[1,4,8,6]{3,2,1,0} parameter(0)
bitcast.99 = s8[1,8,6,4]{2,1,3,0} bitcast(param_0.1)
copy.3 = s8[1,8,6,4]{3,2,1,0} copy(bitcast.99)
bitcast.18 = s8[48,4]{1,0} bitcast(copy.3)
convert.4 = bf16[48,4]{1,0} convert(bitcast.18)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[48,3]{1,0} dot(convert.4, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = s8[1,4,8,6]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[48,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton",
called_computations={triton_dot}
ROOT bitcast.2 = bf16[1,8,6,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8 * 6,
0, 8 * 6,
ElementsAre(6, 8))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8 * 6, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeMergeNCN) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
param_0.1 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
transpose.3 = bf16[3,8,1,4]{3,2,1,0} transpose(param_0.1), dimensions={0,2,3,1}
bitcast.18 = bf16[24,4]{1,0} bitcast(transpose.3)
param_1.1 = bf16[4,3]{1,0} parameter(1)
ROOT dot = bf16[24,3]{1,0} dot(bitcast.18, param_1.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[3,4,8,1]{3,2,1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
custom-call = bf16[24,3]{1,0} custom-call(p0, p1),
custom_call_target="__triton", called_computations={triton_dot}
ROOT bitcast.2 = bf16[3,8,1,3]{3,2,1,0} bitcast(custom-call)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation = module->entry_computation()
->root_instruction()
->operand(0)
->called_computations()[0];
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(1, 8,
0, 8,
ElementsAre(8)),
FieldsAre(4 * 8, 3,
0, 3,
ElementsAre(3))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(8, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(3, 4,
0, 4,
ElementsAre(4))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 3,
0, 3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, TransposeOutput) {
const std::string hlo_text = R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc = bf16[12,2,3]{2,1,0} bitcast(dot)
ROOT t = bf16[3,12,2]{2,1,0} transpose(bc), dimensions={2,0,1}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
ROOT r = bf16[3,12,2]{2,1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* dot_output = dot_computation->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(2, 12))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, dot_output, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, OutputParameterIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
dot = bf16[24,3]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
p2 = f16[3,24]{1,0} parameter(2)
p2t = f16[24,3]{1,0} transpose(p2), dimensions={1,0}
p2tc = bf16[24,3]{1,0} convert(p2t)
ROOT r = bf16[24,3]{1,0} divide(p2tc, dot)
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4,3]{1,0} parameter(1)
p2 = f16[3,24]{1,0} parameter(2)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1, p2), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* output_param =
dot_computation->parameter_instruction(2);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 0),
ElementsAre(FieldsAre(1, 24, 0,
24,
ElementsAre(24))));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1)
->size(),
1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT, output_param, 1),
ElementsAre(FieldsAre(24, 3, 0,
3,
ElementsAre(3))));
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromScalarIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
p1b = bf16[4,3] broadcast(p1)
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* scalar = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 0),
nullptr);
EXPECT_EQ(analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, scalar, 1),
nullptr);
}
TEST_F(TritonDotAnalysisTest, InputBroadcastFromVectorIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_dot {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
p1b = bf16[4,3] broadcast(p1), dimensions={0}
ROOT dot = bf16[24,3]{1,0} dot(p0, p1b),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[24,4]{1,0} parameter(0)
p1 = bf16[4] parameter(1)
ROOT r = bf16[24,3]{1,0} fusion(p0, p1), kind=kCustom,
calls=triton_dot
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
const HloInstruction* vector = dot_computation->parameter_instruction(1);
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_EQ(
analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0)->size(),
1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, vector, 0),
ElementsAre(FieldsAre(1, 4,
0, 4,
ElementsAre(4))));
}
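// Unlike the analysis-only tests above, this one runs the GemmFusion pass and
// verifies that the trailing broadcast is left outside the fusion: the entry
// root must still be a broadcast rather than being absorbed into the fusion.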
TEST_F(TritonDotAnalysisTest, OutputBroadcastIsNotAccepted) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
ENTRY e {
p0 = f16[2,35] parameter(0)
p0c = bf16[2,35] convert(p0)
p1 = bf16[35,2] parameter(1)
dot = bf16[2,2] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT bc = bf16[2,2,100] broadcast(dot), dimensions={0,1}
})"));
EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{
se::CudaComputeCapability::AMPERE, 0})
.Run(module.get())
.value());
EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
HloOpcode::kBroadcast);
}
TEST_F(TritonDotAnalysisTest, DegenerateSplitFragmentIsHandled) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::OUTPUT,
dot_computation->root_instruction(), 0),
ElementsAre(FieldsAre(1, 8 * 21,
0, 8 * 21,
ElementsAre(21, 8)),
FieldsAre(8 * 21 * 58, 30,
0, 30,
ElementsAre(30))));
}
TEST_F(TritonDotAnalysisTest,
HandlesFurtherPropagationFromTrivialSizedTensorGracefully) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
a = f32[3,3]{1,0} parameter(0)
constant = f32[1,1]{1,0} constant({ {0} })
broadcast = f32[1,1]{1,0} broadcast(constant), dimensions={0,1}
reshape = f32[] reshape(broadcast)
broadcast2 = f32[3,3]{1,0} broadcast(reshape), dimensions={}
ROOT dot = f32[3,3]{1,0} dot(a, broadcast2),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
ENTRY e {
a = f32[3,3]{1,0} parameter(0)
ROOT dot = f32[3,3]{1,0} fusion(a), kind=kCustom, calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
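// This test only checks that the analysis runs to completion without
// crashing; the resulting status is intentionally discarded below.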
absl::StatusOr<TritonFusionAnalysis> analysis =
TritonFusionAnalysis::Execute(*dot_computation);
(void)analysis;
}
TEST_F(TritonDotAnalysisTest, DynamicSliceIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
dynamic_slice = f32[64,2]{1,0} dynamic-slice(dynamic_slice_input,
start_index0, start_index1),
dynamic_slice_sizes={64,2}
ROOT dot = f32[18,64]{1,0} dot(dot_lhs, dynamic_slice),
lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
dot_lhs = f32[2,18]{1,0} parameter(0)
dynamic_slice_input = f32[96,2]{1,0} parameter(1)
start_index0 = s32[] parameter(2)
start_index1 = s32[] parameter(3)
ROOT triton_gemm_d = f32[18,64]{1,0} fusion(dot_lhs, dynamic_slice_input,
start_index0, start_index1),
kind=kCustom,
calls=triton_gemm,
backend_config={"kind":"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 0),
ElementsAre(FieldsAre(18, 2,
0, 2,
ElementsAre(2))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 18,
0, 18,
ElementsAre(18))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 0),
ElementsAre(FieldsAre(2, 96,
0, 96,
ElementsAre(96))));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(1, 2,
0, 2,
ElementsAre(2))));
}
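// A sparse dot carries an extra sparsity-metadata operand; the analysis is
// expected to track it under the dedicated META scope.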
TEST_F(TritonDotAnalysisTest, SparseDot) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm {
lhs = bf16[5,16] parameter(0)
rhs = bf16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT dot = f32[5,10] dot(lhs, rhs, meta),
lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4
}
ENTRY main {
lhs = bf16[5,16] parameter(0)
rhs = bf16[32,10] parameter(1)
meta = u16[5,2] parameter(2)
ROOT out = f32[5,10] fusion(lhs, rhs, meta),
kind=kCustom, calls=triton_gemm, backend_config={kind:"__triton_gemm"}
}
)"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::META,
dot_computation->parameter_instruction(2), 0),
::testing::SizeIs(1));
}
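// Every instruction in the fusion other than the dot itself should be
// attributable to some scope, even through the bitcast/copy chains above.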
TEST_F(TritonDotAnalysisTest, QueryScopeAlwaysWorks) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
triton_gemm_r {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
bitcast.6 = s8[30,8,21,913]{2,1,3,0} bitcast(Arg_0.1)
copy.7 = s8[30,8,21,913]{3,2,1,0} copy(bitcast.6)
bitcast.8 = s8[5040,913]{1,0} bitcast(copy.7)
convert.9 = bf16[5040,913]{1,0} convert(bitcast.8)
bitcast.32 = bf16[58,913]{1,0} parameter(1)
dot.33 = bf16[5040,58]{1,0} dot(convert.9, bitcast.32),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
bitcast.34 = bf16[30,8,21,58]{3,2,1,0} bitcast(dot.33)
copy.35 = bf16[30,8,21,58]{2,1,3,0} copy(bitcast.34)
ROOT bitcast.41 = bf16[30,1,58,8,21]{4,3,2,1,0} bitcast(copy.35)
}
ENTRY e {
Arg_0.1 = s8[30,913,8,21]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[58,913]{1,0} parameter(1)
ROOT r = bf16[30,1,58,8,21]{4,3,2,1,0} fusion(Arg_0.1, Arg_1.2), kind=kCustom,
calls=triton_gemm_r,
backend_config={kind: "__triton_gemm"}
})"));
const HloComputation* dot_computation =
module->entry_computation()->root_instruction()->called_computations()[0];
TF_ASSERT_OK_AND_ASSIGN(const auto analysis,
TritonFusionAnalysis::Execute(*dot_computation));
for (const auto& hlo : dot_computation->instructions()) {
if (hlo->opcode() != HloOpcode::kDot) {
EXPECT_TRUE(analysis.QueryInstructionScope(*hlo).has_value());
}
}
}
TEST_F(TritonDotAnalysisTest, PadWithTrivialDimension) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule t
triton_gemm_dot {
parameter_0 = f32[1001,1]{1,0} parameter(0)
constant = f32[] constant(0)
pad = f32[1004,1]{1,0} pad(parameter_0, constant), padding=0_3x0_0
bitcast = f32[4,251,1]{2,1,0} bitcast(pad)
parameter_1 = f32[4,251,2048]{2,1,0} parameter(1)
ROOT dot = f32[4,1,2048]{2,1,0} dot(bitcast, parameter_1),
lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0},
rhs_contracting_dims={1}
})"));
const HloComputation* dot_computation = *module->computations().begin();
TF_ASSERT_OK_AND_ASSIGN(
TritonFusionAnalysis analysis,
TritonFusionAnalysis::Execute(*dot_computation, 4));
const HloInstruction* p0 = dot_computation->parameter_instruction(0);
const HloInstruction* p1 = dot_computation->parameter_instruction(1);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).begin(),
p0);
EXPECT_EQ(*analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).begin(),
p1);
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 1),
ElementsAre(FieldsAre(1, 1001, 0,
1001, ElementsAre(1001))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, p0, 2),
ElementsAre(FieldsAre(1, 1, 0,
1, ElementsAre(1))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 1),
ElementsAre(FieldsAre(2048, 1004, 0,
1004, ElementsAre(251, 4))));
EXPECT_THAT(
*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, p1, 2),
ElementsAre(FieldsAre(1, 2048, 0,
2048, ElementsAre(2048))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/triton_fusion_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac2af6eb-d6da-4cea-91cc-9358932ac908 | cpp | google/tensorstore | transformed_array | tensorstore/index_space/transformed_array.cc | tensorstore/index_space/transformed_array_test.cc | #include "tensorstore/index_space/transformed_array.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <cassert>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/identity_transform.h"
#include "tensorstore/index_space/internal/iterate_impl.h"
#include "tensorstore/index_space/internal/propagate_bounds.h"
#include "tensorstore/index_space/internal/transform_rep.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/rank.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/byte_strided_pointer.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/element_pointer.h"
#include "tensorstore/util/internal/iterate_impl.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
std::string DescribeTransformedArrayForCast(DataType dtype,
DimensionIndex rank) {
return tensorstore::StrCat(
"transformed array with ", StaticCastTraits<DataType>::Describe(dtype),
" and ", StaticCastTraits<DimensionIndex>::Describe(rank));
}
namespace {
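// Folds the array's byte strides into the transform's output index maps, so
// that each output index map produces a byte offset rather than an element
// index. Arithmetic wraps on overflow, and a resulting stride of zero
// collapses the map to a constant.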
void MultiplyByteStridesIntoOutputIndexMaps(TransformRep* transform,
span<const Index> byte_strides) {
const span<OutputIndexMap> output_maps = transform->output_index_maps();
assert(byte_strides.size() == output_maps.size());
for (DimensionIndex i = 0; i < byte_strides.size(); ++i) {
auto& map = output_maps[i];
const Index byte_stride = byte_strides[i];
const Index stride =
internal::wrap_on_overflow::Multiply(map.stride(), byte_stride);
if (stride == 0) {
map.SetConstant();
}
map.stride() = stride;
map.offset() =
internal::wrap_on_overflow::Multiply(map.offset(), byte_stride);
}
}
}
absl::Status CopyTransformedArrayImpl(TransformedArrayView<const void> source,
TransformedArrayView<void> dest) {
TENSORSTORE_ASSIGN_OR_RETURN(auto r, internal::GetDataTypeConverterOrError(
source.dtype(), dest.dtype()));
absl::Status status;
using TA = TransformedArrayView<const void>;
TENSORSTORE_ASSIGN_OR_RETURN(auto success,
internal::IterateOverTransformedArrays<2>(
r.closure, &status, skip_repeated_elements,
span<const TA, 2>({source, TA(dest)})));
if (!success) {
return internal::GetElementCopyErrorStatus(std::move(status));
}
return status;
}
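// Builds an identity transform over `layout.domain()`, then scales its output
// index maps by the layout's byte strides (see above).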
TransformRep::Ptr<> MakeTransformFromStridedLayout(
StridedLayoutView<dynamic_rank, offset_origin> layout) {
auto result = MakeIdentityTransform(layout.domain());
MultiplyByteStridesIntoOutputIndexMaps(result.get(), layout.byte_strides());
internal_index_space::DebugCheckInvariants(result.get());
return result;
}
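// Composes an existing transform with `layout`: validates that the transform's
// output rank matches the array rank, propagates the layout's bounds into the
// transform, then folds in the byte strides.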
Result<TransformRep::Ptr<>> MakeTransformFromStridedLayoutAndTransform(
StridedLayoutView<dynamic_rank, offset_origin> layout,
TransformRep::Ptr<> transform) {
if (!transform) return MakeTransformFromStridedLayout(layout);
if (transform->output_rank != layout.rank()) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Transform output rank (", transform->output_rank,
") does not equal array rank (", layout.rank(), ")"));
}
TENSORSTORE_ASSIGN_OR_RETURN(
transform, PropagateExplicitBoundsToTransform(layout.domain(),
std::move(transform)));
MultiplyByteStridesIntoOutputIndexMaps(transform.get(),
layout.byte_strides());
internal_index_space::DebugCheckInvariants(transform.get());
return transform;
}
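// Returns a rank-`rank` layout with origin -kInfIndex, shape kInfSize, and
// unit byte strides in every dimension, backed by shared constant vectors.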
StridedLayoutView<dynamic_rank, offset_origin> GetUnboundedLayout(
DimensionIndex rank) {
return StridedLayoutView<dynamic_rank, offset_origin>(
rank, GetConstantVector<Index, -kInfIndex>(rank).data(),
GetConstantVector<Index, kInfSize>(rank).data(),
GetConstantVector<Index, 1>(rank).data());
}
}
namespace internal {
template <size_t Arity>
Result<bool> IterateOverTransformedArrays(
ElementwiseClosure<Arity, void*> closure, void* arg,
IterationConstraints constraints,
span<const TransformedArrayView<const void>, Arity> transformed_arrays) {
if (Arity == 0) return true;
const DimensionIndex input_rank = transformed_arrays[0].rank();
namespace flags = internal_index_space::input_dimension_iteration_flags;
flags::Bitmask input_dimension_flags[kMaxRank];
std::fill_n(
&input_dimension_flags[0], input_rank,
flags::GetDefaultBitmask(constraints.repeated_elements_constraint()));
internal_index_space::SingleArrayIterationState single_array_states[Arity];
Box<dynamic_rank(kNumInlinedDims)> input_bounds(input_rank);
bool failed = false;
for (size_t i = 0; i < Arity; ++i) {
if (transformed_arrays[i].domain().rank() != input_rank) {
failed = true;
}
}
if (failed) {
DimensionIndex transformed_ranks[Arity];
for (size_t i = 0; i < Arity; ++i) {
transformed_ranks[i] = transformed_arrays[i].domain().rank();
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Transformed array input ranks ",
span(transformed_ranks), " do not all match"));
}
for (size_t i = 0; i < Arity; ++i) {
const BoxView<> domain = transformed_arrays[i].domain().box();
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::ValidateAndIntersectBounds(
domain, input_bounds, [](IndexInterval a, IndexInterval b) {
return AreCompatibleOrUnbounded(a, b);
}));
}
for (DimensionIndex i = 0; i < input_rank; ++i) {
if (input_bounds.shape()[i] == 0) {
return true;
}
}
bool has_array_indexed_output_dimensions = false;
for (size_t i = 0; i < Arity; ++i) {
const auto& ta = transformed_arrays[i];
auto& single_array_state = single_array_states[i];
TENSORSTORE_RETURN_IF_ERROR(
internal_index_space::InitializeSingleArrayIterationState(
ta.element_pointer(),
internal_index_space::TransformAccess::rep(ta.transform()),
input_bounds.origin().data(), input_bounds.shape().data(),
&single_array_state, &input_dimension_flags[0]));
if (single_array_state.num_array_indexed_output_dimensions) {
has_array_indexed_output_dimensions = true;
}
}
std::array<std::ptrdiff_t, Arity> element_sizes;
for (size_t i = 0; i < Arity; ++i) {
element_sizes[i] = transformed_arrays[i].dtype()->size;
}
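// Fast path: when no output dimension is indexed by an index array, the
// iteration reduces to plain strided iteration over the common input domain.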
if (!has_array_indexed_output_dimensions) {
std::array<ByteStridedPointer<void>, Arity> pointers;
std::array<const Index*, Arity> strides;
for (size_t i = 0; i < Arity; ++i) {
pointers[i] = single_array_states[i].base_pointer;
strides[i] = &single_array_states[i].input_byte_strides[0];
}
return IterateOverStridedLayouts<Arity>(closure, arg, input_bounds.shape(),
pointers, strides, constraints,
element_sizes);
}
internal_index_space::MarkSingletonDimsAsSkippable(input_bounds.shape(),
&input_dimension_flags[0]);
internal_index_space::SimplifiedDimensionIterationOrder layout =
internal_index_space::SimplifyDimensionIterationOrder<Arity>(
internal_index_space::ComputeDimensionIterationOrder<Arity>(
single_array_states, span(input_dimension_flags, input_rank),
constraints.order_constraint()),
input_bounds.shape(), single_array_states);
return internal_index_space::IterateUsingSimplifiedLayout<Arity>(
layout, input_bounds.shape(), closure, arg, single_array_states,
element_sizes);
}
#define TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS(Arity) \
template Result<bool> IterateOverTransformedArrays<Arity>( \
ElementwiseClosure<Arity, void*> closure, void* arg, \
IterationConstraints constraints, \
span<const TransformedArrayView<const void>, Arity> transformed_arrays);
TENSORSTORE_INTERNAL_FOR_EACH_ARITY(
TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS)
#undef TENSORSTORE_DO_INSTANTIATE_ITERATE_OVER_TRANSFORMED_ARRAYS
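// Attempts to view a transformed array as a plain strided array. This can
// succeed only when every output index map is a constant or a
// single-input-dimension map; index-array maps cannot be represented as byte
// strides and produce an error instead.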
Result<ElementPointer<Shared<const void>>> TryConvertToArrayImpl(
ElementPointer<Shared<const void>> element_pointer,
IndexTransformView<> transform, Index* output_origin, Index* output_shape,
Index* output_byte_strides) {
const DimensionIndex input_rank = transform.input_rank();
const DimensionIndex output_rank = transform.output_rank();
if (output_origin) {
std::copy_n(transform.input_origin().begin(), input_rank, output_origin);
}
std::copy_n(transform.input_shape().begin(), input_rank, output_shape);
Index offset = 0;
std::fill_n(output_byte_strides, input_rank, Index(0));
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
auto map = transform.output_index_map(output_dim);
offset = internal::wrap_on_overflow::Add(offset, map.offset());
switch (map.method()) {
case OutputIndexMethod::constant:
break;
case OutputIndexMethod::single_input_dimension: {
const DimensionIndex input_dim = map.input_dimension();
output_byte_strides[input_dim] = internal::wrap_on_overflow::Add(
output_byte_strides[input_dim], map.stride());
break;
}
case OutputIndexMethod::array:
return absl::InvalidArgumentError(
"Cannot view transformed array with index arrays as a strided "
"array");
}
}
if (!output_origin) {
offset = internal::wrap_on_overflow::Add(
offset, IndexInnerProduct(input_rank, transform.input_origin().data(),
output_byte_strides));
}
return AddByteOffset(std::move(element_pointer), offset);
}
}
} | #include "tensorstore/index_space/transformed_array.h"
#include <stddef.h>
#include <stdint.h>
#include <random>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/array_testutil.h"
#include "tensorstore/box.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/transform_array_constraints.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/rank.h"
#include "tensorstore/static_cast.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::MakeArray;
using ::tensorstore::MakeOffsetArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::Shared;
using ::tensorstore::StaticDataTypeCast;
using ::tensorstore::StaticRankCast;
using ::tensorstore::TransformedArray;
using ::tensorstore::dtypes::float32_t;
static_assert(std::is_convertible_v<tensorstore::TransformedSharedArray<int, 1>,
tensorstore::TransformedArrayView<int, 1>>);
static_assert(
!std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
tensorstore::TransformedSharedArray<int, 1>>);
static_assert(std::is_convertible_v<tensorstore::TransformedArrayView<int, 1>,
tensorstore::TransformedArray<int, 1>>);
static_assert(
std::is_same_v<typename tensorstore::TransformedArrayView<int, 1>::
template RebindContainerKind<tensorstore::container>,
tensorstore::TransformedArray<int, 1>>);
static_assert(tensorstore::HasBoxDomain<tensorstore::TransformedArray<int, 1>>);
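// Iterates over `a`, skipping repeated elements, and records the address of
// each element visited; the tests below compare these pointer lists to verify
// which elements a transformed view actually covers.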
template <typename TA>
std::vector<const typename TA::Element*> GetPointers(const TA& a) {
using Element = const typename TA::Element;
std::vector<Element*> pointers;
auto result = IterateOverTransformedArrays(
[&](Element* x) { pointers.push_back(x); },
tensorstore::skip_repeated_elements, a);
EXPECT_TRUE(result);
return pointers;
}
using TransformedArrayTestTypes =
::testing::Types<tensorstore::TransformedSharedArray<int>,
tensorstore::TransformedSharedArray<int, 1>>;
template <typename T>
class TransformedArrayConstructorTest : public ::testing::Test {};
TYPED_TEST_SUITE(TransformedArrayConstructorTest, TransformedArrayTestTypes);
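// Verifies that copy construction, move construction, copy assignment, and
// move assignment all preserve the domain and visit the same elements as the
// source.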
template <typename TransformedArrayType, typename SourceArray>
void TestCopyAndMove(SourceArray&& source,
std::vector<const int*> expected_pointers) {
{
TransformedArrayType tb(source);
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tb));
EXPECT_EQ(expected_pointers, GetPointers(tb));
}
{
auto source_copy = source;
TransformedArrayType tc(std::move(source_copy));
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(tc));
EXPECT_EQ(expected_pointers, GetPointers(tc));
}
{
TransformedArrayType td;
td = source;
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
EXPECT_EQ(expected_pointers, GetPointers(td));
}
{
auto source_copy = source;
TransformedArrayType td;
td = std::move(source_copy);
EXPECT_EQ(expected_pointers, GetPointers(td));
EXPECT_EQ(GetBoxDomainOf(source), GetBoxDomainOf(td));
}
}
TYPED_TEST(TransformedArrayConstructorTest, DefaultConstruct) {
TypeParam ta;
EXPECT_FALSE(ta.transform());
EXPECT_EQ(nullptr, ta.element_pointer());
}
template <typename TransformedArrayType, typename Array>
void TestConstructFromArray(Array&& array,
std::vector<const int*> expected_pointers) {
auto array_copy = array;
TransformedArrayType ta(std::forward<Array>(array));
EXPECT_EQ(array_copy.domain(), ta.domain().box());
EXPECT_EQ(array_copy.domain(), GetBoxDomainOf(ta));
auto pointers = GetPointers(ta);
EXPECT_EQ(expected_pointers, pointers);
TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
tensorstore::container>>(ta, expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromZeroOriginArray) {
auto a = MakeArray<int>({1, 2, 3});
const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
TestConstructFromArray<TypeParam>(a, expected_pointers);
TestConstructFromArray<TypeParam>(tensorstore::SharedArrayView<int, 1>(a),
expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest, ConstructFromOffsetOriginArray) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
const std::vector<const int*> expected_pointers{&a(3), &a(4), &a(5)};
TestConstructFromArray<TypeParam>(a, expected_pointers);
TestConstructFromArray<TypeParam>(
tensorstore::SharedOffsetArrayView<int, 1>(a), expected_pointers);
}
template <typename TransformedArrayType, typename ElementPointer,
typename Transform>
void TestConstructFromElementPointerAndTransform(
ElementPointer&& element_pointer, Transform&& transform,
std::vector<const int*> expected_pointers) {
auto element_pointer_copy = element_pointer;
auto transform_copy = transform;
TransformedArrayType ta(std::forward<ElementPointer>(element_pointer),
std::forward<Transform>(transform));
EXPECT_EQ(GetBoxDomainOf(transform_copy), GetBoxDomainOf(ta));
EXPECT_EQ(transform_copy, ta.transform());
EXPECT_EQ(element_pointer_copy, ta.element_pointer());
auto pointers = GetPointers(ta);
EXPECT_EQ(expected_pointers, pointers);
TestCopyAndMove<TransformedArrayType>(ta, expected_pointers);
TestCopyAndMove<typename TransformedArrayType::template RebindContainerKind<
tensorstore::container>>(ta, expected_pointers);
}
TYPED_TEST(TransformedArrayConstructorTest,
ConstructFromElementPointerAndTransform) {
auto a = MakeArray<int>({1, 2, 3});
const std::vector<const int*> expected_pointers{&a(0), &a(1), &a(2)};
auto t = tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.output_single_input_dimension(0, 0, sizeof(int), 0)
.Finalize()
.value();
TestConstructFromElementPointerAndTransform<TypeParam>(a.element_pointer(), t,
expected_pointers);
auto element_pointer = a.element_pointer();
auto t_copy = t;
TestConstructFromElementPointerAndTransform<TypeParam>(
std::move(element_pointer), std::move(t_copy), expected_pointers);
tensorstore::IndexTransformView<1, 1> t_view = t;
TestConstructFromElementPointerAndTransform<TypeParam>(
a.element_pointer(), t_view, expected_pointers);
}
TEST(TransformedArrayTest, Array) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
auto ta = tensorstore::TransformedArray(a);
static_assert(std::is_same_v<decltype(ta),
tensorstore::TransformedSharedArray<int, 1>>);
auto a_copy = a;
EXPECT_EQ(3, a.pointer().use_count());
auto tb = tensorstore::TransformedArray(std::move(a_copy));
static_assert(std::is_same_v<decltype(tb),
tensorstore::TransformedSharedArray<int, 1>>);
EXPECT_EQ(3, a.pointer().use_count());
EXPECT_FALSE(a_copy.valid());
}
TEST(TransformedArrayTest, TransformedArray) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
auto ta = tensorstore::TransformedArray(a);
auto tb = tensorstore::TransformedArray(ta);
static_assert(std::is_same_v<decltype(tb),
tensorstore::TransformedSharedArray<int, 1>>);
auto ta_copy = ta;
EXPECT_EQ(4, a.pointer().use_count());
auto tc = tensorstore::TransformedArray(std::move(ta_copy));
static_assert(std::is_same_v<decltype(tc),
tensorstore::TransformedSharedArray<int, 1>>);
EXPECT_EQ(a.element_pointer(), tc.element_pointer());
EXPECT_EQ(4, a.pointer().use_count());
EXPECT_FALSE(ta_copy.element_pointer());
}
TEST(TransformedArrayTest, MapTransform) {
auto array = MakeArray<int>({1, 2, 3});
tensorstore::TransformedArray<int, 1> tarray(array);
auto tarray2 =
ChainResult(tarray, tensorstore::Dims(0).SizedInterval(1, 2)).value();
EXPECT_EQ(MakeOffsetArray<int>({1}, {2, 3}), tarray2.Materialize().value());
}
TEST(TransformedArrayTest, ArrayAndTransform) {
auto a = MakeOffsetArray<int>({3}, {1, 2, 3});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto t, (tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_single_input_dimension(0, 3, 1, 0)
.Finalize()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
tensorstore::MakeTransformedArray(a, t));
static_assert(std::is_same_v<decltype(ta),
tensorstore::TransformedSharedArray<int, 1>>);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto expected_transform, (tensorstore::IndexTransformBuilder<1, 1>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_single_input_dimension(
0, 3 * sizeof(int), 1 * sizeof(int), 0)
.Finalize()));
EXPECT_EQ(expected_transform, ta.transform());
}
TEST(TransformedArrayTest, DimExpression) {
auto a = MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}});
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto ta, a |
tensorstore::Dims(0, 1).IndexVectorArraySlice(
MakeArray<Index>({{10, 22}, {11, 21}, {11, 22}})) |
tensorstore::Dims(0).Label("a"));
EXPECT_EQ(ta.transform(),
(tensorstore::IndexTransformBuilder<1, 2>()
.input_origin({0})
.input_shape({3})
.input_labels({"a"})
.output_index_array(0, 0, sizeof(int) * 3,
MakeArray<Index>({10, 11, 11}),
IndexInterval::Sized(10, 2))
.output_index_array(1, 0, sizeof(int),
MakeArray<Index>({22, 21, 22}),
IndexInterval::Sized(20, 3))
.Finalize()
.value()));
EXPECT_EQ(a.element_pointer(), ta.element_pointer());
EXPECT_EQ(ta.domain().box(), tensorstore::BoxView<1>({3}));
}
TEST(TransformedArrayTest, MaterializeWithOffsetOrigin) {
EXPECT_EQ(MakeOffsetArray<int>({2}, {3, 5, 6}),
ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
tensorstore::Dims(0, 1)
.IndexVectorArraySlice(MakeArray<Index>(
{{10, 22}, {11, 21}, {11, 22}}))
.TranslateTo(2))
.value()
.Materialize());
}
TEST(TransformedArrayTest, MaterializeWithZeroOrigin) {
EXPECT_EQ(MakeArray<int>({3, 5, 6}),
ChainResult(MakeOffsetArray<int>({10, 20}, {{1, 2, 3}, {4, 5, 6}}),
tensorstore::Dims(0, 1)
.IndexVectorArraySlice(MakeArray<Index>(
{{10, 22}, {11, 21}, {11, 22}}))
.TranslateTo(2))
.value()
.template Materialize<tensorstore::zero_origin>()
.value());
}
TEST(TransformedArrayTest, MaterializeConstraints) {
auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
auto transformed_array =
ChainResult(array,
tensorstore::Dims(1)
.ClosedInterval(kImplicit, kImplicit, 2)
.MoveToFront(),
tensorstore::Dims(2).AddNew().SizedInterval(5, 3))
.value();
auto expected_array = MakeOffsetArray<int>(
{1, 2, 5}, {{{3, 3, 3}, {6, 6, 6}}, {{5, 5, 5}, {8, 8, 8}}});
{
auto new_array = transformed_array.Materialize().value();
EXPECT_EQ(GetPointers(transformed_array), GetPointers(new_array));
}
const auto ValidateCopy =
[&](const Result<tensorstore::SharedOffsetArray<const int, 3>>& new_array,
const std::vector<Index>& expected_byte_strides) {
TENSORSTORE_ASSERT_OK(new_array);
EXPECT_NE(GetPointers(transformed_array), GetPointers(*new_array));
EXPECT_EQ(expected_array, *new_array);
EXPECT_THAT(new_array->byte_strides(),
::testing::ElementsAreArray(expected_byte_strides));
};
const auto TestCopyAndMaterialize =
[&](tensorstore::TransformArrayConstraints constraints,
std::vector<Index> expected_byte_strides) {
SCOPED_TRACE(tensorstore::StrCat("TestCopyAndMaterialize: constraints=",
constraints.value()));
{
SCOPED_TRACE("Materialize");
auto new_array = transformed_array.Materialize(constraints);
static_assert(std::is_same_v<
decltype(new_array),
Result<tensorstore::SharedOffsetArray<const int, 3>>>);
ValidateCopy(new_array, expected_byte_strides);
}
{
SCOPED_TRACE("MakeCopy");
auto new_array =
MakeCopy(transformed_array, constraints.iteration_constraints());
static_assert(
std::is_same_v<decltype(new_array),
Result<tensorstore::SharedOffsetArray<int, 3>>>);
ValidateCopy(new_array, expected_byte_strides);
}
};
TestCopyAndMaterialize(
{tensorstore::skip_repeated_elements, tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, 0});
TestCopyAndMaterialize(
{tensorstore::c_order, tensorstore::skip_repeated_elements,
tensorstore::must_allocate},
{sizeof(int) * 2, sizeof(int), 0});
TestCopyAndMaterialize(
{tensorstore::fortran_order, tensorstore::skip_repeated_elements,
tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, 0});
TestCopyAndMaterialize(
{tensorstore::fortran_order, tensorstore::include_repeated_elements,
tensorstore::must_allocate},
{sizeof(int), sizeof(int) * 2, sizeof(int) * 2 * 2});
TestCopyAndMaterialize(
{tensorstore::c_order, tensorstore::include_repeated_elements,
tensorstore::must_allocate},
{sizeof(int) * 2 * 3, sizeof(int) * 3, sizeof(int)});
}
TEST(TransformedArrayTest, MaterializeError) {
EXPECT_THAT(
ChainResult(MakeArray<int>({1, 2}), tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({3, 4})))
.value()
.Materialize(),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(TransformedArrayTest, MakeCopy) {
EXPECT_THAT(MakeCopy(ChainResult(MakeArray<int>({1, 2}),
tensorstore::Dims(0).IndexArraySlice(
MakeArray<Index>({3, 4})))
.value()),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(TransformedArrayTest, MoveConstructViewFromContainer) {
MapResult(
[](tensorstore::TransformedSharedArrayView<const void> x) {
EXPECT_EQ(tensorstore::BoxView({2, 3}, {2, 2}), GetBoxDomainOf(x));
return absl::OkStatus();
},
tensorstore::MakeTransformedArray(
tensorstore::MakeOffsetArray<int>({2, 3}, {{1, 2}, {3, 4}}),
tensorstore::IdentityTransform(tensorstore::BoxView({2, 3}, {2, 2}))))
.value();
}
TEST(ComposeLayoutAndTransformTest, NoTransform) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
auto transform = tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IndexTransform<>())
.value();
EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({1, 2})
.input_shape({3, 4})
.output_single_input_dimension(0, 0, 5, 0)
.output_single_input_dimension(1, 0, 6, 1)
.Finalize()
.value());
}
TEST(ComposeLayoutAndTransformTest, ExistingTransform) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
auto transform = tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({11, 12})
.input_shape({3, 2})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -10, 1, 0)
.output_single_input_dimension(1, -22, 2, 1)
.Finalize()
.value())
.value();
EXPECT_EQ(transform, tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({11, 12})
.input_shape({3, 2})
.input_labels({"x", "y"})
.output_single_input_dimension(0, -10 * 5, 1 * 5, 0)
.output_single_input_dimension(1, -22 * 6, 2 * 6, 1)
.Finalize()
.value());
}
TEST(ComposeLayoutAndTransformTest, RankMismatch) {
tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>
layout({1, 2}, {3, 4}, {5, 6});
EXPECT_THAT(tensorstore::ComposeLayoutAndTransform(
layout, tensorstore::IdentityTransform(3)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Transform output rank \\(3\\) does not equal "
"array rank \\(2\\)"));
}
TEST(MakeTransformedArrayTest, TwoArgumentBaseArrayAndTransform) {
auto array = MakeOffsetArray<int>({2, 3}, {{3, 4, 5}, {6, 7, 8}});
auto t = tensorstore::IndexTransformBuilder<1, 2>()
.implicit_lower_bounds({1})
.implicit_upper_bounds({1})
.output_single_input_dimension(0, 1, 1, 0)
.output_single_input_dimension(1, 2, 1, 0)
.Finalize()
.value();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto ta,
tensorstore::MakeTransformedArray(array, t));
EXPECT_EQ(array.element_pointer(), ta.element_pointer());
EXPECT_EQ(
tensorstore::IndexTransformBuilder<>(1, 2)
.input_origin({1})
.input_shape({2})
.output_single_input_dimension(0, sizeof(int) * 3, sizeof(int) * 3, 0)
.output_single_input_dimension(1, sizeof(int) * 2, sizeof(int), 0)
.Finalize()
.value(),
ta.transform());
}
TEST(GetUnboundedLayoutTest, Basic) {
EXPECT_EQ((tensorstore::StridedLayout<tensorstore::dynamic_rank,
tensorstore::offset_origin>(
{-kInfIndex, -kInfIndex}, {kInfSize, kInfSize}, {1, 1})),
tensorstore::internal_index_space::GetUnboundedLayout(2));
}
TEST(TransformedArrayTest, StaticDataTypeCast) {
TransformedArray<int32_t, 1> ta_orig = MakeArray<int32_t>({3, 4});
TransformedArray<void, 1> ta = ta_orig;
auto ta_int = StaticDataTypeCast<int32_t>(ta);
static_assert(
std::is_same_v<decltype(ta_int), Result<TransformedArray<int, 1>>>);
ASSERT_TRUE(ta_int);
EXPECT_THAT(GetPointers(*ta_int),
::testing::ElementsAreArray(GetPointers(ta_orig)));
}
TEST(TransformedArrayTest, CastArrayToTransformedArray) {
tensorstore::SharedArray<int32_t> a = MakeArray<int32_t>({1, 2});
auto ta_result =
tensorstore::StaticCast<tensorstore::TransformedArrayView<int32_t, 1>>(a);
TENSORSTORE_ASSERT_OK(ta_result);
EXPECT_THAT(GetPointers(*ta_result), ::testing::ElementsAre(&a(0), &a(1)));
}
TEST(TransformedArrayTest, StaticDataTypeCastShared) {
auto ta_orig = tensorstore::TransformedArray(MakeArray<int32_t>({3, 4}));
TransformedArray<Shared<void>, 1> ta = ta_orig;
auto ta_int = StaticDataTypeCast<int32_t>(ta);
static_assert(std::is_same_v<decltype(ta_int),
Result<TransformedArray<Shared<int32_t>, 1>>>);
ASSERT_TRUE(ta_int);
EXPECT_THAT(GetPointers(*ta_int),
::testing::ElementsAreArray(GetPointers(ta_orig)));
}
TEST(TransformedArrayTest, StaticRankCast) {
TransformedArray<Shared<int32_t>, dynamic_rank> ta =
MakeArray<int32_t>({3, 4});
auto ta1 = StaticRankCast<1>(ta);
static_assert(std::is_same_v<decltype(ta1),
Result<TransformedArray<Shared<int32_t>, 1>>>);
ASSERT_TRUE(ta1);
EXPECT_THAT(GetPointers(*ta1), ::testing::ElementsAreArray(GetPointers(ta)));
EXPECT_THAT(
StaticRankCast<2>(ta),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Cannot cast transformed array with data type of int32 and rank of 1 "
"to transformed array with data type of int32 and rank of 2"));
}
TEST(TransformedArrayTest, ApplyIndexTransform) {
auto array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
auto result = ChainResult(array, tensorstore::IdentityTransform<2>());
TENSORSTORE_ASSERT_OK(result);
EXPECT_EQ(array, MakeCopy(*result));
}
TEST(CopyTransformedArrayTest, Int32ToUint32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<uint32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
EXPECT_EQ(b, MakeArray<uint32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
TEST(CopyTransformedArrayTest, Int32ToInt32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<int32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
a, ChainResult(b, tensorstore::Dims(1, 0).Transpose())));
EXPECT_EQ(b, MakeArray<int32_t>({{1, 4}, {2, 5}, {3, 6}}));
}
TEST(CopyTransformedArrayTest, Int32ToFloat32) {
auto a = MakeArray<int32_t>({{1, 2, 3}, {4, 5, 6}});
auto b = tensorstore::AllocateArray<float32_t>({3, 2});
EXPECT_EQ(absl::OkStatus(),
CopyTransformedArray(
ChainResult(a, tensorstore::Dims(1, 0).Transpose()), b));
EXPECT_EQ(b, MakeArray<float32_t>({{1.0, 4.0}, {2.0, 5.0}, {3.0, 6.0}}));
}
TEST(CopyTransformedArrayTest, InvalidDataType) {
auto a = MakeArray<::tensorstore::dtypes::string_t>({"x", "y"});
auto b = tensorstore::AllocateArray<float32_t>({2});
EXPECT_THAT(CopyTransformedArray(a, b),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot convert string -> float32"));
}
TEST(TransformedArrayTest, UnownedToShared) {
auto a = MakeArray<int>({1, 2, 3});
TransformedArray<int> ta = a;
auto shared_ta = UnownedToShared(ta);
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
}
TEST(TransformedArrayTest, UnownedToSharedAliasing) {
auto a = MakeArray<int>({1, 2, 3});
TransformedArray<int> ta = a;
EXPECT_EQ(1, a.pointer().use_count());
{
auto shared_ta = UnownedToShared(a.pointer(), ta);
EXPECT_EQ(2, a.pointer().use_count());
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
auto shared_ta_copy = UnownedToShared(shared_ta);
static_assert(
std::is_same_v<decltype(shared_ta), TransformedArray<Shared<int>>>);
EXPECT_EQ(3, a.pointer().use_count());
}
EXPECT_EQ(1, a.pointer().use_count());
}
TEST(TryConvertToArrayTest, Basic) {
auto array = tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
tensorstore::value_init);
EXPECT_THAT(array | tensorstore::IdentityTransform<2>() |
tensorstore::TryConvertToArray(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
EXPECT_THAT(array | tensorstore::Dims(0).IndexSlice(1) |
tensorstore::TryConvertToArray(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array[1])));
EXPECT_THAT(array | tensorstore::Dims(0).TranslateTo(1) |
tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
::testing::Optional(tensorstore::ReferencesSameDataAs(array)));
EXPECT_THAT(array |
tensorstore::Dims(0).OuterIndexArraySlice(
tensorstore::MakeArray<Index>({0, 1, 1})) |
tensorstore::TryConvertToArray(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(TryConvertToArrayTest, Random) {
tensorstore::SharedArray<const void> array =
tensorstore::AllocateArray<int32_t>({2, 3}, tensorstore::c_order,
tensorstore::value_init);
std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
"TENSORSTORE_INTERNAL_VIEW_AS_ARRAY")};
constexpr size_t kNumIterations = 10;
for (size_t iter_i = 0; iter_i < kNumIterations; ++iter_i) {
tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters p;
p.max_stride = 2;
auto transform =
tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
gen, tensorstore::IndexDomain<>(array.domain()), p);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_zero_origin,
array | transform |
tensorstore::Materialize<tensorstore::zero_origin>());
EXPECT_THAT(array | transform |
tensorstore::TryConvertToArray<tensorstore::zero_origin>(),
::testing::Optional(materialized_zero_origin));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto materialized_offset_origin,
array | transform | tensorstore::Materialize());
EXPECT_THAT(array | transform | tensorstore::TryConvertToArray(),
::testing::Optional(materialized_offset_origin));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transformed_array.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transformed_array_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4538cf80-68bd-46c3-8ce6-1a1a490cd832 | cpp | google/cel-cpp | flat_expr_builder | eval/compiler/flat_expr_builder.cc | eval/compiler/flat_expr_builder_test.cc | #include "eval/compiler/flat_expr_builder.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <iterator>
#include <memory>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "base/ast.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "base/builtins.h"
#include "common/ast.h"
#include "common/ast_traverse.h"
#include "common/ast_visitor.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/compiler/flat_expr_builder_extensions.h"
#include "eval/compiler/resolver.h"
#include "eval/eval/comprehension_step.h"
#include "eval/eval/const_value_step.h"
#include "eval/eval/container_access_step.h"
#include "eval/eval/create_list_step.h"
#include "eval/eval/create_map_step.h"
#include "eval/eval/create_struct_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/function_step.h"
#include "eval/eval/ident_step.h"
#include "eval/eval/jump_step.h"
#include "eval/eval/lazy_init_step.h"
#include "eval/eval/logic_step.h"
#include "eval/eval/optional_or_step.h"
#include "eval/eval/select_step.h"
#include "eval/eval/shadowable_value_step.h"
#include "eval/eval/ternary_step.h"
#include "eval/eval/trace_step.h"
#include "internal/status_macros.h"
#include "runtime/internal/convert_constant.h"
#include "runtime/internal/issue_collector.h"
#include "runtime/runtime_issue.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Ast;
using ::cel::AstTraverse;
using ::cel::RuntimeIssue;
using ::cel::StringValue;
using ::cel::Value;
using ::cel::ValueManager;
using ::cel::ast_internal::AstImpl;
using ::cel::runtime_internal::ConvertConstant;
using ::cel::runtime_internal::IssueCollector;
constexpr absl::string_view kOptionalOrFn = "or";
constexpr absl::string_view kOptionalOrValueFn = "orValue";
class FlatExprVisitor;
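// Tracks slot indices for comprehension variables. Slots are reserved and
// released stack-style as nested comprehensions are entered and exited, and
// max_slot_count() records the high-water mark, i.e. how many slots the
// finished program needs to provide.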
class IndexManager {
public:
IndexManager() : next_free_slot_(0), max_slot_count_(0) {}
size_t ReserveSlots(size_t n) {
size_t result = next_free_slot_;
next_free_slot_ += n;
if (next_free_slot_ > max_slot_count_) {
max_slot_count_ = next_free_slot_;
}
return result;
}
size_t ReleaseSlots(size_t n) {
next_free_slot_ -= n;
return next_free_slot_;
}
size_t max_slot_count() const { return max_slot_count_; }
private:
size_t next_free_slot_;
size_t max_slot_count_;
};
struct ProgramStepIndex {
int index;
ProgramBuilder::Subexpression* subexpression;
};
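// Remembers where a jump step was planned so its relative offset can be
// patched in once the target step's position is known. Jumps may only target
// steps within the same subexpression.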
class Jump {
public:
explicit Jump() : self_index_{-1, nullptr}, jump_step_(nullptr) {}
Jump(ProgramStepIndex self_index, JumpStepBase* jump_step)
: self_index_(self_index), jump_step_(jump_step) {}
static absl::StatusOr<int> CalculateOffset(ProgramStepIndex base,
ProgramStepIndex target) {
if (target.subexpression != base.subexpression) {
return absl::InternalError(
"Jump target must be contained in the parent"
"subexpression");
}
int offset = base.subexpression->CalculateOffset(base.index, target.index);
return offset;
}
absl::Status set_target(ProgramStepIndex target) {
CEL_ASSIGN_OR_RETURN(int offset, CalculateOffset(self_index_, target));
jump_step_->set_jump_offset(offset);
return absl::OkStatus();
}
bool exists() { return jump_step_ != nullptr; }
private:
ProgramStepIndex self_index_;
JumpStepBase* jump_step_;
};
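// Visitor interface used while planning conditional operators (&&, ||, ?:,
// optional.or/orValue). Implementations weave jump steps between the
// operator's arguments to implement short-circuiting.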
class CondVisitor {
public:
virtual ~CondVisitor() = default;
virtual void PreVisit(const cel::ast_internal::Expr* expr) = 0;
virtual void PostVisitArg(int arg_num,
const cel::ast_internal::Expr* expr) = 0;
virtual void PostVisit(const cel::ast_internal::Expr* expr) = 0;
virtual void PostVisitTarget(const cel::ast_internal::Expr* expr) {}
};
enum class BinaryCond {
kAnd = 0,
kOr,
kOptionalOr,
kOptionalOrValue,
};
class BinaryCondVisitor : public CondVisitor {
public:
explicit BinaryCondVisitor(FlatExprVisitor* visitor, BinaryCond cond,
bool short_circuiting)
: visitor_(visitor), cond_(cond), short_circuiting_(short_circuiting) {}
void PreVisit(const cel::ast_internal::Expr* expr) override;
void PostVisitArg(int arg_num, const cel::ast_internal::Expr* expr) override;
void PostVisit(const cel::ast_internal::Expr* expr) override;
void PostVisitTarget(const cel::ast_internal::Expr* expr) override;
private:
FlatExprVisitor* visitor_;
const BinaryCond cond_;
Jump jump_step_;
bool short_circuiting_;
};
class TernaryCondVisitor : public CondVisitor {
public:
explicit TernaryCondVisitor(FlatExprVisitor* visitor) : visitor_(visitor) {}
void PreVisit(const cel::ast_internal::Expr* expr) override;
void PostVisitArg(int arg_num, const cel::ast_internal::Expr* expr) override;
void PostVisit(const cel::ast_internal::Expr* expr) override;
private:
FlatExprVisitor* visitor_;
Jump jump_to_second_;
Jump error_jump_;
Jump jump_after_first_;
};
class ExhaustiveTernaryCondVisitor : public CondVisitor {
public:
explicit ExhaustiveTernaryCondVisitor(FlatExprVisitor* visitor)
: visitor_(visitor) {}
void PreVisit(const cel::ast_internal::Expr* expr) override;
void PostVisitArg(int arg_num, const cel::ast_internal::Expr* expr) override {
}
void PostVisit(const cel::ast_internal::Expr* expr) override;
private:
FlatExprVisitor* visitor_;
};
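// Returns true if the comprehension's loop step has the `accu + [elem]` shape
// (optionally wrapped in a filter ternary), as typically produced by the
// map/filter macros, so the planner can substitute an in-place list append.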
bool IsOptimizableListAppend(
const cel::ast_internal::Comprehension* comprehension,
bool enable_comprehension_list_append) {
if (!enable_comprehension_list_append) {
return false;
}
absl::string_view accu_var = comprehension->accu_var();
if (accu_var.empty() ||
comprehension->result().ident_expr().name() != accu_var) {
return false;
}
if (!comprehension->accu_init().has_list_expr()) {
return false;
}
if (!comprehension->loop_step().has_call_expr()) {
return false;
}
const auto* call_expr = &comprehension->loop_step().call_expr();
if (call_expr->function() == cel::builtin::kTernary &&
call_expr->args().size() == 3) {
if (!call_expr->args()[1].has_call_expr()) {
return false;
}
call_expr = &(call_expr->args()[1].call_expr());
}
return call_expr->function() == cel::builtin::kAdd &&
call_expr->args().size() == 2 &&
call_expr->args()[0].has_ident_expr() &&
call_expr->args()[0].ident_expr().name() == accu_var;
}
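// Returns true if the comprehension has the shape produced by the cel.bind()
// macro: a loop that never runs (constant-false condition, empty iter_range,
// reserved "#unused" iteration variable), so only accu_init and result are
// evaluated.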
bool IsBind(const cel::ast_internal::Comprehension* comprehension) {
static constexpr absl::string_view kUnusedIterVar = "#unused";
return comprehension->loop_condition().const_expr().has_bool_value() &&
comprehension->loop_condition().const_expr().bool_value() == false &&
comprehension->iter_var() == kUnusedIterVar &&
comprehension->iter_range().has_list_expr() &&
comprehension->iter_range().list_expr().elements().empty();
}
bool IsBlock(const cel::ast_internal::Call* call) {
return call->function() == "cel.@block";
}
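// Plans the step sequence for a comprehension. Trivial comprehensions
// (cel.bind) compile to a slot assignment plus the result expression; the
// general case emits an init/next/cond loop whose jump offsets are patched
// as later positions become known.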
class ComprehensionVisitor {
public:
explicit ComprehensionVisitor(FlatExprVisitor* visitor, bool short_circuiting,
bool is_trivial, size_t iter_slot,
size_t accu_slot)
: visitor_(visitor),
next_step_(nullptr),
cond_step_(nullptr),
short_circuiting_(short_circuiting),
is_trivial_(is_trivial),
accu_init_extracted_(false),
iter_slot_(iter_slot),
accu_slot_(accu_slot) {}
void PreVisit(const cel::ast_internal::Expr* expr);
absl::Status PostVisitArg(cel::ComprehensionArg arg_num,
const cel::ast_internal::Expr* comprehension_expr) {
if (is_trivial_) {
PostVisitArgTrivial(arg_num, comprehension_expr);
return absl::OkStatus();
} else {
return PostVisitArgDefault(arg_num, comprehension_expr);
}
}
void PostVisit(const cel::ast_internal::Expr* expr);
void MarkAccuInitExtracted() { accu_init_extracted_ = true; }
private:
void PostVisitArgTrivial(cel::ComprehensionArg arg_num,
const cel::ast_internal::Expr* comprehension_expr);
absl::Status PostVisitArgDefault(
cel::ComprehensionArg arg_num,
const cel::ast_internal::Expr* comprehension_expr);
FlatExprVisitor* visitor_;
ComprehensionNextStep* next_step_;
ComprehensionCondStep* cond_step_;
ProgramStepIndex next_step_pos_;
ProgramStepIndex cond_step_pos_;
bool short_circuiting_;
bool is_trivial_;
bool accu_init_extracted_;
size_t iter_slot_;
size_t accu_slot_;
};
absl::flat_hash_set<int32_t> MakeOptionalIndicesSet(
const cel::ast_internal::CreateList& create_list_expr) {
absl::flat_hash_set<int32_t> optional_indices;
for (size_t i = 0; i < create_list_expr.elements().size(); ++i) {
if (create_list_expr.elements()[i].optional()) {
optional_indices.insert(static_cast<int32_t>(i));
}
}
return optional_indices;
}
absl::flat_hash_set<int32_t> MakeOptionalIndicesSet(
const cel::ast_internal::CreateStruct& create_struct_expr) {
absl::flat_hash_set<int32_t> optional_indices;
for (size_t i = 0; i < create_struct_expr.fields().size(); ++i) {
if (create_struct_expr.fields()[i].optional()) {
optional_indices.insert(static_cast<int32_t>(i));
}
}
return optional_indices;
}
absl::flat_hash_set<int32_t> MakeOptionalIndicesSet(
const cel::MapExpr& map_expr) {
absl::flat_hash_set<int32_t> optional_indices;
for (size_t i = 0; i < map_expr.entries().size(); ++i) {
if (map_expr.entries()[i].optional()) {
optional_indices.insert(static_cast<int32_t>(i));
}
}
return optional_indices;
}
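// AST visitor that appends flat program steps in evaluation order. It also
// resolves namespaced identifiers, assigns slots for comprehension and
// cel.@block variables, invokes registered ProgramOptimizers, and promotes
// eligible subtrees to recursive (direct) plans.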
class FlatExprVisitor : public cel::AstVisitor {
public:
FlatExprVisitor(
const Resolver& resolver, const cel::RuntimeOptions& options,
std::vector<std::unique_ptr<ProgramOptimizer>> program_optimizers,
const absl::flat_hash_map<int64_t, cel::ast_internal::Reference>&
reference_map,
ValueManager& value_factory, IssueCollector& issue_collector,
ProgramBuilder& program_builder, PlannerContext& extension_context,
bool enable_optional_types)
: resolver_(resolver),
value_factory_(value_factory),
progress_status_(absl::OkStatus()),
resolved_select_expr_(nullptr),
options_(options),
program_optimizers_(std::move(program_optimizers)),
issue_collector_(issue_collector),
program_builder_(program_builder),
extension_context_(extension_context),
enable_optional_types_(enable_optional_types) {}
void PreVisitExpr(const cel::ast_internal::Expr& expr) override {
ValidateOrError(!absl::holds_alternative<cel::UnspecifiedExpr>(expr.kind()),
"Invalid empty expression");
if (!progress_status_.ok()) {
return;
}
if (resume_from_suppressed_branch_ == nullptr &&
suppressed_branches_.find(&expr) != suppressed_branches_.end()) {
resume_from_suppressed_branch_ = &expr;
}
if (block_.has_value()) {
BlockInfo& block = *block_;
if (block.in && block.bindings_set.contains(&expr)) {
block.current_binding = &expr;
}
}
program_builder_.EnterSubexpression(&expr);
for (const std::unique_ptr<ProgramOptimizer>& optimizer :
program_optimizers_) {
absl::Status status = optimizer->OnPreVisit(extension_context_, expr);
if (!status.ok()) {
SetProgressStatusError(status);
}
}
}
void PostVisitExpr(const cel::ast_internal::Expr& expr) override {
if (!progress_status_.ok()) {
return;
}
if (&expr == resume_from_suppressed_branch_) {
resume_from_suppressed_branch_ = nullptr;
}
for (const std::unique_ptr<ProgramOptimizer>& optimizer :
program_optimizers_) {
absl::Status status = optimizer->OnPostVisit(extension_context_, expr);
if (!status.ok()) {
SetProgressStatusError(status);
return;
}
}
auto* subexpression = program_builder_.current();
if (subexpression != nullptr && options_.enable_recursive_tracing &&
subexpression->IsRecursive()) {
auto program = subexpression->ExtractRecursiveProgram();
subexpression->set_recursive_program(
std::make_unique<TraceStep>(std::move(program.step)), program.depth);
}
program_builder_.ExitSubexpression(&expr);
if (!comprehension_stack_.empty() &&
comprehension_stack_.back().is_optimizable_bind &&
(&comprehension_stack_.back().comprehension->accu_init() == &expr)) {
SetProgressStatusError(
MaybeExtractSubexpression(&expr, comprehension_stack_.back()));
}
if (block_.has_value()) {
BlockInfo& block = *block_;
if (block.current_binding == &expr) {
int index = program_builder_.ExtractSubexpression(&expr);
if (index == -1) {
SetProgressStatusError(
absl::InvalidArgumentError("failed to extract subexpression"));
return;
}
block.subexpressions[block.current_index++] = index;
block.current_binding = nullptr;
}
}
}
void PostVisitConst(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Constant& const_expr) override {
if (!progress_status_.ok()) {
return;
}
absl::StatusOr<cel::Value> converted_value =
ConvertConstant(const_expr, value_factory_);
if (!converted_value.ok()) {
SetProgressStatusError(converted_value.status());
return;
}
    if (options_.max_recursion_depth != 0) {
SetRecursiveStep(CreateConstValueDirectStep(
std::move(converted_value).value(), expr.id()),
1);
return;
}
AddStep(
CreateConstValueStep(std::move(converted_value).value(), expr.id()));
}
struct SlotLookupResult {
int slot;
int subexpression;
};
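  // Resolves an identifier to a variable slot if it names an in-scope
  // comprehension variable or a cel.@block binding (spelled "@index<N>").
  // Returns {-1, -1} when the identifier is not slot-backed; a non-negative
  // subexpression index indicates a lazily initialized cel.bind value.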
SlotLookupResult LookupSlot(absl::string_view path) {
if (block_.has_value()) {
const BlockInfo& block = *block_;
if (block.in) {
absl::string_view index_suffix = path;
if (absl::ConsumePrefix(&index_suffix, "@index")) {
size_t index;
if (!absl::SimpleAtoi(index_suffix, &index)) {
SetProgressStatusError(
issue_collector_.AddIssue(RuntimeIssue::CreateError(
absl::InvalidArgumentError("bad @index"))));
return {-1, -1};
}
if (index >= block.size) {
SetProgressStatusError(
issue_collector_.AddIssue(RuntimeIssue::CreateError(
absl::InvalidArgumentError(absl::StrCat(
"invalid @index greater than number of bindings: ",
index, " >= ", block.size)))));
return {-1, -1};
}
if (index >= block.current_index) {
SetProgressStatusError(
issue_collector_.AddIssue(RuntimeIssue::CreateError(
absl::InvalidArgumentError(absl::StrCat(
"@index references current or future binding: ", index,
" >= ", block.current_index)))));
return {-1, -1};
}
return {static_cast<int>(block.index + index),
block.subexpressions[index]};
}
}
}
if (!comprehension_stack_.empty()) {
for (int i = comprehension_stack_.size() - 1; i >= 0; i--) {
const ComprehensionStackRecord& record = comprehension_stack_[i];
if (record.iter_var_in_scope &&
record.comprehension->iter_var() == path) {
if (record.is_optimizable_bind) {
SetProgressStatusError(issue_collector_.AddIssue(
RuntimeIssue::CreateWarning(absl::InvalidArgumentError(
"Unexpected iter_var access in trivial comprehension"))));
return {-1, -1};
}
return {static_cast<int>(record.iter_slot), -1};
}
if (record.accu_var_in_scope &&
record.comprehension->accu_var() == path) {
int slot = record.accu_slot;
int subexpression = -1;
if (record.is_optimizable_bind) {
subexpression = record.subexpression;
}
return {slot, subexpression};
}
}
}
if (absl::StartsWith(path, "@it:") || absl::StartsWith(path, "@it2:") ||
absl::StartsWith(path, "@ac:")) {
SetProgressStatusError(
issue_collector_.AddIssue(RuntimeIssue::CreateError(
absl::InvalidArgumentError("out of scope reference to CSE "
"generated comprehension variable"))));
}
return {-1, -1};
}
void PostVisitIdent(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Ident& ident_expr) override {
if (!progress_status_.ok()) {
return;
}
std::string path = ident_expr.name();
if (!ValidateOrError(
!path.empty(),
"Invalid expression: identifier 'name' must not be empty")) {
return;
}
absl::optional<cel::Value> const_value;
int64_t select_root_id = -1;
while (!namespace_stack_.empty()) {
const auto& select_node = namespace_stack_.front();
auto select_expr = select_node.first;
auto qualified_path = absl::StrCat(path, ".", select_node.second);
const_value = resolver_.FindConstant(qualified_path, select_expr->id());
if (const_value) {
resolved_select_expr_ = select_expr;
select_root_id = select_expr->id();
path = qualified_path;
namespace_stack_.clear();
break;
}
namespace_stack_.pop_front();
}
if (!const_value) {
const_value = resolver_.FindConstant(path, expr.id());
select_root_id = expr.id();
}
if (const_value) {
if (options_.max_recursion_depth != 0) {
SetRecursiveStep(CreateDirectShadowableValueStep(
std::move(path), std::move(const_value).value(),
select_root_id),
1);
return;
}
AddStep(CreateShadowableValueStep(
std::move(path), std::move(const_value).value(), select_root_id));
return;
}
SlotLookupResult slot = LookupSlot(path);
if (slot.subexpression >= 0) {
auto* subexpression =
program_builder_.GetExtractedSubexpression(slot.subexpression);
if (subexpression == nullptr) {
SetProgressStatusError(
absl::InternalError("bad subexpression reference"));
return;
}
if (subexpression->IsRecursive()) {
const auto& program = subexpression->recursive_program();
SetRecursiveStep(
CreateDirectLazyInitStep(slot.slot, program.step.get(), expr.id()),
program.depth + 1);
} else {
AddStep(
CreateLazyInitStep(slot.slot, slot.subexpression + 1, expr.id()));
}
return;
} else if (slot.slot >= 0) {
if (options_.max_recursion_depth != 0) {
SetRecursiveStep(
CreateDirectSlotIdentStep(ident_expr.name(), slot.slot, expr.id()),
1);
} else {
AddStep(CreateIdentStepForSlot(ident_expr, slot.slot, expr.id()));
}
return;
}
if (options_.max_recursion_depth != 0) {
SetRecursiveStep(CreateDirectIdentStep(ident_expr.name(), expr.id()), 1);
} else {
AddStep(CreateIdentStep(ident_expr, expr.id()));
}
}
void PreVisitSelect(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Select& select_expr) override {
if (!progress_status_.ok()) {
return;
}
if (!ValidateOrError(
!select_expr.field().empty(),
"Invalid expression: select 'field' must not be empty")) {
return;
}
if (!select_expr.test_only() && (select_expr.operand().has_ident_expr() ||
select_expr.operand().has_select_expr())) {
for (size_t i = 0; i < namespace_stack_.size(); i++) {
auto ns = namespace_stack_[i];
namespace_stack_[i] = {
ns.first, absl::StrCat(select_expr.field(), ".", ns.second)};
}
namespace_stack_.push_back({&expr, select_expr.field()});
} else {
namespace_stack_.clear();
}
}
void PostVisitSelect(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Select& select_expr) override {
if (!progress_status_.ok()) {
return;
}
if (resolved_select_expr_) {
if (&expr == resolved_select_expr_) {
resolved_select_expr_ = nullptr;
}
return;
}
auto depth = RecursionEligible();
if (depth.has_value()) {
auto deps = ExtractRecursiveDependencies();
if (deps.size() != 1) {
SetProgressStatusError(absl::InternalError(
"unexpected number of dependencies for select operation."));
return;
}
StringValue field =
value_factory_.CreateUncheckedStringValue(select_expr.field());
SetRecursiveStep(
CreateDirectSelectStep(std::move(deps[0]), std::move(field),
select_expr.test_only(), expr.id(),
options_.enable_empty_wrapper_null_unboxing,
enable_optional_types_),
*depth + 1);
return;
}
AddStep(CreateSelectStep(select_expr, expr.id(),
options_.enable_empty_wrapper_null_unboxing,
value_factory_, enable_optional_types_));
}
void PreVisitCall(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Call& call_expr) override {
if (!progress_status_.ok()) {
return;
}
std::unique_ptr<CondVisitor> cond_visitor;
if (call_expr.function() == cel::builtin::kAnd) {
cond_visitor = std::make_unique<BinaryCondVisitor>(
this, BinaryCond::kAnd, options_.short_circuiting);
} else if (call_expr.function() == cel::builtin::kOr) {
cond_visitor = std::make_unique<BinaryCondVisitor>(
this, BinaryCond::kOr, options_.short_circuiting);
} else if (call_expr.function() == cel::builtin::kTernary) {
if (options_.short_circuiting) {
cond_visitor = std::make_unique<TernaryCondVisitor>(this);
} else {
cond_visitor = std::make_unique<ExhaustiveTernaryCondVisitor>(this);
}
} else if (enable_optional_types_ &&
call_expr.function() == kOptionalOrFn &&
call_expr.has_target() && call_expr.args().size() == 1) {
cond_visitor = std::make_unique<BinaryCondVisitor>(
this, BinaryCond::kOptionalOr, options_.short_circuiting);
} else if (enable_optional_types_ &&
call_expr.function() == kOptionalOrValueFn &&
call_expr.has_target() && call_expr.args().size() == 1) {
cond_visitor = std::make_unique<BinaryCondVisitor>(
this, BinaryCond::kOptionalOrValue, options_.short_circuiting);
} else if (IsBlock(&call_expr)) {
if (block_.has_value()) {
SetProgressStatusError(
absl::InvalidArgumentError("multiple cel.@block are not allowed"));
return;
}
block_ = BlockInfo();
BlockInfo& block = *block_;
block.in = true;
if (call_expr.args().empty()) {
SetProgressStatusError(absl::InvalidArgumentError(
"malformed cel.@block: missing list of bound expressions"));
return;
}
if (call_expr.args().size() != 2) {
SetProgressStatusError(absl::InvalidArgumentError(
"malformed cel.@block: missing bound expression"));
return;
}
if (!call_expr.args()[0].has_list_expr()) {
SetProgressStatusError(
absl::InvalidArgumentError("malformed cel.@block: first argument "
"is not a list of bound expressions"));
return;
}
const auto& list_expr = call_expr.args().front().list_expr();
block.size = list_expr.elements().size();
if (block.size == 0) {
SetProgressStatusError(absl::InvalidArgumentError(
"malformed cel.@block: list of bound expressions is empty"));
return;
}
block.bindings_set.reserve(block.size);
for (const auto& list_expr_element : list_expr.elements()) {
if (list_expr_element.optional()) {
SetProgressStatusError(
absl::InvalidArgumentError("malformed cel.@block: list of bound "
"expressions contains an optional"));
return;
}
block.bindings_set.insert(&list_expr_element.expr());
}
block.index = index_manager().ReserveSlots(block.size);
block.expr = &expr;
block.bindings = &call_expr.args()[0];
block.bound = &call_expr.args()[1];
block.subexpressions.resize(block.size, -1);
} else {
return;
}
if (cond_visitor) {
cond_visitor->PreVisit(&expr);
cond_visitor_stack_.push({&expr, std::move(cond_visitor)});
}
}
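  // Returns the recursive dependency depth of the current node if all of its
  // children were planned recursively and the resulting depth stays under
  // max_recursion_depth (negative means unlimited); nullopt otherwise.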
absl::optional<int> RecursionEligible() {
if (program_builder_.current() == nullptr) {
return absl::nullopt;
}
absl::optional<int> depth =
program_builder_.current()->RecursiveDependencyDepth();
if (!depth.has_value()) {
return depth;
}
if (options_.max_recursion_depth < 0 ||
*depth < options_.max_recursion_depth) {
return depth;
}
return absl::nullopt;
}
std::vector<std::unique_ptr<DirectExpressionStep>>
ExtractRecursiveDependencies() {
ABSL_DCHECK(program_builder_.current() != nullptr);
return program_builder_.current()->ExtractRecursiveDependencies();
}
void MaybeMakeTernaryRecursive(const cel::ast_internal::Expr* expr) {
if (options_.max_recursion_depth == 0) {
return;
}
    if (expr->call_expr().args().size() != 3) {
      SetProgressStatusError(absl::InvalidArgumentError(
          "unexpected number of args for builtin ternary"));
      return;
    }
const cel::ast_internal::Expr* condition_expr =
&expr->call_expr().args()[0];
const cel::ast_internal::Expr* left_expr = &expr->call_expr().args()[1];
const cel::ast_internal::Expr* right_expr = &expr->call_expr().args()[2];
auto* condition_plan = program_builder_.GetSubexpression(condition_expr);
auto* left_plan = program_builder_.GetSubexpression(left_expr);
auto* right_plan = program_builder_.GetSubexpression(right_expr);
int max_depth = 0;
if (condition_plan == nullptr || !condition_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, condition_plan->recursive_program().depth);
if (left_plan == nullptr || !left_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, left_plan->recursive_program().depth);
if (right_plan == nullptr || !right_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, right_plan->recursive_program().depth);
if (options_.max_recursion_depth >= 0 &&
max_depth >= options_.max_recursion_depth) {
return;
}
SetRecursiveStep(
CreateDirectTernaryStep(condition_plan->ExtractRecursiveProgram().step,
left_plan->ExtractRecursiveProgram().step,
right_plan->ExtractRecursiveProgram().step,
expr->id(), options_.short_circuiting),
max_depth + 1);
}
void MaybeMakeShortcircuitRecursive(const cel::ast_internal::Expr* expr,
bool is_or) {
if (options_.max_recursion_depth == 0) {
return;
}
    if (expr->call_expr().args().size() != 2) {
      SetProgressStatusError(absl::InvalidArgumentError(
          "unexpected number of args for builtin boolean operator &&/||"));
      return;
    }
const cel::ast_internal::Expr* left_expr = &expr->call_expr().args()[0];
const cel::ast_internal::Expr* right_expr = &expr->call_expr().args()[1];
auto* left_plan = program_builder_.GetSubexpression(left_expr);
auto* right_plan = program_builder_.GetSubexpression(right_expr);
int max_depth = 0;
if (left_plan == nullptr || !left_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, left_plan->recursive_program().depth);
if (right_plan == nullptr || !right_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, right_plan->recursive_program().depth);
if (options_.max_recursion_depth >= 0 &&
max_depth >= options_.max_recursion_depth) {
return;
}
if (is_or) {
SetRecursiveStep(
CreateDirectOrStep(left_plan->ExtractRecursiveProgram().step,
right_plan->ExtractRecursiveProgram().step,
expr->id(), options_.short_circuiting),
max_depth + 1);
} else {
SetRecursiveStep(
CreateDirectAndStep(left_plan->ExtractRecursiveProgram().step,
right_plan->ExtractRecursiveProgram().step,
expr->id(), options_.short_circuiting),
max_depth + 1);
}
}
void MaybeMakeOptionalShortcircuitRecursive(
const cel::ast_internal::Expr* expr, bool is_or_value) {
if (options_.max_recursion_depth == 0) {
return;
}
    if (!expr->call_expr().has_target() ||
        expr->call_expr().args().size() != 1) {
      SetProgressStatusError(absl::InvalidArgumentError(
          "unexpected number of args for optional.or{Value}"));
      return;
    }
const cel::ast_internal::Expr* left_expr = &expr->call_expr().target();
const cel::ast_internal::Expr* right_expr = &expr->call_expr().args()[0];
auto* left_plan = program_builder_.GetSubexpression(left_expr);
auto* right_plan = program_builder_.GetSubexpression(right_expr);
int max_depth = 0;
if (left_plan == nullptr || !left_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, left_plan->recursive_program().depth);
if (right_plan == nullptr || !right_plan->IsRecursive()) {
return;
}
max_depth = std::max(max_depth, right_plan->recursive_program().depth);
if (options_.max_recursion_depth >= 0 &&
max_depth >= options_.max_recursion_depth) {
return;
}
SetRecursiveStep(CreateDirectOptionalOrStep(
expr->id(), left_plan->ExtractRecursiveProgram().step,
right_plan->ExtractRecursiveProgram().step,
is_or_value, options_.short_circuiting),
max_depth + 1);
}
void MaybeMakeBindRecursive(
const cel::ast_internal::Expr* expr,
const cel::ast_internal::Comprehension* comprehension, size_t accu_slot) {
if (options_.max_recursion_depth == 0) {
return;
}
auto* result_plan =
program_builder_.GetSubexpression(&comprehension->result());
if (result_plan == nullptr || !result_plan->IsRecursive()) {
return;
}
int result_depth = result_plan->recursive_program().depth;
if (options_.max_recursion_depth > 0 &&
result_depth >= options_.max_recursion_depth) {
return;
}
auto program = result_plan->ExtractRecursiveProgram();
SetRecursiveStep(
CreateDirectBindStep(accu_slot, std::move(program.step), expr->id()),
result_depth + 1);
}
void MaybeMakeComprehensionRecursive(
const cel::ast_internal::Expr* expr,
const cel::ast_internal::Comprehension* comprehension, size_t iter_slot,
size_t accu_slot) {
if (options_.max_recursion_depth == 0) {
return;
}
auto* accu_plan =
program_builder_.GetSubexpression(&comprehension->accu_init());
if (accu_plan == nullptr || !accu_plan->IsRecursive()) {
return;
}
auto* range_plan =
program_builder_.GetSubexpression(&comprehension->iter_range());
if (range_plan == nullptr || !range_plan->IsRecursive()) {
return;
}
auto* loop_plan =
program_builder_.GetSubexpression(&comprehension->loop_step());
if (loop_plan == nullptr || !loop_plan->IsRecursive()) {
return;
}
auto* condition_plan =
program_builder_.GetSubexpression(&comprehension->loop_condition());
if (condition_plan == nullptr || !condition_plan->IsRecursive()) {
return;
}
auto* result_plan =
program_builder_.GetSubexpression(&comprehension->result());
if (result_plan == nullptr || !result_plan->IsRecursive()) {
return;
}
int max_depth = 0;
max_depth = std::max(max_depth, accu_plan->recursive_program().depth);
max_depth = std::max(max_depth, range_plan->recursive_program().depth);
max_depth = std::max(max_depth, loop_plan->recursive_program().depth);
max_depth = std::max(max_depth, condition_plan->recursive_program().depth);
max_depth = std::max(max_depth, result_plan->recursive_program().depth);
if (options_.max_recursion_depth > 0 &&
max_depth >= options_.max_recursion_depth) {
return;
}
auto step = CreateDirectComprehensionStep(
iter_slot, accu_slot, range_plan->ExtractRecursiveProgram().step,
accu_plan->ExtractRecursiveProgram().step,
loop_plan->ExtractRecursiveProgram().step,
condition_plan->ExtractRecursiveProgram().step,
result_plan->ExtractRecursiveProgram().step, options_.short_circuiting,
expr->id());
SetRecursiveStep(std::move(step), max_depth + 1);
}
void PostVisitCall(const cel::ast_internal::Expr& expr,
const cel::ast_internal::Call& call_expr) override {
if (!progress_status_.ok()) {
return;
}
auto cond_visitor = FindCondVisitor(&expr);
if (cond_visitor) {
cond_visitor->PostVisit(&expr);
cond_visitor_stack_.pop();
if (call_expr.function() == cel::builtin::kTernary) {
MaybeMakeTernaryRecursive(&expr);
} else if (call_expr.function() == cel::builtin::kOr) {
MaybeMakeShortcircuitRecursive(&expr, true);
} else if (call_expr.function() == cel::builtin::kAnd) {
MaybeMakeShortcircuitRecursive(&expr, false);
} else if (enable_optional_types_) {
        if (call_expr.function() == kOptionalOrFn) {
          MaybeMakeOptionalShortcircuitRecursive(&expr,
                                                 /*is_or_value=*/false);
        } else if (call_expr.function() == kOptionalOrValueFn) {
          MaybeMakeOptionalShortcircuitRecursive(&expr,
                                                 /*is_or_value=*/true);
        }
}
return;
}
if (call_expr.function() == cel::builtin::kIndex) {
auto depth = RecursionEligible();
if (depth.has_value()) {
auto args = ExtractRecursiveDependencies();
        if (args.size() != 2) {
          SetProgressStatusError(absl::InvalidArgumentError(
              "unexpected number of args for builtin index operator"));
          return;
        }
SetRecursiveStep(CreateDirectContainerAccessStep(
std::move(args[0]), std::move(args[1]),
enable_optional_types_, expr.id()),
*depth + 1);
return;
}
AddStep(CreateContainerAccessStep(call_expr, expr.id(),
enable_optional_types_));
return;
}
if (block_.has_value()) {
BlockInfo& block = *block_;
if (block.expr == &expr) {
block.in = false;
index_manager().ReleaseSlots(block.size);
AddStep(CreateClearSlotsStep(block.index, block.size, -1));
return;
}
}
absl::string_view function = call_expr.function();
if (!comprehension_stack_.empty() &&
comprehension_stack_.back().is_optimizable_list_append) {
const cel::ast_internal::Comprehension* comprehension =
comprehension_stack_.back().comprehension;
const cel::ast_internal::Expr& loop_step = comprehension->loop_step();
if (&loop_step == &expr) {
function = cel::builtin::kRuntimeListAppend;
}
if (loop_step.has_call_expr() &&
loop_step.call_expr().function() == cel::builtin::kTernary &&
loop_step.call_expr().args().size() == 3 &&
&(loop_step.call_expr().args()[1]) == &expr) {
function = cel::builtin::kRuntimeListAppend;
}
}
AddResolvedFunctionStep(&call_expr, &expr, function);
}
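  // Comprehensions reserve two slots (iter + accu); binds share a single
  // slot. A bind nested directly in an enclosing bind's accu_init transfers
  // its slot count to the enclosing record so the slots are released at the
  // right scope.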
void PreVisitComprehension(
const cel::ast_internal::Expr& expr,
const cel::ast_internal::Comprehension& comprehension) override {
if (!progress_status_.ok()) {
return;
}
if (!ValidateOrError(options_.enable_comprehension,
"Comprehension support is disabled")) {
return;
}
const auto& accu_var = comprehension.accu_var();
const auto& iter_var = comprehension.iter_var();
ValidateOrError(!accu_var.empty(),
"Invalid comprehension: 'accu_var' must not be empty");
ValidateOrError(!iter_var.empty(),
"Invalid comprehension: 'iter_var' must not be empty");
ValidateOrError(
accu_var != iter_var,
"Invalid comprehension: 'accu_var' must not be the same as 'iter_var'");
ValidateOrError(comprehension.has_accu_init(),
"Invalid comprehension: 'accu_init' must be set");
ValidateOrError(comprehension.has_loop_condition(),
"Invalid comprehension: 'loop_condition' must be set");
ValidateOrError(comprehension.has_loop_step(),
"Invalid comprehension: 'loop_step' must be set");
ValidateOrError(comprehension.has_result(),
"Invalid comprehension: 'result' must be set");
size_t iter_slot, accu_slot, slot_count;
bool is_bind = IsBind(&comprehension);
if (is_bind) {
accu_slot = iter_slot = index_manager_.ReserveSlots(1);
slot_count = 1;
} else {
iter_slot = index_manager_.ReserveSlots(2);
accu_slot = iter_slot + 1;
slot_count = 2;
}
for (ComprehensionStackRecord& record : comprehension_stack_) {
if (record.in_accu_init && record.is_optimizable_bind) {
record.slot_count += slot_count;
slot_count = 0;
break;
}
}
comprehension_stack_.push_back(
{&expr, &comprehension, iter_slot, accu_slot, slot_count,
-1,
IsOptimizableListAppend(&comprehension,
options_.enable_comprehension_list_append),
is_bind,
false,
false,
false,
std::make_unique<ComprehensionVisitor>(
this, options_.short_circuiting, is_bind, iter_slot, accu_slot)});
comprehension_stack_.back().visitor->PreVisit(&expr);
}
void PostVisitComprehension(
const cel::ast_internal::Expr& expr,
const cel::ast_internal::Comprehension& comprehension_expr) override {
if (!progress_status_.ok()) {
return;
}
    if (comprehension_stack_.empty() ||
        comprehension_stack_.back().comprehension != &comprehension_expr) {
      return;
    }
    ComprehensionStackRecord& record = comprehension_stack_.back();
record.visitor->PostVisit(&expr);
index_manager_.ReleaseSlots(record.slot_count);
comprehension_stack_.pop_back();
}
void PreVisitComprehensionSubexpression(
const cel::ast_internal::Expr& expr,
const cel::ast_internal::Comprehension& compr,
cel::ComprehensionArg comprehension_arg) override {
if (!progress_status_.ok()) {
return;
}
if (comprehension_stack_.empty() ||
comprehension_stack_.back().comprehension != &compr) {
return;
}
ComprehensionStackRecord& record = comprehension_stack_.back();
switch (comprehension_arg) {
case cel::ITER_RANGE: {
record.in_accu_init = false;
record.iter_var_in_scope = false;
record.accu_var_in_scope = false;
break;
}
case cel::ACCU_INIT: {
record.in_accu_init = true;
record.iter_var_in_scope = false;
record.accu_var_in_scope = false;
break;
}
case cel::LOOP_CONDITION: {
record.in_accu_init = false;
record.iter_var_in_scope = true;
record.accu_var_in_scope = true;
break;
}
case cel::LOOP_STEP: {
record.in_accu_init = false;
record.iter_var_in_scope = true;
record.accu_var_in_scope = true;
break;
}
case cel::RESULT: {
record.in_accu_init = false;
record.iter_var_in_scope = false;
record.accu_var_in_scope = true;
break;
}
}
}
void PostVisitComprehensionSubexpression(
const cel::ast_internal::Expr& expr,
const cel::ast_internal::Comprehension& compr,
cel::ComprehensionArg comprehension_arg) override {
if (!progress_status_.ok()) {
return;
}
if (comprehension_stack_.empty() ||
comprehension_stack_.back().comprehension != &compr) {
return;
}
SetProgressStatusError(comprehension_stack_.back().visitor->PostVisitArg(
comprehension_arg, comprehension_stack_.back().expr));
}
void PostVisitArg(const cel::ast_internal::Expr& expr, int arg_num) override {
if (!progress_status_.ok()) {
return;
}
auto cond_visitor = FindCondVisitor(&expr);
if (cond_visitor) {
cond_visitor->PostVisitArg(arg_num, &expr);
}
}
void PostVisitTarget(const cel::ast_internal::Expr& expr) override {
if (!progress_status_.ok()) {
return;
}
auto cond_visitor = FindCondVisitor(&expr);
if (cond_visitor) {
cond_visitor->PostVisitTarget(&expr);
}
}
void PostVisitList(const cel::ast_internal::Expr& expr,
const cel::ast_internal::CreateList& list_expr) override {
if (!progress_status_.ok()) {
return;
}
if (block_.has_value()) {
BlockInfo& block = *block_;
if (block.bindings == &expr) {
return;
}
}
if (!comprehension_stack_.empty()) {
const ComprehensionStackRecord& comprehension =
comprehension_stack_.back();
if (comprehension.is_optimizable_list_append &&
&(comprehension.comprehension->accu_init()) == &expr) {
if (options_.max_recursion_depth != 0) {
SetRecursiveStep(CreateDirectMutableListStep(expr.id()), 1);
return;
}
AddStep(CreateMutableListStep(expr.id()));
return;
}
}
absl::optional<int> depth = RecursionEligible();
if (depth.has_value()) {
auto deps = ExtractRecursiveDependencies();
if (deps.size() != list_expr.elements().size()) {
SetProgressStatusError(absl::InternalError(
"Unexpected number of plan elements for CreateList expr"));
return;
}
auto step = CreateDirectListStep(
std::move(deps), MakeOptionalIndicesSet(list_expr), expr.id());
SetRecursiveStep(std::move(step), *depth + 1);
return;
}
AddStep(CreateCreateListStep(list_expr, expr.id()));
}
void PostVisitStruct(
const cel::ast_internal::Expr& expr,
const cel::ast_internal::CreateStruct& struct_expr) override {
if (!progress_status_.ok()) {
return;
}
auto status_or_resolved_fields =
ResolveCreateStructFields(struct_expr, expr.id());
if (!status_or_resolved_fields.ok()) {
SetProgressStatusError(status_or_resolved_fields.status());
return;
}
std::string resolved_name =
std::move(status_or_resolved_fields.value().first);
std::vector<std::string> fields =
std::move(status_or_resolved_fields.value().second);
auto depth = RecursionEligible();
if (depth.has_value()) {
auto deps = ExtractRecursiveDependencies();
if (deps.size() != struct_expr.fields().size()) {
SetProgressStatusError(absl::InternalError(
"Unexpected number of plan elements for CreateStruct expr"));
return;
}
auto step = CreateDirectCreateStructStep(
std::move(resolved_name), std::move(fields), std::move(deps),
MakeOptionalIndicesSet(struct_expr), expr.id());
SetRecursiveStep(std::move(step), *depth + 1);
return;
}
AddStep(CreateCreateStructStep(std::move(resolved_name), std::move(fields),
MakeOptionalIndicesSet(struct_expr),
expr.id()));
}
  void PostVisitMap(const cel::ast_internal::Expr& expr,
                    const cel::MapExpr& map_expr) override {
    if (!progress_status_.ok()) {
      return;
    }
    for (const auto& entry : map_expr.entries()) {
ValidateOrError(entry.has_key(), "Map entry missing key");
ValidateOrError(entry.has_value(), "Map entry missing value");
}
auto depth = RecursionEligible();
if (depth.has_value()) {
auto deps = ExtractRecursiveDependencies();
if (deps.size() != 2 * map_expr.entries().size()) {
SetProgressStatusError(absl::InternalError(
"Unexpected number of plan elements for CreateStruct expr"));
return;
}
auto step = CreateDirectCreateMapStep(
std::move(deps), MakeOptionalIndicesSet(map_expr), expr.id());
SetRecursiveStep(std::move(step), *depth + 1);
return;
}
AddStep(CreateCreateStructStepForMap(map_expr.entries().size(),
MakeOptionalIndicesSet(map_expr),
expr.id()));
}
absl::Status progress_status() const { return progress_status_; }
cel::ValueManager& value_factory() { return value_factory_; }
void SuppressBranch(const cel::ast_internal::Expr* expr) {
suppressed_branches_.insert(expr);
}
void AddResolvedFunctionStep(const cel::ast_internal::Call* call_expr,
const cel::ast_internal::Expr* expr,
absl::string_view function) {
bool receiver_style = call_expr->has_target();
size_t num_args = call_expr->args().size() + (receiver_style ? 1 : 0);
auto arguments_matcher = ArgumentsMatcher(num_args);
auto lazy_overloads = resolver_.FindLazyOverloads(
function, call_expr->has_target(), arguments_matcher, expr->id());
if (!lazy_overloads.empty()) {
auto depth = RecursionEligible();
if (depth.has_value()) {
auto args = program_builder_.current()->ExtractRecursiveDependencies();
SetRecursiveStep(CreateDirectLazyFunctionStep(
expr->id(), *call_expr, std::move(args),
std::move(lazy_overloads)),
*depth + 1);
return;
}
AddStep(CreateFunctionStep(*call_expr, expr->id(),
std::move(lazy_overloads)));
return;
}
auto overloads = resolver_.FindOverloads(function, receiver_style,
arguments_matcher, expr->id());
if (overloads.empty()) {
auto status = issue_collector_.AddIssue(RuntimeIssue::CreateWarning(
absl::InvalidArgumentError(
"No overloads provided for FunctionStep creation"),
RuntimeIssue::ErrorCode::kNoMatchingOverload));
if (!status.ok()) {
SetProgressStatusError(status);
return;
}
}
auto recursion_depth = RecursionEligible();
if (recursion_depth.has_value()) {
ABSL_DCHECK(program_builder_.current() != nullptr);
auto args = program_builder_.current()->ExtractRecursiveDependencies();
SetRecursiveStep(
CreateDirectFunctionStep(expr->id(), *call_expr, std::move(args),
std::move(overloads)),
*recursion_depth + 1);
return;
}
AddStep(CreateFunctionStep(*call_expr, expr->id(), std::move(overloads)));
}
void AddStep(absl::StatusOr<std::unique_ptr<ExpressionStep>> step) {
if (step.ok()) {
AddStep(*std::move(step));
} else {
SetProgressStatusError(step.status());
}
}
void AddStep(std::unique_ptr<ExpressionStep> step) {
if (progress_status_.ok() && !PlanningSuppressed()) {
program_builder_.AddStep(std::move(step));
}
}
void SetRecursiveStep(std::unique_ptr<DirectExpressionStep> step, int depth) {
if (!progress_status_.ok() || PlanningSuppressed()) {
return;
}
if (program_builder_.current() == nullptr) {
SetProgressStatusError(absl::InternalError(
"CEL AST traversal out of order in flat_expr_builder."));
return;
}
program_builder_.current()->set_recursive_program(std::move(step), depth);
}
void SetProgressStatusError(const absl::Status& status) {
if (progress_status_.ok() && !status.ok()) {
progress_status_ = status;
}
}
ProgramStepIndex GetCurrentIndex() const {
ABSL_DCHECK(program_builder_.current() != nullptr);
return {static_cast<int>(program_builder_.current()->elements().size()),
program_builder_.current()};
}
CondVisitor* FindCondVisitor(const cel::ast_internal::Expr* expr) const {
if (cond_visitor_stack_.empty()) {
return nullptr;
}
const auto& latest = cond_visitor_stack_.top();
return (latest.first == expr) ? latest.second.get() : nullptr;
}
IndexManager& index_manager() { return index_manager_; }
size_t slot_count() const { return index_manager_.max_slot_count(); }
void AddOptimizer(std::unique_ptr<ProgramOptimizer> optimizer) {
program_optimizers_.push_back(std::move(optimizer));
}
template <typename... MP>
bool ValidateOrError(bool valid_expression, absl::string_view error_message,
MP... message_parts) {
if (valid_expression) {
return true;
}
SetProgressStatusError(absl::InvalidArgumentError(
absl::StrCat(error_message, message_parts...)));
return false;
}
private:
struct ComprehensionStackRecord {
const cel::ast_internal::Expr* expr;
const cel::ast_internal::Comprehension* comprehension;
size_t iter_slot;
size_t accu_slot;
size_t slot_count;
int subexpression;
bool is_optimizable_list_append;
bool is_optimizable_bind;
bool iter_var_in_scope;
bool accu_var_in_scope;
bool in_accu_init;
std::unique_ptr<ComprehensionVisitor> visitor;
};
struct BlockInfo {
bool in = false;
const cel::ast_internal::Expr* expr = nullptr;
const cel::ast_internal::Expr* bindings = nullptr;
absl::flat_hash_set<const cel::ast_internal::Expr*> bindings_set;
const cel::ast_internal::Expr* bound = nullptr;
size_t size = 0;
size_t index = 0;
size_t current_index = 0;
const cel::ast_internal::Expr* current_binding = nullptr;
std::vector<int> subexpressions;
};
bool PlanningSuppressed() const {
return resume_from_suppressed_branch_ != nullptr;
}
absl::Status MaybeExtractSubexpression(const cel::ast_internal::Expr* expr,
ComprehensionStackRecord& record) {
if (!record.is_optimizable_bind) {
return absl::OkStatus();
}
int index = program_builder_.ExtractSubexpression(expr);
if (index == -1) {
return absl::InternalError("Failed to extract subexpression");
}
record.subexpression = index;
record.visitor->MarkAccuInitExtracted();
return absl::OkStatus();
}
absl::StatusOr<std::pair<std::string, std::vector<std::string>>>
ResolveCreateStructFields(
const cel::ast_internal::CreateStruct& create_struct_expr,
int64_t expr_id) {
absl::string_view ast_name = create_struct_expr.name();
absl::optional<std::pair<std::string, cel::Type>> type;
CEL_ASSIGN_OR_RETURN(type, resolver_.FindType(ast_name, expr_id));
if (!type.has_value()) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid struct creation: missing type info for '", ast_name, "'"));
}
std::string resolved_name = std::move(type).value().first;
std::vector<std::string> fields;
fields.reserve(create_struct_expr.fields().size());
for (const auto& entry : create_struct_expr.fields()) {
if (entry.name().empty()) {
return absl::InvalidArgumentError("Struct field missing name");
}
if (!entry.has_value()) {
return absl::InvalidArgumentError("Struct field missing value");
}
CEL_ASSIGN_OR_RETURN(
auto field, value_factory().FindStructTypeFieldByName(resolved_name,
entry.name()));
if (!field.has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid message creation: field '", entry.name(),
"' not found in '", resolved_name, "'"));
}
fields.push_back(entry.name());
}
return std::make_pair(std::move(resolved_name), std::move(fields));
}
const Resolver& resolver_;
ValueManager& value_factory_;
absl::Status progress_status_;
std::stack<
std::pair<const cel::ast_internal::Expr*, std::unique_ptr<CondVisitor>>>
cond_visitor_stack_;
std::deque<std::pair<const cel::ast_internal::Expr*, std::string>>
namespace_stack_;
const cel::ast_internal::Expr* resolved_select_expr_;
const cel::RuntimeOptions& options_;
std::vector<ComprehensionStackRecord> comprehension_stack_;
absl::flat_hash_set<const cel::ast_internal::Expr*> suppressed_branches_;
const cel::ast_internal::Expr* resume_from_suppressed_branch_ = nullptr;
std::vector<std::unique_ptr<ProgramOptimizer>> program_optimizers_;
IssueCollector& issue_collector_;
ProgramBuilder& program_builder_;
PlannerContext extension_context_;
IndexManager index_manager_;
bool enable_optional_types_;
absl::optional<BlockInfo> block_;
};
void BinaryCondVisitor::PreVisit(const cel::ast_internal::Expr* expr) {
switch (cond_) {
case BinaryCond::kAnd:
ABSL_FALLTHROUGH_INTENDED;
case BinaryCond::kOr:
visitor_->ValidateOrError(
!expr->call_expr().has_target() &&
expr->call_expr().args().size() == 2,
"Invalid argument count for a binary function call.");
break;
case BinaryCond::kOptionalOr:
ABSL_FALLTHROUGH_INTENDED;
case BinaryCond::kOptionalOrValue:
visitor_->ValidateOrError(expr->call_expr().has_target() &&
expr->call_expr().args().size() == 1,
"Invalid argument count for or/orValue call.");
break;
}
}
void BinaryCondVisitor::PostVisitArg(int arg_num,
const cel::ast_internal::Expr* expr) {
if (short_circuiting_ && arg_num == 0 &&
(cond_ == BinaryCond::kAnd || cond_ == BinaryCond::kOr)) {
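    // After the first argument of && / ||, plan a conditional jump that skips
    // the second argument once the result is already determined (false for
    // &&, true for ||), leaving that boolean on the stack as the result.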
absl::StatusOr<std::unique_ptr<JumpStepBase>> jump_step;
switch (cond_) {
case BinaryCond::kAnd:
jump_step = CreateCondJumpStep(false, true, {}, expr->id());
break;
case BinaryCond::kOr:
jump_step = CreateCondJumpStep(true, true, {}, expr->id());
break;
default:
ABSL_UNREACHABLE();
}
if (jump_step.ok()) {
jump_step_ = Jump(visitor_->GetCurrentIndex(), jump_step->get());
}
visitor_->AddStep(std::move(jump_step));
}
}
void BinaryCondVisitor::PostVisitTarget(const cel::ast_internal::Expr* expr) {
if (short_circuiting_ && (cond_ == BinaryCond::kOptionalOr ||
cond_ == BinaryCond::kOptionalOrValue)) {
absl::StatusOr<std::unique_ptr<JumpStepBase>> jump_step;
switch (cond_) {
case BinaryCond::kOptionalOr:
jump_step = CreateOptionalHasValueJumpStep(false, expr->id());
break;
case BinaryCond::kOptionalOrValue:
jump_step = CreateOptionalHasValueJumpStep(true, expr->id());
break;
default:
ABSL_UNREACHABLE();
}
if (jump_step.ok()) {
jump_step_ = Jump(visitor_->GetCurrentIndex(), jump_step->get());
}
visitor_->AddStep(std::move(jump_step));
}
}
void BinaryCondVisitor::PostVisit(const cel::ast_internal::Expr* expr) {
switch (cond_) {
case BinaryCond::kAnd:
visitor_->AddStep(CreateAndStep(expr->id()));
break;
case BinaryCond::kOr:
visitor_->AddStep(CreateOrStep(expr->id()));
break;
    case BinaryCond::kOptionalOr:
      visitor_->AddStep(
          CreateOptionalOrStep(/*is_or_value=*/false, expr->id()));
      break;
    case BinaryCond::kOptionalOrValue:
      visitor_->AddStep(
          CreateOptionalOrStep(/*is_or_value=*/true, expr->id()));
      break;
default:
ABSL_UNREACHABLE();
}
if (short_circuiting_) {
visitor_->SetProgressStatusError(
jump_step_.set_target(visitor_->GetCurrentIndex()));
}
}
void TernaryCondVisitor::PreVisit(const cel::ast_internal::Expr* expr) {
visitor_->ValidateOrError(
!expr->call_expr().has_target() && expr->call_expr().args().size() == 3,
"Invalid argument count for a ternary function call.");
}
void TernaryCondVisitor::PostVisitArg(int arg_num,
const cel::ast_internal::Expr* expr) {
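  // Ternary layout: after the condition, a jump used to propagate non-bool
  // (error/unknown) conditions and a conditional jump to the "false" branch
  // are planned; after the "true" branch, an unconditional jump skips the
  // "false" branch. Targets are patched as later arguments are visited.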
if (arg_num == 0) {
auto error_jump = CreateBoolCheckJumpStep({}, expr->id());
if (error_jump.ok()) {
error_jump_ = Jump(visitor_->GetCurrentIndex(), error_jump->get());
}
visitor_->AddStep(std::move(error_jump));
auto jump_to_second = CreateCondJumpStep(false, false, {}, expr->id());
if (jump_to_second.ok()) {
jump_to_second_ =
Jump(visitor_->GetCurrentIndex(), jump_to_second->get());
}
visitor_->AddStep(std::move(jump_to_second));
} else if (arg_num == 1) {
auto jump_after_first = CreateJumpStep({}, expr->id());
    if (!jump_after_first.ok()) {
      visitor_->SetProgressStatusError(jump_after_first.status());
      return;
    }
jump_after_first_ =
Jump(visitor_->GetCurrentIndex(), jump_after_first->get());
visitor_->AddStep(std::move(jump_after_first));
if (visitor_->ValidateOrError(
jump_to_second_.exists(),
"Error configuring ternary operator: jump_to_second_ is null")) {
visitor_->SetProgressStatusError(
jump_to_second_.set_target(visitor_->GetCurrentIndex()));
}
}
}
void TernaryCondVisitor::PostVisit(const cel::ast_internal::Expr*) {
if (visitor_->ValidateOrError(
error_jump_.exists(),
"Error configuring ternary operator: error_jump_ is null")) {
visitor_->SetProgressStatusError(
error_jump_.set_target(visitor_->GetCurrentIndex()));
}
if (visitor_->ValidateOrError(
jump_after_first_.exists(),
"Error configuring ternary operator: jump_after_first_ is null")) {
visitor_->SetProgressStatusError(
jump_after_first_.set_target(visitor_->GetCurrentIndex()));
}
}
void ExhaustiveTernaryCondVisitor::PreVisit(
const cel::ast_internal::Expr* expr) {
visitor_->ValidateOrError(
!expr->call_expr().has_target() && expr->call_expr().args().size() == 3,
"Invalid argument count for a ternary function call.");
}
void ExhaustiveTernaryCondVisitor::PostVisit(
const cel::ast_internal::Expr* expr) {
visitor_->AddStep(CreateTernaryStep(expr->id()));
}
void ComprehensionVisitor::PreVisit(const cel::ast_internal::Expr* expr) {
if (is_trivial_) {
visitor_->SuppressBranch(&expr->comprehension_expr().iter_range());
visitor_->SuppressBranch(&expr->comprehension_expr().loop_condition());
visitor_->SuppressBranch(&expr->comprehension_expr().loop_step());
}
}
absl::Status ComprehensionVisitor::PostVisitArgDefault(
cel::ComprehensionArg arg_num, const cel::ast_internal::Expr* expr) {
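  // General comprehension layout:
  //   <iter_range> Init <accu_init> Next <loop_condition> Cond <loop_step>
  //   Jump(back to Next) <result> Finish
  // Next/Cond forward offsets (including error exits) are patched once the
  // downstream positions are known.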
switch (arg_num) {
case cel::ITER_RANGE: {
visitor_->AddStep(CreateComprehensionInitStep(expr->id()));
break;
}
case cel::ACCU_INIT: {
next_step_pos_ = visitor_->GetCurrentIndex();
next_step_ =
new ComprehensionNextStep(iter_slot_, accu_slot_, expr->id());
visitor_->AddStep(std::unique_ptr<ExpressionStep>(next_step_));
break;
}
case cel::LOOP_CONDITION: {
cond_step_pos_ = visitor_->GetCurrentIndex();
cond_step_ = new ComprehensionCondStep(iter_slot_, accu_slot_,
short_circuiting_, expr->id());
visitor_->AddStep(std::unique_ptr<ExpressionStep>(cond_step_));
break;
}
case cel::LOOP_STEP: {
      auto jump_to_next = CreateJumpStep({}, expr->id());
      if (!jump_to_next.ok()) {
        return jump_to_next.status();
      }
      Jump jump_helper(visitor_->GetCurrentIndex(), jump_to_next->get());
visitor_->AddStep(std::move(jump_to_next));
visitor_->SetProgressStatusError(jump_helper.set_target(next_step_pos_));
CEL_ASSIGN_OR_RETURN(
int jump_from_cond,
Jump::CalculateOffset(cond_step_pos_, visitor_->GetCurrentIndex()));
cond_step_->set_jump_offset(jump_from_cond);
CEL_ASSIGN_OR_RETURN(
int jump_from_next,
Jump::CalculateOffset(next_step_pos_, visitor_->GetCurrentIndex()));
next_step_->set_jump_offset(jump_from_next);
break;
}
case cel::RESULT: {
visitor_->AddStep(CreateComprehensionFinishStep(accu_slot_, expr->id()));
CEL_ASSIGN_OR_RETURN(
int jump_from_next,
Jump::CalculateOffset(next_step_pos_, visitor_->GetCurrentIndex()));
next_step_->set_error_jump_offset(jump_from_next);
CEL_ASSIGN_OR_RETURN(
int jump_from_cond,
Jump::CalculateOffset(cond_step_pos_, visitor_->GetCurrentIndex()));
cond_step_->set_error_jump_offset(jump_from_cond);
break;
}
}
return absl::OkStatus();
}
void ComprehensionVisitor::PostVisitArgTrivial(
cel::ComprehensionArg arg_num, const cel::ast_internal::Expr* expr) {
switch (arg_num) {
case cel::ITER_RANGE: {
break;
}
case cel::ACCU_INIT: {
if (!accu_init_extracted_) {
visitor_->AddStep(CreateAssignSlotAndPopStep(accu_slot_));
}
break;
}
case cel::LOOP_CONDITION: {
break;
}
case cel::LOOP_STEP: {
break;
}
case cel::RESULT: {
visitor_->AddStep(CreateClearSlotStep(accu_slot_, expr->id()));
break;
}
}
}
void ComprehensionVisitor::PostVisit(const cel::ast_internal::Expr* expr) {
if (is_trivial_) {
visitor_->MaybeMakeBindRecursive(expr, &expr->comprehension_expr(),
accu_slot_);
return;
}
visitor_->MaybeMakeComprehensionRecursive(expr, &expr->comprehension_expr(),
iter_slot_, accu_slot_);
}
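// Concatenates the main program and any extracted subexpressions into a
// single step vector, returning span views over each region: index 0 is the
// main program and extracted subexpression i lands at index i + 1 (which is
// why lazy-init steps reference slot.subexpression + 1).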
std::vector<ExecutionPathView> FlattenExpressionTable(
ProgramBuilder& program_builder, ExecutionPath& main) {
std::vector<std::pair<size_t, size_t>> ranges;
main = program_builder.FlattenMain();
ranges.push_back(std::make_pair(0, main.size()));
std::vector<ExecutionPath> subexpressions =
program_builder.FlattenSubexpressions();
for (auto& subexpression : subexpressions) {
ranges.push_back(std::make_pair(main.size(), subexpression.size()));
absl::c_move(subexpression, std::back_inserter(main));
}
std::vector<ExecutionPathView> subexpression_indexes;
subexpression_indexes.reserve(ranges.size());
for (const auto& range : ranges) {
subexpression_indexes.push_back(
absl::MakeSpan(main).subspan(range.first, range.second));
}
return subexpression_indexes;
}
}
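// Builds a FlatExpression: applies registered AST transforms, instantiates
// program optimizers, traverses the AST (with comprehension callbacks) to
// emit flat steps, and finally flattens the main program plus any extracted
// subexpressions into a single execution path.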
absl::StatusOr<FlatExpression> FlatExprBuilder::CreateExpressionImpl(
std::unique_ptr<Ast> ast, std::vector<RuntimeIssue>* issues) const {
cel::common_internal::LegacyValueManager value_factory(
cel::MemoryManagerRef::ReferenceCounting(),
type_registry_.GetComposedTypeProvider());
RuntimeIssue::Severity max_severity = options_.fail_on_warnings
? RuntimeIssue::Severity::kWarning
: RuntimeIssue::Severity::kError;
IssueCollector issue_collector(max_severity);
Resolver resolver(container_, function_registry_, type_registry_,
value_factory, type_registry_.resolveable_enums(),
options_.enable_qualified_type_identifiers);
ProgramBuilder program_builder;
PlannerContext extension_context(resolver, options_, value_factory,
issue_collector, program_builder);
auto& ast_impl = AstImpl::CastFromPublicAst(*ast);
if (absl::StartsWith(container_, ".") || absl::EndsWith(container_, ".")) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid expression container: '", container_, "'"));
}
for (const std::unique_ptr<AstTransform>& transform : ast_transforms_) {
CEL_RETURN_IF_ERROR(transform->UpdateAst(extension_context, ast_impl));
}
std::vector<std::unique_ptr<ProgramOptimizer>> optimizers;
for (const ProgramOptimizerFactory& optimizer_factory : program_optimizers_) {
CEL_ASSIGN_OR_RETURN(auto optimizer,
optimizer_factory(extension_context, ast_impl));
if (optimizer != nullptr) {
optimizers.push_back(std::move(optimizer));
}
}
FlatExprVisitor visitor(resolver, options_, std::move(optimizers),
ast_impl.reference_map(), value_factory,
issue_collector, program_builder, extension_context,
enable_optional_types_);
cel::TraversalOptions opts;
opts.use_comprehension_callbacks = true;
AstTraverse(ast_impl.root_expr(), visitor, opts);
if (!visitor.progress_status().ok()) {
return visitor.progress_status();
}
if (issues != nullptr) {
(*issues) = issue_collector.ExtractIssues();
}
ExecutionPath execution_path;
std::vector<ExecutionPathView> subexpressions =
FlattenExpressionTable(program_builder, execution_path);
return FlatExpression(std::move(execution_path), std::move(subexpressions),
visitor.slot_count(),
type_registry_.GetComposedTypeProvider(), options_);
}
} | #include "eval/compiler/flat_expr_builder.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/field_mask.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "base/function.h"
#include "base/function_descriptor.h"
#include "eval/compiler/cel_expression_builder_flat_impl.h"
#include "eval/compiler/constant_folding.h"
#include "eval/compiler/qualified_reference_resolver.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_builtins.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function_adapter.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_map_impl.h"
#include "eval/public/portable_cel_function_adapter.h"
#include "eval/public/structs/cel_proto_descriptor_pool_builder.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/public/structs/protobuf_descriptor_type_provider.h"
#include "eval/public/testing/matchers.h"
#include "eval/public/unknown_attribute_set.h"
#include "eval/public/unknown_set.h"
#include "eval/testutil/test_message.pb.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/proto_file_util.h"
#include "internal/proto_matchers.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/runtime_options.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/dynamic_message.h"
#include "google/protobuf/message.h"
#include "google/protobuf/text_format.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::Value;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::internal::test::EqualsProto;
using ::cel::internal::test::ReadBinaryProtoFromFile;
using ::google::api::expr::v1alpha1::CheckedExpr;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::test::v1::proto3::TestAllTypes;
using ::testing::_;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::testing::Truly;
inline constexpr absl::string_view kSimpleTestMessageDescriptorSetFile =
"eval/testutil/"
"simple_test_message_proto-descriptor-set.proto.bin";
class ConcatFunction : public CelFunction {
public:
explicit ConcatFunction() : CelFunction(CreateDescriptor()) {}
static CelFunctionDescriptor CreateDescriptor() {
return CelFunctionDescriptor{
"concat", false, {CelValue::Type::kString, CelValue::Type::kString}};
}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
google::protobuf::Arena* arena) const override {
if (args.size() != 2) {
return absl::InvalidArgumentError("Bad arguments number");
}
std::string concat = std::string(args[0].StringOrDie().value()) +
std::string(args[1].StringOrDie().value());
auto* concatenated =
google::protobuf::Arena::Create<std::string>(arena, std::move(concat));
*result = CelValue::CreateString(concatenated);
return absl::OkStatus();
}
};
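// Test-only zero-argument function that counts its invocations; used to
// observe evaluation counts and short-circuiting behavior.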
class RecorderFunction : public CelFunction {
public:
explicit RecorderFunction(const std::string& name, int* count)
: CelFunction(CelFunctionDescriptor{name, false, {}}), count_(count) {}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
google::protobuf::Arena* arena) const override {
if (!args.empty()) {
      return absl::InvalidArgumentError("Bad number of arguments");
}
(*count_)++;
*result = CelValue::CreateBool(true);
return absl::OkStatus();
}
 private:
  int* count_;
};
TEST(FlatExprBuilderTest, SimpleEndToEnd) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function("concat");
auto arg1 = call_expr->add_args();
arg1->mutable_const_expr()->set_string_value("prefix");
auto arg2 = call_expr->add_args();
arg2->mutable_ident_expr()->set_name("value");
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(
builder.GetRegistry()->Register(std::make_unique<ConcatFunction>()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
std::string variable = "test";
Activation activation;
activation.InsertValue("value", CelValue::CreateString(&variable));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsString());
EXPECT_THAT(result.StringOrDie().value(), Eq("prefixtest"));
}
TEST(FlatExprBuilderTest, ExprUnset) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid empty expression")));
}
TEST(FlatExprBuilderTest, ConstValueUnset) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
expr.mutable_const_expr();
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("unspecified constant")));
}
TEST(FlatExprBuilderTest, MapKeyValueUnset) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
auto* entry = expr.mutable_struct_expr()->add_entries();
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Map entry missing key")));
entry->mutable_map_key()->mutable_const_expr()->set_bool_value(true);
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Map entry missing value")));
}
TEST(FlatExprBuilderTest, MessageFieldValueUnset) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
auto* create_message = expr.mutable_struct_expr();
create_message->set_message_name("google.protobuf.Value");
auto* entry = create_message->add_entries();
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Struct field missing name")));
entry->set_field_key("bool_value");
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Struct field missing value")));
}
TEST(FlatExprBuilderTest, BinaryCallTooManyArguments) {
Expr expr;
SourceInfo source_info;
CelExpressionBuilderFlatImpl builder;
auto* call = expr.mutable_call_expr();
call->set_function(builtin::kAnd);
call->mutable_target()->mutable_const_expr()->set_string_value("random");
call->add_args()->mutable_const_expr()->set_bool_value(false);
call->add_args()->mutable_const_expr()->set_bool_value(true);
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid argument count")));
}
TEST(FlatExprBuilderTest, TernaryCallTooManyArguments) {
Expr expr;
SourceInfo source_info;
auto* call = expr.mutable_call_expr();
call->set_function(builtin::kTernary);
call->mutable_target()->mutable_const_expr()->set_string_value("random");
call->add_args()->mutable_const_expr()->set_bool_value(false);
call->add_args()->mutable_const_expr()->set_int64_value(1);
call->add_args()->mutable_const_expr()->set_int64_value(2);
{
cel::RuntimeOptions options;
options.short_circuiting = true;
CelExpressionBuilderFlatImpl builder(options);
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid argument count")));
}
{
cel::RuntimeOptions options;
options.short_circuiting = false;
CelExpressionBuilderFlatImpl builder(options);
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid argument count")));
}
}
TEST(FlatExprBuilderTest, DelayedFunctionResolutionErrors) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function("concat");
auto arg1 = call_expr->add_args();
arg1->mutable_const_expr()->set_string_value("prefix");
auto arg2 = call_expr->add_args();
arg2->mutable_ident_expr()->set_name("value");
cel::RuntimeOptions options;
options.fail_on_warnings = false;
CelExpressionBuilderFlatImpl builder(options);
std::vector<absl::Status> warnings;
ASSERT_OK_AND_ASSIGN(
auto cel_expr, builder.CreateExpression(&expr, &source_info, &warnings));
std::string variable = "test";
Activation activation;
activation.InsertValue("value", CelValue::CreateString(&variable));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsError());
EXPECT_THAT(result.ErrorOrDie()->message(),
Eq("No matching overloads found : concat(string, string)"));
  ASSERT_THAT(warnings, SizeIs(1));
  EXPECT_EQ(warnings[0].code(), absl::StatusCode::kInvalidArgument);
  EXPECT_THAT(std::string(warnings[0].message()),
              HasSubstr("No overloads provided"));
}
TEST(FlatExprBuilderTest, Shortcircuiting) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function("_||_");
auto arg1 = call_expr->add_args();
arg1->mutable_call_expr()->set_function("recorder1");
auto arg2 = call_expr->add_args();
arg2->mutable_call_expr()->set_function("recorder2");
Activation activation;
google::protobuf::Arena arena;
{
cel::RuntimeOptions options;
options.short_circuiting = true;
CelExpressionBuilderFlatImpl builder(options);
    ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
int count1 = 0;
int count2 = 0;
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder1", &count1)));
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder2", &count2)));
ASSERT_OK_AND_ASSIGN(auto cel_expr_on,
builder.CreateExpression(&expr, &source_info));
ASSERT_OK(cel_expr_on->Evaluate(activation, &arena));
EXPECT_THAT(count1, Eq(1));
EXPECT_THAT(count2, Eq(0));
}
{
cel::RuntimeOptions options;
options.short_circuiting = false;
CelExpressionBuilderFlatImpl builder(options);
    ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
int count1 = 0;
int count2 = 0;
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder1", &count1)));
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder2", &count2)));
ASSERT_OK_AND_ASSIGN(auto cel_expr_off,
builder.CreateExpression(&expr, &source_info));
ASSERT_OK(cel_expr_off->Evaluate(activation, &arena));
EXPECT_THAT(count1, Eq(1));
EXPECT_THAT(count2, Eq(1));
}
}
TEST(FlatExprBuilderTest, ShortcircuitingComprehension) {
Expr expr;
SourceInfo source_info;
auto comprehension_expr = expr.mutable_comprehension_expr();
comprehension_expr->set_iter_var("x");
auto list_expr =
comprehension_expr->mutable_iter_range()->mutable_list_expr();
list_expr->add_elements()->mutable_const_expr()->set_int64_value(1);
list_expr->add_elements()->mutable_const_expr()->set_int64_value(2);
list_expr->add_elements()->mutable_const_expr()->set_int64_value(3);
comprehension_expr->set_accu_var("accu");
comprehension_expr->mutable_accu_init()->mutable_const_expr()->set_bool_value(
false);
comprehension_expr->mutable_loop_condition()
->mutable_const_expr()
->set_bool_value(false);
comprehension_expr->mutable_loop_step()->mutable_call_expr()->set_function(
"recorder_function1");
comprehension_expr->mutable_result()->mutable_const_expr()->set_bool_value(
false);
Activation activation;
google::protobuf::Arena arena;
{
cel::RuntimeOptions options;
options.short_circuiting = true;
CelExpressionBuilderFlatImpl builder(options);
    ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
int count = 0;
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder_function1", &count)));
ASSERT_OK_AND_ASSIGN(auto cel_expr_on,
builder.CreateExpression(&expr, &source_info));
ASSERT_OK(cel_expr_on->Evaluate(activation, &arena));
EXPECT_THAT(count, Eq(0));
}
{
cel::RuntimeOptions options;
options.short_circuiting = false;
CelExpressionBuilderFlatImpl builder(options);
    ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
int count = 0;
ASSERT_OK(builder.GetRegistry()->Register(
std::make_unique<RecorderFunction>("recorder_function1", &count)));
ASSERT_OK_AND_ASSIGN(auto cel_expr_off,
builder.CreateExpression(&expr, &source_info));
ASSERT_OK(cel_expr_off->Evaluate(activation, &arena));
EXPECT_THAT(count, Eq(3));
}
}
TEST(FlatExprBuilderTest, IdentExprUnsetName) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(ident_expr {})", &expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'name' must not be empty")));
}
TEST(FlatExprBuilderTest, SelectExprUnsetField) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(select_expr{
operand{ ident_expr {name: 'var'} }
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'field' must not be empty")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetAccuVar) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(comprehension_expr{})", &expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'accu_var' must not be empty")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetIterVar) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr{accu_var: "a"}
)",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'iter_var' must not be empty")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetAccuInit) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr{
accu_var: "a"
iter_var: "b"}
)",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'accu_init' must be set")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetLoopCondition) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr{
accu_var: 'a'
iter_var: 'b'
accu_init {
const_expr {bool_value: true}
}}
)",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'loop_condition' must be set")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetLoopStep) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr{
accu_var: 'a'
iter_var: 'b'
accu_init {
const_expr {bool_value: true}
}
loop_condition {
const_expr {bool_value: true}
}}
)",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'loop_step' must be set")));
}
TEST(FlatExprBuilderTest, ComprehensionExprUnsetResult) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr{
accu_var: 'a'
iter_var: 'b'
accu_init {
const_expr {bool_value: true}
}
loop_condition {
const_expr {bool_value: true}
}
loop_step {
const_expr {bool_value: false}
}}
)",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("'result' must be set")));
}
TEST(FlatExprBuilderTest, MapComprehension) {
Expr expr;
SourceInfo source_info;
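  // Hand-rolled comprehension equivalent to {1: "", 2: ""}.all(k, k > 0).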
google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr {
iter_var: "k"
accu_var: "accu"
accu_init {
const_expr { bool_value: true }
}
loop_condition { ident_expr { name: "accu" } }
result { ident_expr { name: "accu" } }
loop_step {
call_expr {
function: "_&&_"
args {
ident_expr { name: "accu" }
}
args {
call_expr {
function: "_>_"
args { ident_expr { name: "k" } }
args { const_expr { int64_value: 0 } }
}
}
}
}
iter_range {
struct_expr {
entries {
map_key { const_expr { int64_value: 1 } }
value { const_expr { string_value: "" } }
}
entries {
map_key { const_expr { int64_value: 2 } }
value { const_expr { string_value: "" } }
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
EXPECT_TRUE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, InvalidContainer) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
call_expr {
function: "_&&_"
args {
ident_expr {
name: "foo"
}
}
args {
ident_expr {
name: "bar"
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
builder.set_container(".bad");
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("container: '.bad'")));
builder.set_container("bad.");
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("container: 'bad.'")));
}
TEST(FlatExprBuilderTest, ParsedNamespacedFunctionSupport) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("ext.XOr(a, b)"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
using FunctionAdapterT = FunctionAdapter<bool, bool, bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"ext.XOr", false,
[](google::protobuf::Arena*, bool a, bool b) { return a != b; },
builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
act1.InsertValue("a", CelValue::CreateBool(false));
act1.InsertValue("b", CelValue::CreateBool(true));
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
Activation act2;
act2.InsertValue("a", CelValue::CreateBool(true));
act2.InsertValue("b", CelValue::CreateBool(true));
ASSERT_OK_AND_ASSIGN(result, cel_expr->Evaluate(act2, &arena));
EXPECT_THAT(result, test::IsCelBool(false));
}
TEST(FlatExprBuilderTest, ParsedNamespacedFunctionSupportWithContainer) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("XOr(a, b)"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
builder.set_container("ext");
using FunctionAdapterT = FunctionAdapter<bool, bool, bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"ext.XOr", false,
[](google::protobuf::Arena*, bool a, bool b) { return a != b; },
builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
act1.InsertValue("a", CelValue::CreateBool(false));
act1.InsertValue("b", CelValue::CreateBool(true));
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
Activation act2;
act2.InsertValue("a", CelValue::CreateBool(true));
act2.InsertValue("b", CelValue::CreateBool(true));
ASSERT_OK_AND_ASSIGN(result, cel_expr->Evaluate(act2, &arena));
EXPECT_THAT(result, test::IsCelBool(false));
}
TEST(FlatExprBuilderTest, ParsedNamespacedFunctionResolutionOrder) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("c.d.Get()"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
builder.set_container("a.b");
using FunctionAdapterT = FunctionAdapter<bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"a.b.c.d.Get", false,
[](google::protobuf::Arena*) { return true; }, builder.GetRegistry()));
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"c.d.Get", false, [](google::protobuf::Arena*) { return false; },
builder.GetRegistry()));
  ASSERT_OK((FunctionAdapter<bool, bool>::CreateAndRegister(
      "Get", true, [](google::protobuf::Arena*, bool) { return false; },
      builder.GetRegistry())));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
}
TEST(FlatExprBuilderTest,
ParsedNamespacedFunctionResolutionOrderParentContainer) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("c.d.Get()"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
builder.set_container("a.b");
using FunctionAdapterT = FunctionAdapter<bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"a.c.d.Get", false,
[](google::protobuf::Arena*) { return true; }, builder.GetRegistry()));
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"c.d.Get", false, [](google::protobuf::Arena*) { return false; },
builder.GetRegistry()));
  ASSERT_OK((FunctionAdapter<bool, bool>::CreateAndRegister(
      "Get", true, [](google::protobuf::Arena*, bool) { return false; },
      builder.GetRegistry())));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
}
TEST(FlatExprBuilderTest,
ParsedNamespacedFunctionResolutionOrderExplicitGlobal) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse(".c.d.Get()"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
builder.set_container("a.b");
using FunctionAdapterT = FunctionAdapter<bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"a.c.d.Get", false,
[](google::protobuf::Arena*) { return false; }, builder.GetRegistry()));
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"c.d.Get", false, [](google::protobuf::Arena*) { return true; },
builder.GetRegistry()));
  ASSERT_OK((FunctionAdapter<bool, bool>::CreateAndRegister(
      "Get", true, [](google::protobuf::Arena*, bool) { return false; },
      builder.GetRegistry())));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
}
TEST(FlatExprBuilderTest, ParsedNamespacedFunctionResolutionOrderReceiverCall) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("e.Get()"));
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kAlways));
builder.set_container("a.b");
using FunctionAdapterT = FunctionAdapter<bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"a.c.d.Get", false,
[](google::protobuf::Arena*) { return false; }, builder.GetRegistry()));
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"c.d.Get", false, [](google::protobuf::Arena*) { return false; },
builder.GetRegistry()));
  ASSERT_OK((FunctionAdapter<bool, bool>::CreateAndRegister(
      "Get", true, [](google::protobuf::Arena*, bool) { return true; },
      builder.GetRegistry())));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(
&expr.expr(), &expr.source_info()));
google::protobuf::Arena arena;
Activation act1;
act1.InsertValue("e", CelValue::CreateBool(false));
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelBool(true));
}
TEST(FlatExprBuilderTest, ParsedNamespacedFunctionSupportDisabled) {
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse("ext.XOr(a, b)"));
cel::RuntimeOptions options;
options.fail_on_warnings = false;
CelExpressionBuilderFlatImpl builder(options);
std::vector<absl::Status> build_warnings;
builder.set_container("ext");
using FunctionAdapterT = FunctionAdapter<bool, bool, bool>;
ASSERT_OK(FunctionAdapterT::CreateAndRegister(
"ext.XOr", false,
[](google::protobuf::Arena*, bool a, bool b) { return a != b; },
builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(
auto cel_expr, builder.CreateExpression(&expr.expr(), &expr.source_info(),
&build_warnings));
google::protobuf::Arena arena;
Activation act1;
act1.InsertValue("a", CelValue::CreateBool(false));
act1.InsertValue("b", CelValue::CreateBool(true));
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(act1, &arena));
EXPECT_THAT(result, test::IsCelError(StatusIs(absl::StatusCode::kUnknown,
HasSubstr("ext"))));
}
TEST(FlatExprBuilderTest, BasicCheckedExprSupport) {
CheckedExpr expr;
google::protobuf::TextFormat::ParseFromString(R"(
expr {
id: 1
call_expr {
function: "_&&_"
args {
id: 2
ident_expr {
name: "foo"
}
}
args {
id: 3
ident_expr {
name: "bar"
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(&expr));
Activation activation;
activation.InsertValue("foo", CelValue::CreateBool(true));
activation.InsertValue("bar", CelValue::CreateBool(true));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
EXPECT_TRUE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, CheckedExprWithReferenceMap) {
CheckedExpr expr;
google::protobuf::TextFormat::ParseFromString(R"(
reference_map {
key: 2
value {
name: "foo.var1"
}
}
reference_map {
key: 4
value {
name: "bar.var2"
}
}
expr {
id: 1
call_expr {
function: "_&&_"
args {
id: 2
select_expr {
field: "var1"
operand {
id: 3
ident_expr {
name: "foo"
}
}
}
}
args {
id: 4
select_expr {
field: "var2"
operand {
ident_expr {
name: "bar"
}
}
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kCheckedOnly));
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(&expr));
Activation activation;
activation.InsertValue("foo.var1", CelValue::CreateBool(true));
activation.InsertValue("bar.var2", CelValue::CreateBool(true));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
EXPECT_TRUE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, CheckedExprWithReferenceMapFunction) {
CheckedExpr expr;
google::protobuf::TextFormat::ParseFromString(R"(
reference_map {
key: 1
value {
overload_id: "com.foo.ext.and"
}
}
reference_map {
key: 3
value {
name: "com.foo.var1"
}
}
reference_map {
key: 4
value {
name: "bar.var2"
}
}
expr {
id: 1
call_expr {
function: "and"
target {
id: 2
ident_expr {
name: "ext"
}
}
args {
id: 3
ident_expr {
name: "var1"
}
}
args {
id: 4
select_expr {
field: "var2"
operand {
id: 5
ident_expr {
name: "bar"
}
}
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kCheckedOnly));
builder.set_container("com.foo");
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK((FunctionAdapter<bool, bool, bool>::CreateAndRegister(
"com.foo.ext.and", false,
[](google::protobuf::Arena*, bool lhs, bool rhs) { return lhs && rhs; },
builder.GetRegistry())));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(&expr));
Activation activation;
activation.InsertValue("com.foo.var1", CelValue::CreateBool(true));
activation.InsertValue("bar.var2", CelValue::CreateBool(true));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
EXPECT_TRUE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, CheckedExprActivationMissesReferences) {
CheckedExpr expr;
google::protobuf::TextFormat::ParseFromString(R"(
reference_map {
key: 2
value {
name: "foo.var1"
}
}
reference_map {
key: 5
value {
name: "bar"
}
}
expr {
id: 1
call_expr {
function: "_&&_"
args {
id: 2
select_expr {
field: "var1"
operand {
id: 3
ident_expr {
name: "foo"
}
}
}
}
args {
id: 4
select_expr {
field: "var2"
operand {
id: 5
ident_expr {
name: "bar"
}
}
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kCheckedOnly));
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(&expr));
Activation activation;
activation.InsertValue("foo.var1", CelValue::CreateBool(true));
activation.InsertValue("bar.var2", CelValue::CreateBool(true));
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*(result.ErrorOrDie()),
StatusIs(absl::StatusCode::kUnknown,
HasSubstr("No value with name \"bar\" found")));
std::vector<std::pair<CelValue, CelValue>> map_pairs{
{CelValue::CreateStringView("var2"), CelValue::CreateBool(false)}};
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelMap> map_value,
                       CreateContainerBackedMap(absl::MakeSpan(map_pairs)));
activation.InsertValue("bar", CelValue::CreateMap(map_value.get()));
ASSERT_OK_AND_ASSIGN(result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
EXPECT_FALSE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, CheckedExprWithReferenceMapAndConstantFolding) {
CheckedExpr expr;
google::protobuf::TextFormat::ParseFromString(R"(
reference_map {
key: 3
value {
name: "var1"
value {
int64_value: 1
}
}
}
expr {
id: 1
struct_expr {
entries {
id: 2
map_key {
id: 3
ident_expr {
name: "var1"
}
}
value {
id: 4
const_expr {
string_value: "hello"
}
}
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
builder.flat_expr_builder().AddAstTransform(
NewReferenceResolverExtension(ReferenceResolverOption::kCheckedOnly));
google::protobuf::Arena arena;
auto memory_manager = ProtoMemoryManagerRef(&arena);
builder.flat_expr_builder().AddProgramOptimizer(
cel::runtime_internal::CreateConstantFoldingOptimizer(memory_manager));
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr, builder.CreateExpression(&expr));
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsMap());
  auto m = result.MapOrDie();
  auto v = m->Get(&arena, CelValue::CreateInt64(1L));
  ASSERT_TRUE(v.has_value());
  EXPECT_THAT(v->StringOrDie().value(), Eq("hello"));
}
TEST(FlatExprBuilderTest, ComprehensionWorksForError) {
Expr expr;
SourceInfo source_info;
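  // The iter_range indexes an empty map ({}[0]), so evaluating the range
  // errors and the comprehension should propagate that error.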
google::protobuf::TextFormat::ParseFromString(R"(
id: 4
comprehension_expr {
iter_var: "x"
iter_range {
id: 2
call_expr {
function: "_[_]"
args {
id: 1
struct_expr {
}
}
args {
id: 3
const_expr {
int64_value: 0
}
}
}
}
accu_var: "__result__"
accu_init {
id: 7
const_expr {
bool_value: true
}
}
loop_condition {
id: 8
call_expr {
function: "__not_strictly_false__"
args {
id: 9
ident_expr {
name: "__result__"
}
}
}
}
loop_step {
id: 10
call_expr {
function: "_&&_"
args {
id: 11
ident_expr {
name: "__result__"
}
}
args {
id: 6
ident_expr {
name: "x"
}
}
}
}
result {
id: 12
ident_expr {
name: "__result__"
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsError());
}
TEST(FlatExprBuilderTest, ComprehensionWorksForNonContainer) {
Expr expr;
SourceInfo source_info;
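  // The iter_range is an int64 constant, which is not an iterable container.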
google::protobuf::TextFormat::ParseFromString(R"(
id: 4
comprehension_expr {
iter_var: "x"
iter_range {
id: 2
const_expr {
int64_value: 0
}
}
accu_var: "__result__"
accu_init {
id: 7
const_expr {
bool_value: true
}
}
loop_condition {
id: 8
call_expr {
function: "__not_strictly_false__"
args {
id: 9
ident_expr {
name: "__result__"
}
}
}
}
loop_step {
id: 10
call_expr {
function: "_&&_"
args {
id: 11
ident_expr {
name: "__result__"
}
}
args {
id: 6
ident_expr {
name: "x"
}
}
}
}
result {
id: 12
ident_expr {
name: "__result__"
}
}
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsError());
EXPECT_THAT(result.ErrorOrDie()->message(),
Eq("No matching overloads found : <iter_range>"));
}
TEST(FlatExprBuilderTest, ComprehensionBudget) {
Expr expr;
SourceInfo source_info;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(R"(
comprehension_expr {
iter_var: "k"
accu_var: "accu"
accu_init {
const_expr { bool_value: true }
}
loop_condition { ident_expr { name: "accu" } }
result { ident_expr { name: "accu" } }
loop_step {
call_expr {
function: "_&&_"
args {
ident_expr { name: "accu" }
}
args {
call_expr {
function: "_>_"
args { ident_expr { name: "k" } }
args { const_expr { int64_value: 0 } }
}
}
}
}
iter_range {
list_expr {
elements { const_expr { int64_value: 1 } }
elements { const_expr { int64_value: 2 } }
}
}
})",
&expr));
cel::RuntimeOptions options;
options.comprehension_max_iterations = 1;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
Activation activation;
google::protobuf::Arena arena;
EXPECT_THAT(cel_expr->Evaluate(activation, &arena).status(),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Iteration budget exceeded")));
}
TEST(FlatExprBuilderTest, SimpleEnumTest) {
TestMessage message;
Expr expr;
SourceInfo source_info;
constexpr char enum_name[] =
"google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_1";
std::vector<std::string> enum_name_parts = absl::StrSplit(enum_name, '.');
Expr* cur_expr = &expr;
for (int i = enum_name_parts.size() - 1; i > 0; i--) {
auto select_expr = cur_expr->mutable_select_expr();
select_expr->set_field(enum_name_parts[i]);
cur_expr = select_expr->mutable_operand();
}
cur_expr->mutable_ident_expr()->set_name(enum_name_parts[0]);
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->Register(TestMessage::TestEnum_descriptor());
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, SimpleEnumIdentTest) {
TestMessage message;
Expr expr;
SourceInfo source_info;
constexpr char enum_name[] =
"google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_1";
Expr* cur_expr = &expr;
cur_expr->mutable_ident_expr()->set_name(enum_name);
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->Register(TestMessage::TestEnum_descriptor());
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(CelValue result, cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, ContainerStringFormat) {
Expr expr;
SourceInfo source_info;
expr.mutable_ident_expr()->set_name("ident");
CelExpressionBuilderFlatImpl builder;
builder.set_container("");
ASSERT_OK(builder.CreateExpression(&expr, &source_info));
builder.set_container("random.namespace");
ASSERT_OK(builder.CreateExpression(&expr, &source_info));
builder.set_container(".random.namespace");
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid expression container")));
builder.set_container("random.namespace.");
EXPECT_THAT(builder.CreateExpression(&expr, &source_info).status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid expression container")));
}
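// Builds a select chain from the dotted enum name, evaluates it under the
// given container, and stores the evaluated value in `result`.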
void EvalExpressionWithEnum(absl::string_view enum_name,
absl::string_view container, CelValue* result) {
TestMessage message;
Expr expr;
SourceInfo source_info;
std::vector<std::string> enum_name_parts = absl::StrSplit(enum_name, '.');
Expr* cur_expr = &expr;
for (int i = enum_name_parts.size() - 1; i > 0; i--) {
auto select_expr = cur_expr->mutable_select_expr();
select_expr->set_field(enum_name_parts[i]);
cur_expr = select_expr->mutable_operand();
}
cur_expr->mutable_ident_expr()->set_name(enum_name_parts[0]);
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->Register(TestMessage::TestEnum_descriptor());
builder.GetTypeRegistry()->Register(TestEnum_descriptor());
builder.set_container(std::string(container));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
Activation activation;
auto eval = cel_expr->Evaluate(activation, &arena);
ASSERT_OK(eval);
*result = eval.value();
}
TEST(FlatExprBuilderTest, ShortEnumResolution) {
CelValue result;
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"TestEnum.TEST_ENUM_1", "google.api.expr.runtime.TestMessage", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, FullEnumNameWithContainerResolution) {
CelValue result;
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"google.api.expr.runtime.TestMessage.TestEnum.TEST_ENUM_1",
"very.random.Namespace", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, SameShortNameEnumResolution) {
CelValue result;
ASSERT_TRUE(static_cast<int>(TestEnum::TEST_ENUM_1) !=
static_cast<int>(TestMessage::TEST_ENUM_1));
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"TestEnum.TEST_ENUM_1", "google.api.expr.runtime.TestMessage", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"TestEnum.TEST_ENUM_3", "google.api.expr.runtime.TestMessage", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestEnum::TEST_ENUM_3));
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"TestEnum.TEST_ENUM_1", "google.api.expr.runtime", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestEnum::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, PartialQualifiedEnumResolution) {
CelValue result;
ASSERT_NO_FATAL_FAILURE(EvalExpressionWithEnum(
"runtime.TestMessage.TestEnum.TEST_ENUM_1", "google.api.expr", &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(TestMessage::TEST_ENUM_1));
}
TEST(FlatExprBuilderTest, MapFieldPresence) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
id: 1,
select_expr{
operand {
id: 2
ident_expr{ name: "msg" }
}
field: "string_int32_map"
test_only: true
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
{
TestMessage message;
auto strMap = message.mutable_string_int32_map();
strMap->insert({"key", 1});
Activation activation;
activation.InsertValue("msg",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
{
TestMessage message;
Activation activation;
activation.InsertValue("msg",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_FALSE(result.BoolOrDie());
}
}
TEST(FlatExprBuilderTest, RepeatedFieldPresence) {
Expr expr;
SourceInfo source_info;
google::protobuf::TextFormat::ParseFromString(R"(
id: 1,
select_expr{
operand {
id: 2
ident_expr{ name: "msg" }
}
field: "int32_list"
test_only: true
})",
&expr);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
{
TestMessage message;
message.add_int32_list(1);
Activation activation;
activation.InsertValue("msg",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_TRUE(result.BoolOrDie());
}
{
TestMessage message;
Activation activation;
activation.InsertValue("msg",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
cel_expr->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool());
ASSERT_FALSE(result.BoolOrDie());
}
}
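// Builds and evaluates the ternary expression `selector ? value1 : value2`
// with the supplied operand values.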
absl::Status RunTernaryExpression(CelValue selector, CelValue value1,
CelValue value2, google::protobuf::Arena* arena,
CelValue* result) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function(builtin::kTernary);
auto arg0 = call_expr->add_args();
arg0->mutable_ident_expr()->set_name("selector");
auto arg1 = call_expr->add_args();
arg1->mutable_ident_expr()->set_name("value1");
auto arg2 = call_expr->add_args();
arg2->mutable_ident_expr()->set_name("value2");
CelExpressionBuilderFlatImpl builder;
CEL_ASSIGN_OR_RETURN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
std::string variable = "test";
Activation activation;
activation.InsertValue("selector", selector);
activation.InsertValue("value1", value1);
activation.InsertValue("value2", value2);
CEL_ASSIGN_OR_RETURN(auto eval, cel_expr->Evaluate(activation, arena));
*result = eval;
return absl::OkStatus();
}
TEST(FlatExprBuilderTest, Ternary) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function(builtin::kTernary);
auto arg0 = call_expr->add_args();
arg0->mutable_ident_expr()->set_name("selector");
auto arg1 = call_expr->add_args();
arg1->mutable_ident_expr()->set_name("value1");
auto arg2 = call_expr->add_args();
arg2->mutable_ident_expr()->set_name("value1");
CelExpressionBuilderFlatImpl builder;
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
{
CelValue result;
ASSERT_OK(RunTernaryExpression(CelValue::CreateBool(true),
CelValue::CreateInt64(1),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(1));
UnknownSet unknown_set;
ASSERT_OK(RunTernaryExpression(CelValue::CreateBool(true),
CelValue::CreateUnknownSet(&unknown_set),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsUnknownSet());
ASSERT_OK(RunTernaryExpression(
CelValue::CreateBool(true), CelValue::CreateInt64(1),
CelValue::CreateUnknownSet(&unknown_set), &arena, &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(1));
}
{
CelValue result;
ASSERT_OK(RunTernaryExpression(CelValue::CreateBool(false),
CelValue::CreateInt64(1),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(2));
UnknownSet unknown_set;
ASSERT_OK(RunTernaryExpression(CelValue::CreateBool(false),
CelValue::CreateUnknownSet(&unknown_set),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsInt64());
EXPECT_THAT(result.Int64OrDie(), Eq(2));
ASSERT_OK(RunTernaryExpression(
CelValue::CreateBool(false), CelValue::CreateInt64(1),
CelValue::CreateUnknownSet(&unknown_set), &arena, &result));
ASSERT_TRUE(result.IsUnknownSet());
}
{
CelValue result;
ASSERT_OK(RunTernaryExpression(CreateErrorValue(&arena, "error"),
CelValue::CreateInt64(1),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsError());
}
{
UnknownSet unknown_set;
CelValue result;
ASSERT_OK(RunTernaryExpression(CelValue::CreateUnknownSet(&unknown_set),
CelValue::CreateInt64(1),
CelValue::CreateInt64(2), &arena, &result));
ASSERT_TRUE(result.IsUnknownSet());
EXPECT_THAT(unknown_set, Eq(*result.UnknownSetOrDie()));
}
{
CelAttribute selector_attr("selector", {});
CelAttribute value1_attr("value1", {});
CelAttribute value2_attr("value2", {});
UnknownSet unknown_selector(UnknownAttributeSet({selector_attr}));
UnknownSet unknown_value1(UnknownAttributeSet({value1_attr}));
UnknownSet unknown_value2(UnknownAttributeSet({value2_attr}));
CelValue result;
ASSERT_OK(RunTernaryExpression(
CelValue::CreateUnknownSet(&unknown_selector),
CelValue::CreateUnknownSet(&unknown_value1),
CelValue::CreateUnknownSet(&unknown_value2), &arena, &result));
ASSERT_TRUE(result.IsUnknownSet());
const UnknownSet* result_set = result.UnknownSetOrDie();
EXPECT_THAT(result_set->unknown_attributes().size(), Eq(1));
EXPECT_THAT(result_set->unknown_attributes().begin()->variable_name(),
Eq("selector"));
}
}
TEST(FlatExprBuilderTest, EmptyCallList) {
std::vector<std::string> operators = {"_&&_", "_||_", "_?_:_"};
for (const auto& op : operators) {
Expr expr;
SourceInfo source_info;
auto call_expr = expr.mutable_call_expr();
call_expr->set_function(op);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
auto build = builder.CreateExpression(&expr, &source_info);
ASSERT_FALSE(build.ok());
}
}
TEST(FlatExprBuilderTest, HeterogeneousListsAllowed) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("[17, 'seventeen']"));
cel::RuntimeOptions options;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsList()) << result.DebugString();
const auto& list = *result.ListOrDie();
ASSERT_EQ(list.size(), 2);
CelValue elem0 = list.Get(&arena, 0);
CelValue elem1 = list.Get(&arena, 1);
EXPECT_THAT(elem0, test::IsCelInt64(17));
EXPECT_THAT(elem1, test::IsCelString("seventeen"));
}
TEST(FlatExprBuilderTest, NullUnboxingEnabled) {
TestMessage message;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("message.int32_wrapper_value"));
cel::RuntimeOptions options;
options.enable_empty_wrapper_null_unboxing = true;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("message",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_TRUE(result.IsNull());
}
TEST(FlatExprBuilderTest, TypeResolve) {
TestMessage message;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("type(message) == runtime.TestMessage"));
cel::RuntimeOptions options;
options.enable_qualified_type_identifiers = true;
CelExpressionBuilderFlatImpl builder(options);
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.set_container("google.api.expr");
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("message",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsBool()) << result.DebugString();
EXPECT_TRUE(result.BoolOrDie());
}
TEST(FlatExprBuilderTest, AnyPackingList) {
google::protobuf::LinkMessageReflection<TestAllTypes>();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("TestAllTypes{single_any: [1, 2, 3]}"));
cel::RuntimeOptions options;
CelExpressionBuilderFlatImpl builder(options);
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.set_container("google.api.expr.test.v1.proto3");
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result,
test::IsCelMessage(EqualsProto(
R"pb(single_any {
[type.googleapis.com/google.protobuf.ListValue] {
values { number_value: 1 }
values { number_value: 2 }
values { number_value: 3 }
}
})pb")))
<< result.DebugString();
}
TEST(FlatExprBuilderTest, AnyPackingNestedNumbers) {
google::protobuf::LinkMessageReflection<TestAllTypes>();
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("TestAllTypes{single_any: [1, 2.3]}"));
cel::RuntimeOptions options;
CelExpressionBuilderFlatImpl builder(options);
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.set_container("google.api.expr.test.v1.proto3");
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result,
test::IsCelMessage(EqualsProto(
R"pb(single_any {
[type.googleapis.com/google.protobuf.ListValue] {
values { number_value: 1 }
values { number_value: 2.3 }
}
})pb")))
<< result.DebugString();
}
TEST(FlatExprBuilderTest, AnyPackingInt) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("TestAllTypes{single_any: 1}"));
cel::RuntimeOptions options;
CelExpressionBuilderFlatImpl builder(options);
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.set_container("google.api.expr.test.v1.proto3");
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(
result,
test::IsCelMessage(EqualsProto(
R"pb(single_any {
[type.googleapis.com/google.protobuf.Int64Value] { value: 1 }
})pb")))
<< result.DebugString();
}
TEST(FlatExprBuilderTest, AnyPackingMap) {
ASSERT_OK_AND_ASSIGN(
ParsedExpr parsed_expr,
parser::Parse("TestAllTypes{single_any: {'key': 'value'}}"));
cel::RuntimeOptions options;
CelExpressionBuilderFlatImpl builder(options);
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
builder.set_container("google.api.expr.test.v1.proto3");
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelMessage(EqualsProto(
R"pb(single_any {
[type.googleapis.com/google.protobuf.Struct] {
fields {
key: "key"
value { string_value: "value" }
}
}
})pb")))
<< result.DebugString();
}
TEST(FlatExprBuilderTest, NullUnboxingDisabled) {
TestMessage message;
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("message.int32_wrapper_value"));
cel::RuntimeOptions options;
options.enable_empty_wrapper_null_unboxing = false;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("message",
CelProtoWrapper::CreateMessage(&message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelInt64(0));
}
TEST(FlatExprBuilderTest, HeterogeneousEqualityEnabled) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("{1: 2, 2u: 3}[1.0]"));
cel::RuntimeOptions options;
options.enable_heterogeneous_equality = true;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelInt64(2));
}
TEST(FlatExprBuilderTest, HeterogeneousEqualityDisabled) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("{1: 2, 2u: 3}[1.0]"));
cel::RuntimeOptions options;
options.enable_heterogeneous_equality = false;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result,
test::IsCelError(StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Invalid map key type"))));
}
TEST(FlatExprBuilderTest, CustomDescriptorPoolForCreateStruct) {
ASSERT_OK_AND_ASSIGN(
ParsedExpr parsed_expr,
parser::Parse("google.api.expr.runtime.SimpleTestMessage{}"));
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(
google::protobuf::DescriptorPool::generated_pool(),
google::protobuf::MessageFactory::generated_factory()));
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument));
google::protobuf::DescriptorPool desc_pool;
google::protobuf::FileDescriptorSet filedesc_set;
ASSERT_OK(ReadBinaryProtoFromFile(kSimpleTestMessageDescriptorSetFile,
filedesc_set));
ASSERT_EQ(filedesc_set.file_size(), 1);
desc_pool.BuildFile(filedesc_set.file(0));
google::protobuf::DynamicMessageFactory message_factory(&desc_pool);
CelExpressionBuilderFlatImpl builder2;
builder2.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(&desc_pool,
&message_factory));
ASSERT_OK_AND_ASSIGN(auto expression,
builder2.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
ASSERT_TRUE(result.IsMessage());
EXPECT_EQ(result.MessageOrDie()->GetTypeName(),
"google.api.expr.runtime.SimpleTestMessage");
}
TEST(FlatExprBuilderTest, CustomDescriptorPoolForSelect) {
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr,
parser::Parse("message.int64_value"));
google::protobuf::DescriptorPool desc_pool;
google::protobuf::FileDescriptorSet filedesc_set;
ASSERT_OK(ReadBinaryProtoFromFile(kSimpleTestMessageDescriptorSetFile,
filedesc_set));
ASSERT_EQ(filedesc_set.file_size(), 1);
desc_pool.BuildFile(filedesc_set.file(0));
google::protobuf::DynamicMessageFactory message_factory(&desc_pool);
const google::protobuf::Descriptor* desc = desc_pool.FindMessageTypeByName(
"google.api.expr.runtime.SimpleTestMessage");
const google::protobuf::Message* message_prototype = message_factory.GetPrototype(desc);
google::protobuf::Message* message = message_prototype->New();
const google::protobuf::Reflection* refl = message->GetReflection();
const google::protobuf::FieldDescriptor* field = desc->FindFieldByName("int64_value");
refl->SetInt64(message, field, 123);
CelExpressionBuilderFlatImpl builder;
ASSERT_OK_AND_ASSIGN(auto expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
google::protobuf::Arena arena;
activation.InsertValue("message",
CelProtoWrapper::CreateMessage(message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result, test::IsCelInt64(123));
delete message;
}
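// Creates a dynamic message of the named type from the given pool and
// factory; the caller takes ownership of the returned message.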
std::pair<google::protobuf::Message*, const google::protobuf::Reflection*> CreateTestMessage(
const google::protobuf::DescriptorPool& descriptor_pool,
google::protobuf::MessageFactory& message_factory, absl::string_view name) {
const google::protobuf::Descriptor* desc = descriptor_pool.FindMessageTypeByName(name);
const google::protobuf::Message* message_prototype = message_factory.GetPrototype(desc);
google::protobuf::Message* message = message_prototype->New();
const google::protobuf::Reflection* refl = message->GetReflection();
return std::make_pair(message, refl);
}
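// One custom descriptor pool case: the message type and field to populate,
// a setter for that field, and the matcher for the expected CEL value.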
struct CustomDescriptorPoolTestParam final {
using SetterFunction =
std::function<void(google::protobuf::Message*, const google::protobuf::Reflection*,
const google::protobuf::FieldDescriptor*)>;
std::string message_type;
std::string field_name;
SetterFunction setter;
test::CelValueMatcher matcher;
};
class CustomDescriptorPoolTest
: public ::testing::TestWithParam<CustomDescriptorPoolTestParam> {};
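// Checks that each well-known type resolves to the expected CEL value when
// loaded through a custom descriptor pool and dynamic message factory.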
TEST_P(CustomDescriptorPoolTest, TestType) {
const CustomDescriptorPoolTestParam& p = GetParam();
google::protobuf::DescriptorPool descriptor_pool;
google::protobuf::Arena arena;
ASSERT_OK(AddStandardMessageTypesToDescriptorPool(descriptor_pool));
google::protobuf::DynamicMessageFactory message_factory(&descriptor_pool);
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, parser::Parse("m"));
CelExpressionBuilderFlatImpl builder;
builder.GetTypeRegistry()->RegisterTypeProvider(
std::make_unique<ProtobufDescriptorProvider>(&descriptor_pool,
&message_factory));
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
auto [message, reflection] =
CreateTestMessage(descriptor_pool, message_factory, p.message_type);
const google::protobuf::FieldDescriptor* field =
message->GetDescriptor()->FindFieldByName(p.field_name);
p.setter(message, reflection, field);
ASSERT_OK_AND_ASSIGN(std::unique_ptr<CelExpression> expression,
builder.CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
Activation activation;
activation.InsertValue("m", CelProtoWrapper::CreateMessage(message, &arena));
ASSERT_OK_AND_ASSIGN(CelValue result,
expression->Evaluate(activation, &arena));
EXPECT_THAT(result, p.matcher);
delete message;
}
INSTANTIATE_TEST_SUITE_P(
ValueTypes, CustomDescriptorPoolTest,
::testing::ValuesIn(std::vector<CustomDescriptorPoolTestParam>{
{"google.protobuf.Duration", "seconds",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetInt64(message, field, 10);
},
test::IsCelDuration(absl::Seconds(10))},
{"google.protobuf.DoubleValue", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetDouble(message, field, 1.2);
},
test::IsCelDouble(1.2)},
{"google.protobuf.Int64Value", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetInt64(message, field, -23);
},
test::IsCelInt64(-23)},
{"google.protobuf.UInt64Value", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetUInt64(message, field, 42);
},
test::IsCelUint64(42)},
{"google.protobuf.BoolValue", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetBool(message, field, true);
},
test::IsCelBool(true)},
{"google.protobuf.StringValue", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetString(message, field, "foo");
},
test::IsCelString("foo")},
{"google.protobuf.BytesValue", "value",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetString(message, field, "bar");
},
test::IsCelBytes("bar")},
{"google.protobuf.Timestamp", "seconds",
[](google::protobuf::Message* message, const google::protobuf::Reflection* reflection,
const google::protobuf::FieldDescriptor* field) {
reflection->SetInt64(message, field, 20);
},
test::IsCelTimestamp(absl::FromUnixSeconds(20))}}));
struct ConstantFoldingTestCase {
std::string test_name;
std::string expr;
test::CelValueMatcher matcher;
absl::flat_hash_map<std::string, int64_t> values;
};
class UnknownFunctionImpl : public cel::Function {
absl::StatusOr<Value> Invoke(const cel::Function::InvokeContext& ctx,
absl::Span<const Value> args) const override {
return ctx.value_factory().CreateUnknownValue();
}
};
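// Builds an expression builder with the builtin functions plus a lazily bound
// "LazyFunction" and an "UnknownFunction" that always evaluates to unknown.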
absl::StatusOr<std::unique_ptr<CelExpressionBuilder>>
CreateConstantFoldingConformanceTestExprBuilder(
const InterpreterOptions& options) {
auto builder =
google::api::expr::runtime::CreateCelExpressionBuilder(options);
CEL_RETURN_IF_ERROR(
RegisterBuiltinFunctions(builder->GetRegistry(), options));
CEL_RETURN_IF_ERROR(builder->GetRegistry()->RegisterLazyFunction(
cel::FunctionDescriptor("LazyFunction", false, {})));
CEL_RETURN_IF_ERROR(builder->GetRegistry()->RegisterLazyFunction(
cel::FunctionDescriptor("LazyFunction", false, {cel::Kind::kBool})));
CEL_RETURN_IF_ERROR(builder->GetRegistry()->Register(
cel::FunctionDescriptor("UnknownFunction", false, {}),
std::make_unique<UnknownFunctionImpl>()));
return builder;
}
class ConstantFoldingConformanceTest
: public ::testing::TestWithParam<ConstantFoldingTestCase> {
protected:
google::protobuf::Arena arena_;
};
TEST_P(ConstantFoldingConformanceTest, Updated) {
InterpreterOptions options;
options.constant_folding = true;
options.constant_arena = &arena_;
options.enable_comprehension_list_append = true;
const ConstantFoldingTestCase& p = GetParam();
ASSERT_OK_AND_ASSIGN(
auto builder, CreateConstantFoldingConformanceTestExprBuilder(options));
ASSERT_OK_AND_ASSIGN(ParsedExpr expr, parser::Parse(p.expr));
ASSERT_OK_AND_ASSIGN(
auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
Activation activation;
ASSERT_OK(activation.InsertFunction(
PortableUnaryFunctionAdapter<bool, bool>::Create(
"LazyFunction", false,
[](google::protobuf::Arena* arena, bool val) { return val; })));
for (auto iter = p.values.begin(); iter != p.values.end(); ++iter) {
activation.InsertValue(iter->first, CelValue::CreateInt64(iter->second));
}
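// Evaluate the plan twice; the second run checks that the constant-folded
// plan stays reusable.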
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena_));
ASSERT_OK_AND_ASSIGN(result, plan->Evaluate(activation, &arena_));
EXPECT_THAT(result, p.matcher);
}
INSTANTIATE_TEST_SUITE_P(
Exprs, ConstantFoldingConformanceTest,
::testing::ValuesIn(std::vector<ConstantFoldingTestCase>{
{"simple_add", "1 + 2 + 3", test::IsCelInt64(6)},
{"add_with_var",
"1 + (2 + (3 + id))",
test::IsCelInt64(10),
{{"id", 4}}},
{"const_list", "[1, 2, 3, 4]", test::IsCelList(_)},
{"mixed_const_list",
"[1, 2, 3, 4] + [id]",
test::IsCelList(_),
{{"id", 5}}},
{"create_struct", "{'abc': 'def', 'def': 'efg', 'efg': 'hij'}",
Truly([](const CelValue& v) { return v.IsMap(); })},
{"field_selection", "{'abc': 123}.abc == 123", test::IsCelBool(true)},
{"type_coverage",
R"cel(
[type(bool),
type(123),
type(123u),
type(12.3),
type(b'123'),
type('123'),
type(null),
type(timestamp(0)),
type(duration('1h'))
])cel",
test::IsCelList(SizeIs(9))},
{"lazy_function", "true || LazyFunction()", test::IsCelBool(true)},
{"lazy_function_called", "LazyFunction(true) || false",
test::IsCelBool(true)},
{"unknown_function", "UnknownFunction() && false",
test::IsCelBool(false)},
{"nested_comprehension",
"[1, 2, 3, 4].all(x, [5, 6, 7, 8].all(y, x < y))",
test::IsCelBool(true)},
{"map", "[1, 2, 3, 4].map(x, x * 2).size() == 4",
test::IsCelBool(true)},
{"str_cat",
"'1234567890' + '1234567890' + '1234567890' + '1234567890' + "
"'1234567890'",
test::IsCelString(
"12345678901234567890123456789012345678901234567890")}}));
TEST(UpdatedConstantFolding, FoldsLists) {
InterpreterOptions options;
google::protobuf::Arena arena;
options.constant_folding = true;
options.constant_arena = &arena;
ASSERT_OK_AND_ASSIGN(
auto builder, CreateConstantFoldingConformanceTestExprBuilder(options));
ASSERT_OK_AND_ASSIGN(ParsedExpr expr,
parser::Parse("[1] + [2] + [3] + [4] + [5] + [6] + [7] "
"+ [8] + [9] + [10] + [11] + [12]"));
ASSERT_OK_AND_ASSIGN(
auto plan, builder->CreateExpression(&expr.expr(), &expr.source_info()));
Activation activation;
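// With constant folding the concatenated list is precomputed at plan time, so
// evaluation itself should add almost nothing (under 512 bytes) to the arena.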
int before_size = arena.SpaceUsed();
ASSERT_OK_AND_ASSIGN(CelValue result, plan->Evaluate(activation, &arena));
EXPECT_LE(arena.SpaceUsed() - before_size, 512);
EXPECT_THAT(result, test::IsCelList(SizeIs(12)));
}
TEST(FlatExprBuilderTest, BlockBadIndex) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args {
list_expr: { elements { const_expr: { string_value: "foo" } } }
}
args { ident_expr: { name: "@index-1" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("bad @index")));
}
TEST(FlatExprBuilderTest, OutOfRangeBlockIndex) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args {
list_expr: { elements { const_expr: { string_value: "foo" } } }
}
args { ident_expr: { name: "@index1" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid @index greater than number of bindings:")));
}
TEST(FlatExprBuilderTest, EarlyBlockIndex) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args { list_expr: { elements { ident_expr: { name: "@index0" } } } }
args { ident_expr: { name: "@index0" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("@index references current or future binding:")));
}
TEST(FlatExprBuilderTest, OutOfScopeCSE) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: { ident_expr: { name: "@ac:0:0" } }
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("out of scope reference to CSE generated "
"comprehension variable")));
}
TEST(FlatExprBuilderTest, BlockMissingBindings) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: { call_expr: { function: "cel.@block" } }
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"malformed cel.@block: missing list of bound expressions")));
}
TEST(FlatExprBuilderTest, BlockMissingExpression) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args { list_expr: {} }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("malformed cel.@block: missing bound expression")));
}
TEST(FlatExprBuilderTest, BlockNotListOfBoundExpressions) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args { ident_expr: { name: "@index0" } }
args { ident_expr: { name: "@index0" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("malformed cel.@block: first argument is not a list "
"of bound expressions")));
}
TEST(FlatExprBuilderTest, BlockEmptyListOfBoundExpressions) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args { list_expr: {} }
args { ident_expr: { name: "@index0" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"malformed cel.@block: list of bound expressions is empty")));
}
TEST(FlatExprBuilderTest, BlockOptionalListOfBoundExpressions) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args {
list_expr: {
elements { const_expr: { string_value: "foo" } }
optional_indices: [ 0 ]
}
}
args { ident_expr: { name: "@index0" } }
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("malformed cel.@block: list of bound expressions "
"contains an optional")));
}
TEST(FlatExprBuilderTest, BlockNested) {
ParsedExpr parsed_expr;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
expr: {
call_expr: {
function: "cel.@block"
args {
list_expr: { elements { const_expr: { string_value: "foo" } } }
}
args {
call_expr: {
function: "cel.@block"
args {
list_expr: {
elements { const_expr: { string_value: "foo" } }
}
}
args { ident_expr: { name: "@index1" } }
}
}
}
}
)pb",
&parsed_expr));
CelExpressionBuilderFlatImpl builder;
EXPECT_THAT(
builder.CreateExpression(&parsed_expr.expr(), &parsed_expr.source_info()),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("multiple cel.@block are not allowed")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/flat_expr_builder.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/compiler/flat_expr_builder_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
241c7c0e-4f18-4489-be6e-d7b6a4d84b49 | cpp | tensorflow/tensorflow | builtin_op_data | tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h | tensorflow/lite/core/c/builtin_op_data_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_C_BUILTIN_OP_DATA_H_
typedef enum {
kTfLitePaddingUnknown = 0,
kTfLitePaddingSame,
kTfLitePaddingValid,
} TfLitePadding;
typedef enum {
kTfLiteActNone = 0,
kTfLiteActRelu,
kTfLiteActReluN1To1,
kTfLiteActRelu6,
kTfLiteActTanh,
kTfLiteActSignBit,
kTfLiteActSigmoid,
} TfLiteFusedActivation;
typedef struct {
int width;
int height;
int width_offset;
int height_offset;
} TfLitePaddingValues;
typedef struct {
TfLitePadding padding;
int stride_width;
int stride_height;
int filter_width;
int filter_height;
TfLiteFusedActivation activation;
struct {
TfLitePaddingValues padding;
} computed;
} TfLitePoolParams;
#endif | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include <gtest/gtest.h>
namespace tflite {
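// Compile-only check: instantiating every builtin-op param struct verifies the
// header is self-contained; the local variables are intentionally unused.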
TEST(BuiltinOpData, CanCompileStructs) {
TfLitePadding padding = kTfLitePaddingSame;
TfLitePaddingValues padding_values;
TfLiteFusedActivation fused_activation = kTfLiteActRelu;
TfLiteConvParams conv_params;
TfLitePoolParams pool_params;
TfLiteDepthwiseConvParams depthwise_conv_params;
TfLiteSVDFParams svdf_params;
TfLiteRNNParams rnn_params;
TfLiteSequenceRNNParams sequence_rnn_params;
TfLiteFullyConnectedWeightsFormat fully_connected_weights_format =
kTfLiteFullyConnectedWeightsFormatDefault;
TfLiteFullyConnectedParams fully_connected_params;
TfLiteLSHProjectionType projection_type = kTfLiteLshProjectionDense;
TfLiteLSHProjectionParams projection_params;
TfLiteSoftmaxParams softmax_params;
TfLiteConcatenationParams concatenation_params;
TfLiteAddParams add_params;
TfLiteSpaceToBatchNDParams space_to_batch_nd_params;
TfLiteBatchToSpaceNDParams batch_to_space_nd_params;
TfLiteMulParams mul_params;
TfLiteSubParams sub_params;
TfLiteDivParams div_params;
TfLiteL2NormParams l2_norm_params;
TfLiteLocalResponseNormParams local_response_norm_params;
TfLiteLSTMKernelType lstm_kernel_type = kTfLiteLSTMBasicKernel;
TfLiteLSTMParams lstm_params;
TfLiteResizeBilinearParams resize_bilinear_params;
TfLitePadParams pad_params;
TfLitePadV2Params pad_v2_params;
TfLiteReshapeParams reshape_params;
TfLiteSkipGramParams skip_gram_params;
TfLiteSpaceToDepthParams space_to_depth_params;
TfLiteDepthToSpaceParams depth_to_space_params;
TfLiteCastParams cast_params;
TfLiteCombinerType combiner_type = kTfLiteCombinerTypeSqrtn;
TfLiteEmbeddingLookupSparseParams lookup_sparse_params;
TfLiteGatherParams gather_params;
TfLiteTransposeParams transpose_params;
TfLiteReducerParams reducer_params;
TfLiteSplitParams split_params;
TfLiteSplitVParams split_v_params;
TfLiteSqueezeParams squeeze_params;
TfLiteStridedSliceParams strided_slice_params;
TfLiteArgMaxParams arg_max_params;
TfLiteArgMinParams arg_min_params;
TfLiteTransposeConvParams transpose_conv_params;
TfLiteSparseToDenseParams sparse_to_dense_params;
TfLiteShapeParams shape_params;
TfLiteRankParams rank_params;
TfLiteFakeQuantParams fake_quant_params;
TfLitePackParams pack_params;
TfLiteUnpackParams unpack_params;
TfLiteOneHotParams one_hot_params;
TfLiteBidirectionalSequenceRNNParams bidi_sequence_rnn_params;
TfLiteBidirectionalSequenceLSTMParams bidi_sequence_lstm_params;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/builtin_op_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4756e541-df87-49ee-8f7e-723acdc0b81c | cpp | google/quiche | quiche_random | quiche/common/quiche_random.cc | quiche/common/quiche_random_test.cc | #include "quiche/common/quiche_random.h"
#include <cstdint>
#include <cstring>
#include "openssl/rand.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
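// xoshiro256++: a fast, non-cryptographic PRNG backing the Insecure* methods.
// Each thread seeds its own copy of the state once via the secure RAND_bytes.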
inline uint64_t Xoshiro256InitializeRngStateMember() {
uint64_t result;
RAND_bytes(reinterpret_cast<uint8_t*>(&result), sizeof(result));
return result;
}
inline uint64_t Xoshiro256PlusPlusRotLeft(uint64_t x, int k) {
return (x << k) | (x >> (64 - k));
}
uint64_t Xoshiro256PlusPlus() {
static thread_local uint64_t rng_state[4] = {
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember(),
Xoshiro256InitializeRngStateMember()};
const uint64_t result =
Xoshiro256PlusPlusRotLeft(rng_state[0] + rng_state[3], 23) + rng_state[0];
const uint64_t t = rng_state[1] << 17;
rng_state[2] ^= rng_state[0];
rng_state[3] ^= rng_state[1];
rng_state[1] ^= rng_state[2];
rng_state[0] ^= rng_state[3];
rng_state[2] ^= t;
rng_state[3] = Xoshiro256PlusPlusRotLeft(rng_state[3], 45);
return result;
}
class DefaultQuicheRandom : public QuicheRandom {
public:
DefaultQuicheRandom() {}
DefaultQuicheRandom(const DefaultQuicheRandom&) = delete;
DefaultQuicheRandom& operator=(const DefaultQuicheRandom&) = delete;
~DefaultQuicheRandom() override {}
void RandBytes(void* data, size_t len) override;
uint64_t RandUint64() override;
void InsecureRandBytes(void* data, size_t len) override;
uint64_t InsecureRandUint64() override;
};
void DefaultQuicheRandom::RandBytes(void* data, size_t len) {
RAND_bytes(reinterpret_cast<uint8_t*>(data), len);
}
uint64_t DefaultQuicheRandom::RandUint64() {
uint64_t value;
RandBytes(&value, sizeof(value));
return value;
}
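// Fills the buffer eight bytes at a time from the thread-local xoshiro state,
// then covers any remaining tail with one final partial word.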
void DefaultQuicheRandom::InsecureRandBytes(void* data, size_t len) {
while (len >= sizeof(uint64_t)) {
uint64_t random_bytes64 = Xoshiro256PlusPlus();
memcpy(data, &random_bytes64, sizeof(uint64_t));
data = reinterpret_cast<char*>(data) + sizeof(uint64_t);
len -= sizeof(uint64_t);
}
if (len > 0) {
QUICHE_DCHECK_LT(len, sizeof(uint64_t));
uint64_t random_bytes64 = Xoshiro256PlusPlus();
memcpy(data, &random_bytes64, len);
}
}
uint64_t DefaultQuicheRandom::InsecureRandUint64() {
return Xoshiro256PlusPlus();
}
}
QuicheRandom* QuicheRandom::GetInstance() {
static DefaultQuicheRandom* random = new DefaultQuicheRandom();
return random;
}
} | #include "quiche/common/quiche_random.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
TEST(QuicheRandom, RandBytes) {
unsigned char buf1[16];
unsigned char buf2[16];
memset(buf1, 0xaf, sizeof(buf1));
memset(buf2, 0xaf, sizeof(buf2));
ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
auto rng = QuicheRandom::GetInstance();
rng->RandBytes(buf1, sizeof(buf1));
EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
TEST(QuicheRandom, RandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->RandUint64();
uint64_t value2 = rng->RandUint64();
EXPECT_NE(value1, value2);
}
TEST(QuicheRandom, InsecureRandBytes) {
unsigned char buf1[16];
unsigned char buf2[16];
memset(buf1, 0xaf, sizeof(buf1));
memset(buf2, 0xaf, sizeof(buf2));
ASSERT_EQ(0, memcmp(buf1, buf2, sizeof(buf1)));
auto rng = QuicheRandom::GetInstance();
rng->InsecureRandBytes(buf1, sizeof(buf1));
EXPECT_NE(0, memcmp(buf1, buf2, sizeof(buf1)));
}
TEST(QuicheRandom, InsecureRandUint64) {
auto rng = QuicheRandom::GetInstance();
uint64_t value1 = rng->InsecureRandUint64();
uint64_t value2 = rng->InsecureRandUint64();
EXPECT_NE(value1, value2);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_random.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_random_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d44286ee-dc18-4d49-81ae-9830d313d9ec | cpp | tensorflow/tensorflow | fusion_node_indexing_evaluation | third_party/xla/xla/service/fusion_node_indexing_evaluation.cc | third_party/xla/xla/service/fusion_node_indexing_evaluation_test.cc | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/types.h"
#include "tsl/platform/logging.h"
namespace xla {
FusionNodeIndexingEvaluation::FusionNodeIndexingEvaluation(
const HloInstruction* fusion, int64_t root_usage_count)
: fusion_(fusion) {
HloInstruction* root = fusion->fused_expression_root();
indexing_users_[root].insert(fusion);
index_usage_count_[fusion] = root_usage_count;
RecomputeCache();
}
const int64_t FusionNodeIndexingEvaluation::kAllowedCodeDuplication = 15;
namespace {
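// Counts the users of `hlo`, looking through fusion consumers to the users of
// the corresponding fused parameter.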
int64_t UserCount(const HloInstruction* hlo) {
int64_t cnt = 0;
for (HloInstruction* user : hlo->users()) {
if (user->opcode() == HloOpcode::kFusion) {
int64_t operand_index = user->operand_index(hlo);
cnt += user->fused_parameter(operand_index)->user_count();
} else {
++cnt;
}
}
return cnt;
}
}
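// Broadcasts are always considered cheap to duplicate. Otherwise fusing the
// producer is rejected when it would be emitted too many times, or more than
// once when the op invalidates the elemental IR emitter's cache.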
bool FusionNodeIndexingEvaluation::CodeDuplicationTooHigh(
const HloInstruction* producer) const {
if (producer->opcode() == HloOpcode::kBroadcast) {
return false;
}
int64_t emitted_instructions = EvaluateEmittedInstructions(producer);
return emitted_instructions > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(producer) &&
(emitted_instructions > 1 || UserCount(producer) > 1));
}
bool FusionNodeIndexingEvaluation::MaxCodeDuplicationTooHigh() const {
for (const auto& entry : index_usage_count_) {
if (entry.second > kAllowedCodeDuplication ||
(ElementalIrEmitter::OpInvalidatesCache(entry.first) &&
(entry.second > 1 || UserCount(entry.first) > 1))) {
return true;
}
}
return false;
}
int64_t FusionNodeIndexingEvaluation::EvaluateEmittedInstructions(
const HloInstruction* producer) const {
int64_t total = 0;
for (const auto* user : indexing_users_.at(producer)) {
total += index_usage_count_.at(user);
}
return total;
}
void FusionNodeIndexingEvaluation::UpdateEvaluationCache(
const HloInstruction* producer,
absl::flat_hash_set<const HloInstruction*> indexing_users_of_producer) {
CHECK(!indexing_users_.contains(producer));
indexing_users_[producer] = std::move(indexing_users_of_producer);
UpdateIndexUsageCount(producer);
UpdateIndexingUsersOfOperands(producer);
}
absl::flat_hash_set<const HloInstruction*>
FusionNodeIndexingEvaluation::RemoveFusionOperand(
HloInstruction* fusion_operand) {
auto indexing_users_of_operand =
std::move(indexing_users_.at(fusion_operand));
indexing_users_.erase(fusion_operand);
CHECK(!index_usage_count_.contains(fusion_operand));
return indexing_users_of_operand;
}
void FusionNodeIndexingEvaluation::RecomputeCache() {
auto postorder =
fusion_->fused_instructions_computation()->MakeInstructionPostOrder();
std::reverse(postorder.begin(), postorder.end());
for (const auto* instruction : postorder) {
if (instruction->opcode() == HloOpcode::kParameter) {
continue;
}
UpdateIndexUsageCount(instruction);
UpdateIndexingUsersOfOperands(instruction);
}
}
void FusionNodeIndexingEvaluation::UpdateIndexUsageCount(
const HloInstruction* instruction) {
int64_t total = 0;
for (const auto* user : indexing_users_[instruction]) {
total += index_usage_count_.at(user);
}
CHECK(index_usage_count_.emplace(instruction, total).second);
}
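// Propagates indexing users to operands: operands accessed with the same index
// (same shape, or through a transpose) inherit this instruction's indexing
// users; all other operands get the instruction itself as their indexing user.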
void FusionNodeIndexingEvaluation::UpdateIndexingUsersOfOperands(
const HloInstruction* instruction) {
for (const auto* operand : instruction->operands()) {
if (operand->opcode() == HloOpcode::kParameter) {
operand = fusion_->operand(operand->parameter_number());
}
if (instruction->opcode() == HloOpcode::kTranspose ||
Shape::Equal().IgnoreElementType()(operand->shape(),
instruction->shape())) {
indexing_users_[operand].insert(indexing_users_[instruction].begin(),
indexing_users_[instruction].end());
} else {
indexing_users_[operand].insert(instruction);
}
}
}
} | #include "xla/service/fusion_node_indexing_evaluation.h"
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
using FusionNodeIndexingEvaluationTest = HloTestBase;
class InstructionFusionForTesting : public InstructionFusion {
public:
explicit InstructionFusionForTesting()
: InstructionFusion(InstructionFusion::IsExpensive) {}
HloInstruction* FuseInstruction(HloInstruction* fusion_instruction,
HloInstruction* producer) override {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation =
fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation) override {
return InstructionFusion::Fuse(producer, consumer, computation);
}
int64_t EvaluateEmittedInstructions(const HloInstruction* producer,
const HloInstruction* consumer) {
if (consumer->opcode() != HloOpcode::kFusion) {
return 0;
}
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
return fusion_node_evaluations_.at(consumer).EvaluateEmittedInstructions(
producer);
}
const FusionNodeIndexingEvaluation* GetFusionNodeEvaluation(
const HloInstruction* consumer) {
auto it = fusion_node_evaluations_.find(consumer);
if (it == fusion_node_evaluations_.end()) {
return nullptr;
}
return &it->second;
}
private:
absl::flat_hash_map<const HloInstruction*, FusionNodeIndexingEvaluation>
fusion_node_evaluations_;
};
TEST_F(FusionNodeIndexingEvaluationTest, FuseTwoInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4,3]{1,0} parameter(0)
add = f32[4,3]{1,0} add(p0, p0)
ROOT sub = f32[4,3]{1,0} subtract(add, p0)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
HloInstruction* add = sub->mutable_operand(0);
InstructionFusionForTesting().Fuse(add, sub, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, FuseThreeInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
slice1 = f32[3]{0} slice(p0), slice={[0:3]}
slice2 = f32[3]{0} slice(p0), slice={[0:3]}
ROOT sub = f32[3]{0} subtract(slice1, slice2)
})")
.value();
HloInstruction* sub = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice1 = sub->mutable_operand(0);
HloInstruction* slice2 = sub->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice1, sub, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2, fusion), 1);
instruction_fusion.Fuse(slice2, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, ExponentialDuplicationPattern) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
slice1.0 = f32[3]{0} slice(add0), slice={[0:3]}
slice1.1 = f32[3]{0} slice(add0), slice={[1:4]}
add1 = f32[3]{0} add(slice1.0, slice1.1)
slice2.0 = f32[2]{0} slice(add1), slice={[0:2]}
slice2.1 = f32[2]{0} slice(add1), slice={[1:3]}
ROOT add2 = f32[2]{0} add(slice2.0, slice2.1)
})")
.value();
HloInstruction* add2 = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* slice2_0 = add2->mutable_operand(0);
HloInstruction* slice2_1 = add2->mutable_operand(1);
auto fusion =
instruction_fusion.Fuse(slice2_0, add2, module->entry_computation());
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice2_1, fusion),
1);
instruction_fusion.Fuse(slice2_1, fusion, module->entry_computation());
HloInstruction* add1 = fusion->mutable_operand(0);
EXPECT_EQ(add1->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add1, fusion), 2);
instruction_fusion.Fuse(add1, fusion, module->entry_computation());
HloInstruction* slice1_0 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_0->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_0, fusion),
2);
instruction_fusion.Fuse(slice1_0, fusion, module->entry_computation());
HloInstruction* slice1_1 = fusion->mutable_operand(0);
EXPECT_EQ(slice1_1->opcode(), HloOpcode::kSlice);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(slice1_1, fusion),
2);
instruction_fusion.Fuse(slice1_1, fusion, module->entry_computation());
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
instruction_fusion.Fuse(add0, fusion, module->entry_computation());
}
TEST_F(FusionNodeIndexingEvaluationTest, RecomputeCache) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param_0.5: f32[4]) -> f32[2] {
%param_0.5 = f32[4]{0} parameter(0)
%slice1.2 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[0:3]}
%slice1.3 = f32[3]{0} slice(f32[4]{0} %param_0.5), slice={[1:4]}
%add1.1 = f32[3]{0} add(f32[3]{0} %slice1.2, f32[3]{0} %slice1.3)
%slice2.2 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[0:2]}
%slice2.3 = f32[2]{0} slice(f32[3]{0} %add1.1), slice={[1:3]}
ROOT %add2.1 = f32[2]{0} add(f32[2]{0} %slice2.2, f32[2]{0} %slice2.3)
}
ENTRY entry_computation {
p0 = f32[4]{0} parameter(0)
p1 = f32[4]{0} parameter(1)
add0 = f32[4]{0} add(p0, p1)
ROOT %fusion = f32[2]{0} fusion(add0), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* add0 = fusion->mutable_operand(0);
EXPECT_EQ(add0->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add0, fusion), 4);
}
TEST_F(FusionNodeIndexingEvaluationTest, CodeDuplicationTooHigh) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
%fused_computation (param: f32[6]) -> f32[2] {
%param = f32[6]{0} parameter(0)
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
ENTRY entry_computation {
p0 = f32[] parameter(0)
add = f32[] add(p0, p0)
broadcast = f32[6]{0} broadcast(add), dimensions={}
ROOT %fusion = f32[2]{0} fusion(broadcast), kind=kLoop, calls=%fused_computation
})")
.value();
HloInstruction* fusion = module->entry_computation()->root_instruction();
InstructionFusionForTesting instruction_fusion;
HloInstruction* broadcast = fusion->mutable_operand(0);
EXPECT_EQ(broadcast->opcode(), HloOpcode::kBroadcast);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(broadcast, fusion),
16);
EXPECT_FALSE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(broadcast));
instruction_fusion.Fuse(broadcast, fusion, module->entry_computation());
HloInstruction* add = fusion->mutable_operand(0);
EXPECT_EQ(add->opcode(), HloOpcode::kAdd);
EXPECT_EQ(instruction_fusion.EvaluateEmittedInstructions(add, fusion), 16);
EXPECT_TRUE(instruction_fusion.GetFusionNodeEvaluation(fusion)
->CodeDuplicationTooHigh(add));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_node_indexing_evaluation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/fusion_node_indexing_evaluation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03b2db7f-8707-47be-955c-5eccfd5e8710 | cpp | tensorflow/tensorflow | bincount_op | tensorflow/compiler/tf2xla/kernels/bincount_op.cc | tensorflow/core/kernels/bincount_op_test.cc | #include <memory>
#include <vector>
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/shape_util.h"
namespace tensorflow {
namespace {
class DenseBincountOp : public XlaOpKernel {
public:
explicit DenseBincountOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {
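// This kernel also backs plain Bincount, which has no "binary_output" attr,
// so a failed lookup here is deliberately discarded.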
(void)ctx->GetAttr("binary_output", &binary_output_);
}
private:
bool binary_output_ = false;
void Compile(XlaOpKernelContext* ctx) override {
int64_t output_size;
xla::XlaOp output_size_param = ctx->Input("size");
absl::StatusOr<xla::Shape> output_shape_or =
ctx->builder()->GetShape(output_size_param);
OP_REQUIRES_OK(ctx, output_shape_or.status());
auto output_shape_param = output_shape_or.value();
auto output_rank = output_shape_param.rank();
OP_REQUIRES(ctx, output_rank == 0,
errors::InvalidArgument("Shape must be rank 0 but is rank ",
output_rank));
OP_REQUIRES_OK(ctx, ctx->ConstantInputAsIntScalar("size", &output_size));
OP_REQUIRES(ctx, output_size >= 0,
errors::InvalidArgument("size (", output_size,
") must be non-negative"));
xla::XlaOp idx, updates, output;
xla::XlaOp input = ctx->Input(0);
auto input_xla_type = ctx->input_xla_type(0);
xla::PrimitiveType dtype = ctx->InputXlaType("weights");
auto zero = xla::Zero(ctx->builder(), dtype);
auto one = xla::One(ctx->builder(), dtype);
absl::StatusOr<xla::Shape> input_shape_or = ctx->builder()->GetShape(input);
OP_REQUIRES_OK(ctx, input_shape_or.status());
auto input_shape = input_shape_or.value();
auto rank = input_shape.rank();
OP_REQUIRES(ctx, rank <= 2,
errors::InvalidArgument(
"Shape must be at most rank 2 but is rank ", rank));
xla::XlaOp weights = ctx->Input(2);
absl::StatusOr<xla::Shape> weights_shape_or =
ctx->builder()->GetShape(weights);
OP_REQUIRES_OK(ctx, weights_shape_or.status());
auto weights_shape = weights_shape_or.value();
OP_REQUIRES(ctx,
xla::ShapeUtil::CompatibleIgnoringElementType(weights_shape,
input_shape) ||
(weights_shape.dimensions_size() > 0 &&
weights_shape.dimensions(0) == 0),
errors::InvalidArgument(
"`weights` must be the same shape as `arr` or a length-0 "
"`Tensor`, in which case it acts as all weights equal to "
"1. Received ",
weights_shape.DebugString()));
auto size = input_shape.dimensions(0);
if (!size) {
output = xla::Broadcast(zero, {output_size});
ctx->SetOutput(0, output);
return;
}
auto weights_size = weights_shape.dimensions(0);
bool has_weights = false;
if (weights_size) {
has_weights = true;
}
xla::Shape output_shape = xla::ShapeUtil::MakeShape(dtype, {output_size});
xla::ScatterDimensionNumbers scatter_dnums;
scatter_dnums.set_index_vector_dim(1);
scatter_dnums.add_inserted_window_dims(0);
scatter_dnums.add_scatter_dims_to_operand_dims(0);
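// Rank-2 input computes one histogram per row: scatter indices become
// (row, value) pairs built from an iota of row indices and the flattened input.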
if (rank == 2) {
output_shape = xla::ShapeUtil::MakeShape(dtype, {size, output_size});
scatter_dnums.add_inserted_window_dims(1);
scatter_dnums.add_scatter_dims_to_operand_dims(1);
auto i_shape =
xla::ShapeUtil::MakeShape(input_xla_type, {input_shape.dimensions()});
auto i = xla::Iota(ctx->builder(), i_shape, 0);
i = xla::Reshape(
i, {input_shape.dimensions(0) * input_shape.dimensions(1), 1});
auto j = xla::Reshape(
input, {input_shape.dimensions(0) * input_shape.dimensions(1), 1});
std::vector<xla::XlaOp> iotas_to_concat;
iotas_to_concat.push_back(i);
iotas_to_concat.push_back(j);
idx = xla::ConcatInDim(ctx->builder(), iotas_to_concat, 1);
updates = xla::Broadcast(
one, {input_shape.dimensions(0) * input_shape.dimensions(1)});
output = xla::Broadcast(
zero, {output_shape.dimensions(0), output_shape.dimensions(1)});
if (has_weights && !binary_output_) {
weights = xla::Reshape(
weights, {input_shape.dimensions(0) * input_shape.dimensions(1)});
updates = weights;
}
} else {
input = xla::Reshape(input, {size, 1});
idx = xla::Reshape(input, {size, 1});
updates = xla::Broadcast(one, {size});
output = xla::Broadcast(zero, {output_size});
if (has_weights && !binary_output_) {
updates = weights;
}
}
xla::XlaComputation assn_computation = [&] {
std::unique_ptr<xla::XlaBuilder> subb =
ctx->builder()->CreateSubBuilder("scatter_bincount");
xla::Shape param_shape = xla::ShapeUtil::MakeShape(dtype, {});
auto p0 = xla::Parameter(subb.get(), 0, param_shape, "p0");
auto p1 = xla::Parameter(subb.get(), 1, param_shape, "p1");
if (!binary_output_) {
xla::Add(p0, p1);
}
return subb->BuildAndNoteError();
}();
output = xla::Scatter(output, idx, updates, assn_computation, scatter_dnums,
false, false);
ctx->SetOutput(0, output);
}
};
REGISTER_XLA_OP(Name("DenseBincount").CompileTimeConstantInput("size"),
DenseBincountOp);
REGISTER_XLA_OP(Name("Bincount").CompileTimeConstantInput("size"),
DenseBincountOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* Bincount(int arr_size, int nbins) {
Graph* g = new Graph(OpRegistry::Global());
Tensor arr(DT_INT32, TensorShape({arr_size}));
arr.flat<int32>() = arr.flat<int32>().setRandom().abs();
Tensor size(DT_INT32, TensorShape({static_cast<int32>(1)}));
size.flat<int32>()(0) = static_cast<int32>(nbins);
Tensor weights(DT_INT32, TensorShape({0}));
Node* node;
TF_CHECK_OK(NodeBuilder(g->NewName("n"), "Bincount")
.Input(test::graph::Constant(g, arr))
.Input(test::graph::Constant(g, size))
.Input(test::graph::Constant(g, weights))
.Attr("T", DT_INT32)
.Finalize(g, &node));
return g;
}
#define BM_BincountDev(K, NBINS, type) \
static void BM_Bincount##_##type##_##K##_##NBINS( \
::testing::benchmark::State& state) { \
test::Benchmark(#type, Bincount(K * 1024, NBINS), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * K * \
1024); \
} \
BENCHMARK(BM_Bincount##_##type##_##K##_##NBINS);
BM_BincountDev(32, 1000, cpu);
BM_BincountDev(32, 2000, cpu);
BM_BincountDev(32, 5000, cpu);
BM_BincountDev(64, 1000, cpu);
BM_BincountDev(64, 2000, cpu);
BM_BincountDev(64, 5000, cpu);
BM_BincountDev(128, 1000, cpu);
BM_BincountDev(128, 2000, cpu);
BM_BincountDev(128, 5000, cpu);
BM_BincountDev(32, 1000, gpu);
BM_BincountDev(32, 2000, gpu);
BM_BincountDev(32, 5000, gpu);
BM_BincountDev(64, 1000, gpu);
BM_BincountDev(64, 2000, gpu);
BM_BincountDev(64, 5000, gpu);
BM_BincountDev(128, 1000, gpu);
BM_BincountDev(128, 2000, gpu);
BM_BincountDev(128, 5000, gpu);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/bincount_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/bincount_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8d39f7fb-274f-498c-acfb-4154cac83495 | cpp | tensorflow/tensorflow | stream_pool | third_party/xla/xla/service/stream_pool.cc | third_party/xla/xla/service/stream_pool_test.cc | #include "xla/service/stream_pool.h"
#include <memory>
#include <utility>
#include "absl/strings/str_format.h"
namespace xla {
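// Hands out a stream of the requested priority, reusing a pooled stream when a
// healthy one is available and creating a fresh one otherwise.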
StreamPool::Ptr StreamPool::BorrowStream(se::StreamPriority priority) {
std::unique_ptr<se::Stream> stream;
{
absl::MutexLock lock(&mu_);
if (streams_with_pri_.find(priority) == streams_with_pri_.end()) {
stream = nullptr;
} else {
while (!streams_with_pri_[priority].empty() && !stream) {
stream = std::move(streams_with_pri_[priority].back());
streams_with_pri_[priority].pop_back();
if (stream->ok()) {
VLOG(1) << absl::StrFormat(
"StreamPool reusing existing stream (%p) with priority: %s",
stream.get(), se::StreamPriorityToString(priority));
} else {
VLOG(1) << absl::StrFormat(
"Stream (%p) was not ok, deleting with : %s", stream.get(),
se::StreamPriorityToString(priority));
stream = nullptr;
}
}
}
}
if (!stream) {
stream = executor_->CreateStream(priority).value();
stream->set_name(absl::StrFormat("%s pool stream",
se::StreamPriorityToString(priority)));
VLOG(1) << absl::StrFormat("Created new stream (%p) with priority = %s",
stream.get(),
se::StreamPriorityToString(priority));
}
PtrDeleter deleter = {this};
return Ptr(stream.release(), deleter);
}
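// Streams still in an ok state go back on the per-priority free list; streams
// that hit an error are destroyed rather than reused.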
void StreamPool::ReturnStream(se::Stream* stream) {
if (stream->ok()) {
VLOG(1) << absl::StrFormat("StreamPool returning ok stream (%p)", stream);
absl::MutexLock lock(&mu_);
auto priority = std::get<se::StreamPriority>(stream->priority());
streams_with_pri_[priority].emplace_back(stream);
} else {
VLOG(1) << absl::StrFormat("StreamPool deleting !ok stream (%p)", stream);
delete stream;
}
}
} | #include "xla/service/stream_pool.h"
#include <memory>
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/test_helpers.h"
namespace xla {
namespace {
class StreamPoolTest : public ::testing::Test {
protected:
se::StreamExecutor* NewStreamExecutor() {
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
return platform->ExecutorForDevice(0).value();
}
};
TEST_F(StreamPoolTest, EmptyPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
}
TEST_F(StreamPoolTest, OneStreamPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
StreamPool::Ptr stream1 = pool.BorrowStream();
se::Stream* stream1_ptr = stream1.get();
EXPECT_TRUE(stream1->ok());
stream1 = nullptr;
StreamPool::Ptr stream2 = pool.BorrowStream();
se::Stream* stream2_ptr = stream2.get();
EXPECT_TRUE(stream2->ok());
stream2 = nullptr;
EXPECT_EQ(stream1_ptr, stream2_ptr);
}
TEST_F(StreamPoolTest, TwoStreamPool) {
se::StreamExecutor* executor = NewStreamExecutor();
StreamPool pool(executor);
StreamPool::Ptr stream1 = pool.BorrowStream();
se::Stream* stream1_ptr = stream1.get();
EXPECT_TRUE(stream1->ok());
StreamPool::Ptr stream2 = pool.BorrowStream();
se::Stream* stream2_ptr = stream2.get();
EXPECT_TRUE(stream2->ok());
EXPECT_NE(stream1_ptr, stream2_ptr);
stream1 = nullptr;
StreamPool::Ptr stream3 = pool.BorrowStream();
se::Stream* stream3_ptr = stream3.get();
EXPECT_TRUE(stream3->ok());
EXPECT_EQ(stream1_ptr, stream3_ptr);
EXPECT_NE(stream2_ptr, stream3_ptr);
stream2 = nullptr;
StreamPool::Ptr stream4 = pool.BorrowStream();
se::Stream* stream4_ptr = stream4.get();
EXPECT_TRUE(stream4->ok());
EXPECT_EQ(stream2_ptr, stream4_ptr);
EXPECT_NE(stream3_ptr, stream4_ptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/stream_pool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d840c5d5-dfe8-4ccf-a118-82e261fecd8b | cpp | google/tensorstore | utf8_string | tensorstore/util/utf8_string.cc | tensorstore/util/utf8_string_test.cc | #include "tensorstore/util/utf8_string.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/internal/utf8.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
bool Serializer<Utf8String>::Encode(EncodeSink& sink, const Utf8String& value) {
return serialization::WriteDelimited(sink.writer(), value.utf8);
}
bool Serializer<Utf8String>::Decode(DecodeSource& source, Utf8String& value) {
return serialization::ReadDelimitedUtf8(source.reader(), value.utf8);
}
}
} | #include "tensorstore/util/utf8_string.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::Utf8String;
using ::tensorstore::serialization::SerializationRoundTrip;
using ::tensorstore::serialization::TestSerializationRoundTrip;
TEST(SerializationTest, Valid) {
TestSerializationRoundTrip(Utf8String{""});
TestSerializationRoundTrip(Utf8String{"abc"});
TestSerializationRoundTrip(Utf8String{"\xc2\x80hello\xc2\xbf"});
}
TEST(SerializationTest, Invalid) {
EXPECT_THAT(SerializationRoundTrip(Utf8String{"\xC1"}),
MatchesStatus(absl::StatusCode::kDataLoss,
"String is not valid utf-8: .*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/utf8_string.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/utf8_string_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
8a284c9a-cb16-42f0-bc10-a7d4c1d598ae | cpp | tensorflow/tensorflow | str_util | third_party/xla/third_party/tsl/tsl/platform/str_util.cc | third_party/xla/third_party/tsl/tsl/platform/str_util_test.cc | #include "tsl/platform/str_util.h"
#include <cctype>
#include <cstdint>
#include <string>
#include "absl/strings/ascii.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/stringpiece.h"
namespace tsl {
namespace str_util {
size_t RemoveLeadingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripLeadingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveTrailingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripTrailingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveWhitespaceContext(absl::string_view* text) {
absl::string_view new_text = absl::StripAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
bool ConsumeLeadingDigits(absl::string_view* s, uint64_t* val) {
const char* p = s->data();
const char* limit = p + s->size();
uint64_t v = 0;
while (p < limit) {
const char c = *p;
if (c < '0' || c > '9') break;
uint64_t new_v = (v * 10) + (c - '0');
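// Overflow check: if the multiply-add wrapped around, new_v / 8 falls below v.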
if (new_v / 8 < v) {
return false;
}
v = new_v;
p++;
}
if (p > s->data()) {
s->remove_prefix(p - s->data());
*val = v;
return true;
} else {
return false;
}
}
bool ConsumeNonWhitespace(absl::string_view* s, absl::string_view* val) {
const char* p = s->data();
const char* limit = p + s->size();
while (p < limit) {
const char c = *p;
if (isspace(c)) break;
p++;
}
const size_t n = p - s->data();
if (n > 0) {
*val = absl::string_view(s->data(), n);
s->remove_prefix(n);
return true;
} else {
*val = absl::string_view();
return false;
}
}
void TitlecaseString(string* s, absl::string_view delimiters) {
bool upper = true;
for (string::iterator ss = s->begin(); ss != s->end(); ++ss) {
if (upper) {
*ss = toupper(*ss);
}
upper = (delimiters.find(*ss) != absl::string_view::npos);
}
}
string StringReplace(absl::string_view s, absl::string_view oldsub,
absl::string_view newsub, bool replace_all) {
string res(s);
size_t pos = 0;
while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) {
res.replace(pos, oldsub.size(), newsub.data(), newsub.size());
pos += newsub.size();
if (oldsub.empty()) {
pos++;
}
if (!replace_all) {
break;
}
}
return res;
}
size_t Strnlen(const char* str, const size_t string_max_len) {
size_t len = 0;
while (len < string_max_len && str[len] != '\0') {
++len;
}
return len;
}
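// Converts a name to "arg def" case, e.g. "HiThere" -> "hi_there": leading
// non-alphabetic characters are dropped, a '_' is inserted before interior
// uppercase letters, and the result is lowercased.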
string ArgDefCase(absl::string_view s) {
const size_t n = s.size();
size_t extra_us = 0;
size_t to_skip = 0;
for (size_t i = 0; i < n; ++i) {
if (i == to_skip && !isalpha(s[i])) {
++to_skip;
continue;
}
if (isupper(s[i]) && i != to_skip && i > 0 && isalnum(s[i - 1])) {
++extra_us;
}
}
string result(n + extra_us - to_skip, '_');
for (size_t i = to_skip, j = 0; i < n; ++i, ++j) {
DCHECK_LT(j, result.size());
char c = s[i];
if (isalnum(c)) {
if (isupper(c)) {
if (i != to_skip) {
DCHECK_GT(j, 0);
if (result[j - 1] != '_') ++j;
}
result[j] = tolower(c);
} else {
result[j] = c;
}
}
}
return result;
}
}
} | #include "tsl/platform/str_util.h"
#include <vector>
#include "tsl/platform/test.h"
namespace tsl {
TEST(CEscape, Basic) {
EXPECT_EQ(absl::CEscape("hello"), "hello");
EXPECT_EQ(absl::CEscape("hello\n"), "hello\\n");
EXPECT_EQ(absl::CEscape("hello\r"), "hello\\r");
EXPECT_EQ(absl::CEscape("\t\r\"'"), "\\t\\r\\\"\\'");
EXPECT_EQ(absl::CEscape("\320hi\200"), "\\320hi\\200");
}
string ExpectCUnescapeSuccess(absl::string_view source) {
string dest;
string error;
EXPECT_TRUE(absl::CUnescape(source, &dest, &error)) << error;
return dest;
}
TEST(CUnescape, Basic) {
EXPECT_EQ("hello", ExpectCUnescapeSuccess("hello"));
EXPECT_EQ("hello\n", ExpectCUnescapeSuccess("hello\\n"));
EXPECT_EQ("hello\r", ExpectCUnescapeSuccess("hello\\r"));
EXPECT_EQ("\t\r\"'", ExpectCUnescapeSuccess("\\t\\r\\\"\\'"));
EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200"));
}
TEST(CUnescape, HandlesCopyOnWriteStrings) {
string dest = "hello";
string read = dest;
string error;
absl::string_view source = "llohe";
EXPECT_TRUE(absl::CUnescape(source, &dest, &error));
EXPECT_EQ("hello", read);
}
TEST(StripTrailingWhitespace, Basic) {
string test;
test = "hello";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "hello");
test = "foo ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "foo");
test = " ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = "";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = " abc\t";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, " abc");
}
TEST(RemoveLeadingWhitespace, Basic) {
string text = " \t \n \r Quick\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 11);
EXPECT_EQ(data, absl::string_view("Quick\t"));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick\t"));
}
TEST(RemoveLeadingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveTrailingWhitespace, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 2);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
}
TEST(RemoveTrailingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveWhitespaceContext, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 13);
EXPECT_EQ(data, absl::string_view("Quick"));
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick"));
text = "";
data = text;
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
void TestConsumeLeadingDigits(absl::string_view s, int64_t expected,
absl::string_view remaining) {
uint64 v;
absl::string_view input(s);
if (str_util::ConsumeLeadingDigits(&input, &v)) {
EXPECT_EQ(v, static_cast<uint64>(expected));
EXPECT_EQ(input, remaining);
} else {
EXPECT_LT(expected, 0);
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeLeadingDigits, Basic) {
using str_util::ConsumeLeadingDigits;
TestConsumeLeadingDigits("123", 123, "");
TestConsumeLeadingDigits("a123", -1, "a123");
TestConsumeLeadingDigits("9_", 9, "_");
TestConsumeLeadingDigits("11111111111xyz", 11111111111ll, "xyz");
TestConsumeLeadingDigits("1111111111111111111111111111111xyz", -1,
"1111111111111111111111111111111xyz");
TestConsumeLeadingDigits("18446744073709551616xyz", -1,
"18446744073709551616xyz");
TestConsumeLeadingDigits("18446744073709551615xyz", 18446744073709551615ull,
"xyz");
TestConsumeLeadingDigits("184467440737095516159yz", -1,
"184467440737095516159yz");
}
void TestConsumeNonWhitespace(absl::string_view s, absl::string_view expected,
absl::string_view remaining) {
absl::string_view v;
absl::string_view input(s);
if (str_util::ConsumeNonWhitespace(&input, &v)) {
EXPECT_EQ(v, expected);
EXPECT_EQ(input, remaining);
} else {
EXPECT_EQ(expected, "");
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeNonWhitespace, Basic) {
TestConsumeNonWhitespace("", "", "");
TestConsumeNonWhitespace(" ", "", " ");
TestConsumeNonWhitespace("abc", "abc", "");
TestConsumeNonWhitespace("abc ", "abc", " ");
}
TEST(ConsumePrefix, Basic) {
string s("abcdef");
absl::string_view input(s);
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdefg"));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abce"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, ""));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdeg"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcdef"));
EXPECT_EQ(input, "");
input = s;
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcde"));
EXPECT_EQ(input, "f");
}
TEST(StripPrefix, Basic) {
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdefg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abce"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", ""), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdeg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdef"), "");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcde"), "f");
}
TEST(JoinStrings, Basic) {
std::vector<string> s;
s = {"hi"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi");
s = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi there strings");
std::vector<absl::string_view> sp;
sp = {"hi"};
EXPECT_EQ(absl::StrJoin(sp, ",,"), "hi");
sp = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(sp, "--"), "hi--there--strings");
}
TEST(JoinStrings, Join3) {
std::vector<string> s;
s = {"hi"};
auto l1 = [](string* out, string s) { *out += s; };
EXPECT_EQ(str_util::Join(s, " ", l1), "hi");
s = {"hi", "there", "strings"};
auto l2 = [](string* out, string s) { *out += s[0]; };
EXPECT_EQ(str_util::Join(s, " ", l2), "h t s");
}
TEST(Split, Basic) {
EXPECT_TRUE(str_util::Split("", ',').empty());
EXPECT_EQ(absl::StrJoin(str_util::Split("a", ','), "|"), "a");
EXPECT_EQ(absl::StrJoin(str_util::Split(",", ','), "|"), "|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,b,c", ','), "|"), "a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,,,b,,c,", ','), "|"), "a|||b||c|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a!,!b,!c,", ",!"), "|"),
"a|||b||c|");
EXPECT_EQ(absl::StrJoin(
str_util::Split("a,,,b,,c,", ',', str_util::SkipEmpty()), "|"),
"a|b|c");
EXPECT_EQ(
absl::StrJoin(
str_util::Split("a, ,b,,c,", ',', str_util::SkipWhitespace()), "|"),
"a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a. !b,;c,", ".,;!",
str_util::SkipWhitespace()),
"|"),
"a|b|c");
}
TEST(Lowercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToLower(""));
EXPECT_EQ("hello", absl::AsciiStrToLower("hello"));
EXPECT_EQ("hello world", absl::AsciiStrToLower("Hello World"));
}
TEST(Uppercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToUpper(""));
EXPECT_EQ("HELLO", absl::AsciiStrToUpper("hello"));
EXPECT_EQ("HELLO WORLD", absl::AsciiStrToUpper("Hello World"));
}
TEST(SnakeCase, Basic) {
EXPECT_EQ("", str_util::ArgDefCase(""));
EXPECT_EQ("", str_util::ArgDefCase("!"));
EXPECT_EQ("", str_util::ArgDefCase("5"));
EXPECT_EQ("", str_util::ArgDefCase("!:"));
EXPECT_EQ("", str_util::ArgDefCase("5-5"));
EXPECT_EQ("", str_util::ArgDefCase("_!"));
EXPECT_EQ("", str_util::ArgDefCase("_5"));
EXPECT_EQ("a", str_util::ArgDefCase("_a"));
EXPECT_EQ("a", str_util::ArgDefCase("_A"));
EXPECT_EQ("i", str_util::ArgDefCase("I"));
EXPECT_EQ("i", str_util::ArgDefCase("i"));
EXPECT_EQ("i_", str_util::ArgDefCase("I%"));
EXPECT_EQ("i_", str_util::ArgDefCase("i%"));
EXPECT_EQ("i", str_util::ArgDefCase("%I"));
EXPECT_EQ("i", str_util::ArgDefCase("-i"));
EXPECT_EQ("i", str_util::ArgDefCase("3i"));
EXPECT_EQ("i", str_util::ArgDefCase("32i"));
EXPECT_EQ("i3", str_util::ArgDefCase("i3"));
EXPECT_EQ("i_a3", str_util::ArgDefCase("i_A3"));
EXPECT_EQ("i_i", str_util::ArgDefCase("II"));
EXPECT_EQ("i_i", str_util::ArgDefCase("I_I"));
EXPECT_EQ("i__i", str_util::ArgDefCase("I__I"));
EXPECT_EQ("i_i_32", str_util::ArgDefCase("II-32"));
EXPECT_EQ("ii_32", str_util::ArgDefCase("Ii-32"));
EXPECT_EQ("hi_there", str_util::ArgDefCase("HiThere"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi!Hi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("HiHi"));
EXPECT_EQ("hihi", str_util::ArgDefCase("Hihi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi_Hi"));
}
TEST(TitlecaseString, Basic) {
string s = "sparse_lookup";
str_util::TitlecaseString(&s, "_");
ASSERT_EQ(s, "Sparse_Lookup");
s = "sparse_lookup";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Sparse_lookup");
s = "dense";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Dense");
}
TEST(StringReplace, Basic) {
EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
true));
}
TEST(StringReplace, OnlyFirst) {
EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
false));
}
TEST(StringReplace, IncreaseLength) {
EXPECT_EQ("a b c",
str_util::StringReplace("abc", "b", " b ", true));
}
TEST(StringReplace, IncreaseLengthMultipleMatches) {
EXPECT_EQ("a b b c",
str_util::StringReplace("abbc", "b", " b ", true));
}
TEST(StringReplace, NoChange) {
EXPECT_EQ("abc",
str_util::StringReplace("abc", "d", "X", true));
}
TEST(StringReplace, EmptyStringReplaceFirst) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", false));
}
TEST(StringReplace, EmptyStringReplaceAll) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", true));
}
TEST(Strnlen, Basic) {
EXPECT_EQ(0, str_util::Strnlen("ab", 0));
EXPECT_EQ(1, str_util::Strnlen("a", 1));
EXPECT_EQ(2, str_util::Strnlen("abcd", 2));
EXPECT_EQ(3, str_util::Strnlen("abc", 10));
EXPECT_EQ(4, str_util::Strnlen("a \t\n", 10));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/str_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/str_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f22e62fc-1f6e-4485-be68-31adc5cae6ff | cpp | tensorflow/tensorflow | time | tensorflow/lite/profiling/time.cc | tensorflow/lite/profiling/time_test.cc | #include "tensorflow/lite/profiling/time.h"
#if defined(_MSC_VER)
#include <chrono>
#include <thread>
#else
#include <sys/time.h>
#include <time.h>
#endif
namespace tflite {
namespace profiling {
namespace time {
#if defined(_MSC_VER)
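// Windows: derive a monotonic microsecond timestamp from std::chrono.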
uint64_t NowMicros() {
return static_cast<uint64_t>(
std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::steady_clock::now().time_since_epoch())
.count());
}
void SleepForMicros(uint64_t micros) {
std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
#else
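// POSIX: use clock_gettime; Apple exposes its raw monotonic clock through
// clock_gettime_nsec_np.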
uint64_t NowMicros() {
#if defined(__APPLE__)
return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW) / 1e3;
#else
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<uint64_t>(ts.tv_sec) * 1e6 +
static_cast<uint64_t>(ts.tv_nsec) / 1e3;
#endif
}
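// nanosleep takes whole seconds plus nanoseconds, so split the request.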
void SleepForMicros(uint64_t micros) {
timespec sleep_time;
sleep_time.tv_sec = micros / 1e6;
micros -= sleep_time.tv_sec * 1e6;
sleep_time.tv_nsec = micros * 1e3;
nanosleep(&sleep_time, nullptr);
}
#endif
}
}
} | #include "tensorflow/lite/profiling/time.h"
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace time {
TEST(TimeTest, NowMicros) {
auto now0 = NowMicros();
EXPECT_GT(now0, 0);
auto now1 = NowMicros();
EXPECT_GE(now1, now0);
}
TEST(TimeTest, SleepForMicros) {
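  // Sleeps are lower bounds: the clock must advance by at least the
  // requested number of microseconds.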
SleepForMicros(0);
auto now0 = NowMicros();
SleepForMicros(50);
auto now1 = NowMicros();
EXPECT_GE(now1, now0 + 50);
now0 = NowMicros();
SleepForMicros(1e6 + 50);
now1 = NowMicros();
EXPECT_GE(now1, now0 + 1e6 + 50);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/time.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/time_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fe75d615-573a-4be2-9609-b996505d1261 | cpp | google/tensorstore | memory_key_value_store | tensorstore/kvstore/memory/memory_key_value_store.cc | tensorstore/kvstore/memory/memory_key_value_store_test.cc | #include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/btree_map.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::internal_kvstore::DeleteRangeEntry;
using ::tensorstore::internal_kvstore::kReadModifyWrite;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::kvstore::SupportedFeatures;
TimestampedStorageGeneration GenerationNow(StorageGeneration generation) {
return TimestampedStorageGeneration{std::move(generation), absl::Now()};
}
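// The in-memory key/value map, shared by every driver bound to the same
// "memory_key_value_store" context resource.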
struct StoredKeyValuePairs
: public internal::AtomicReferenceCount<StoredKeyValuePairs> {
using Ptr = internal::IntrusivePtr<StoredKeyValuePairs>;
struct ValueWithGenerationNumber {
absl::Cord value;
uint64_t generation_number;
StorageGeneration generation() const {
return StorageGeneration::FromUint64(generation_number);
}
};
using Map = absl::btree_map<std::string, ValueWithGenerationNumber>;
std::pair<Map::iterator, Map::iterator> Find(const std::string& inclusive_min,
const std::string& exclusive_max)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return {values.lower_bound(inclusive_min),
exclusive_max.empty() ? values.end()
: values.lower_bound(exclusive_max)};
}
std::pair<Map::iterator, Map::iterator> Find(const KeyRange& range)
ABSL_SHARED_LOCKS_REQUIRED(mutex) {
return Find(range.inclusive_min, range.exclusive_max);
}
absl::Mutex mutex;
uint64_t next_generation_number ABSL_GUARDED_BY(mutex) = 0;
Map values ABSL_GUARDED_BY(mutex);
};
struct MemoryKeyValueStoreResource
: public internal::ContextResourceTraits<MemoryKeyValueStoreResource> {
constexpr static char id[] = "memory_key_value_store";
struct Spec {};
using Resource = StoredKeyValuePairs::Ptr;
static Spec Default() { return {}; }
static constexpr auto JsonBinder() { return jb::Object(); }
static Result<Resource> Create(
Spec, internal::ContextResourceCreationContext context) {
return StoredKeyValuePairs::Ptr(new StoredKeyValuePairs);
}
static Spec GetSpec(const Resource&,
const internal::ContextSpecBuilder& builder) {
return {};
}
};
const internal::ContextResourceRegistration<MemoryKeyValueStoreResource>
resource_registration;
struct MemoryDriverSpecData {
Context::Resource<MemoryKeyValueStoreResource> memory_key_value_store;
bool atomic = true;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.memory_key_value_store, x.atomic);
};
constexpr static auto default_json_binder = jb::Object(
jb::Member(
MemoryKeyValueStoreResource::id,
jb::Projection<&MemoryDriverSpecData::memory_key_value_store>()),
jb::Member("atomic", jb::Projection<&MemoryDriverSpecData::atomic>(
jb::DefaultValue([](auto* y) { *y = true; }))));
};
class MemoryDriverSpec
: public internal_kvstore::RegisteredDriverSpec<MemoryDriverSpec,
MemoryDriverSpecData> {
public:
static constexpr char id[] = "memory";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<std::string> ToUrl(std::string_view path) const override {
    return tensorstore::StrCat(id, "://", internal::PercentEncodeUriPath(path));
}
};
class MemoryDriver
: public internal_kvstore::RegisteredDriver<MemoryDriver,
MemoryDriverSpec> {
public:
Future<ReadResult> Read(Key key, ReadOptions options) override;
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override;
Future<const void> DeleteRange(KeyRange range) override;
void ListImpl(ListOptions options, ListReceiver receiver) override;
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override;
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) override;
class TransactionNode;
StoredKeyValuePairs& data() { return **spec_.memory_key_value_store; }
absl::Status GetBoundSpecData(MemoryDriverSpecData& spec) const {
spec = spec_;
return absl::Status();
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
SupportedFeatures::kAtomicWriteWithoutOverwrite;
}
SpecData spec_;
};
Future<kvstore::DriverPtr> MemoryDriverSpec::DoOpen() const {
auto driver = internal::MakeIntrusivePtr<MemoryDriver>();
driver->spec_ = data_;
return driver;
}
using BufferedReadModifyWriteEntry =
internal_kvstore::AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry;
class MemoryDriver::TransactionNode
: public internal_kvstore::AtomicTransactionNode {
using Base = internal_kvstore::AtomicTransactionNode;
public:
using Base::Base;
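  // Commits one phase atomically: under the lock, every entry's generation
  // condition is validated before any mutation is applied; on a mismatch the
  // writeback is retried against the observed generations.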
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override
ABSL_NO_THREAD_SAFETY_ANALYSIS {
if (!single_phase_mutation.remaining_entries_.HasError()) {
auto& data = static_cast<MemoryDriver&>(*this->driver()).data();
TimestampedStorageGeneration generation;
UniqueWriterLock lock(data.mutex);
absl::Time commit_time = absl::Now();
if (!ValidateEntryConditions(data, single_phase_mutation, commit_time)) {
lock.unlock();
this->RetryAtomicWriteback(commit_time);
return;
}
ApplyMutation(data, single_phase_mutation, commit_time);
lock.unlock();
this->AtomicCommitWritebackSuccess();
} else {
internal_kvstore::WritebackError(single_phase_mutation);
}
MultiPhaseMutation::AllEntriesDone(single_phase_mutation);
}
static bool ValidateEntryConditions(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
bool validated = true;
for (auto& entry : single_phase_mutation.entries_) {
if (!ValidateEntryConditions(data, entry, commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
internal_kvstore::MutationEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
if (entry.entry_type() == kReadModifyWrite) {
return ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(entry), commit_time);
}
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
bool validated = true;
for (auto& deleted_entry : dr_entry.superseded_) {
if (!ValidateEntryConditions(
data, static_cast<BufferedReadModifyWriteEntry&>(deleted_entry),
commit_time)) {
validated = false;
}
}
return validated;
}
static bool ValidateEntryConditions(StoredKeyValuePairs& data,
BufferedReadModifyWriteEntry& entry,
const absl::Time& commit_time)
ABSL_SHARED_LOCKS_REQUIRED(data.mutex) {
auto& stamp = entry.stamp();
auto if_equal = StorageGeneration::Clean(stamp.generation);
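    // An unknown expected generation means the write is unconditional.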
if (StorageGeneration::IsUnknown(if_equal)) {
assert(stamp.time == absl::InfiniteFuture());
return true;
}
auto it = data.values.find(entry.key_);
if (it == data.values.end()) {
if (StorageGeneration::IsNoValue(if_equal)) {
stamp.time = commit_time;
return true;
}
} else if (if_equal == it->second.generation()) {
stamp.time = commit_time;
return true;
}
return false;
}
static void ApplyMutation(
StoredKeyValuePairs& data,
internal_kvstore::SinglePhaseMutation& single_phase_mutation,
const absl::Time& commit_time) ABSL_EXCLUSIVE_LOCKS_REQUIRED(data.mutex) {
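    // Each entry is either a single-key read-modify-write or a range delete.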
for (auto& entry : single_phase_mutation.entries_) {
if (entry.entry_type() == kReadModifyWrite) {
auto& rmw_entry = static_cast<BufferedReadModifyWriteEntry&>(entry);
auto& stamp = rmw_entry.stamp();
stamp.time = commit_time;
auto value_state = rmw_entry.value_state_;
if (!StorageGeneration::IsDirty(stamp.generation)) {
} else if (value_state == ReadResult::kMissing) {
data.values.erase(rmw_entry.key_);
stamp.generation = StorageGeneration::NoValue();
} else {
assert(value_state == ReadResult::kValue);
auto& v = data.values[rmw_entry.key_];
v.generation_number = data.next_generation_number++;
v.value = std::move(rmw_entry.value_);
stamp.generation = v.generation();
}
} else {
auto& dr_entry = static_cast<DeleteRangeEntry&>(entry);
auto it_range = data.Find(dr_entry.key_, dr_entry.exclusive_max_);
data.values.erase(it_range.first, it_range.second);
}
}
}
};
Future<ReadResult> MemoryDriver::Read(Key key, ReadOptions options) {
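  // Reads only need a shared lock; generation and byte-range options are
  // checked against the stored entry.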
auto& data = this->data();
absl::ReaderMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
return ReadResult::Missing(GenerationNow(StorageGeneration::NoValue()));
}
auto stamp = GenerationNow(it->second.generation());
if (!options.generation_conditions.Matches(it->second.generation())) {
return ReadResult::Unspecified(std::move(stamp));
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range, options.byte_range.Validate(it->second.value.size()));
return ReadResult::Value(internal::GetSubCord(it->second.value, byte_range),
std::move(stamp));
}
Future<TimestampedStorageGeneration> MemoryDriver::Write(
Key key, std::optional<Value> value, WriteOptions options) {
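  // An absent value requests deletion; generation conditions gate both the
  // write and delete paths.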
using ValueWithGenerationNumber =
StoredKeyValuePairs::ValueWithGenerationNumber;
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
auto& values = data.values;
auto it = values.find(key);
if (it == values.end()) {
if (!options.generation_conditions.MatchesNoValue()) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
return GenerationNow(StorageGeneration::NoValue());
}
it = values
.emplace(std::move(key),
ValueWithGenerationNumber{*std::move(value),
data.next_generation_number++})
.first;
return GenerationNow(it->second.generation());
}
if (!options.generation_conditions.Matches(it->second.generation())) {
return GenerationNow(StorageGeneration::Unknown());
}
if (!value) {
values.erase(it);
return GenerationNow(StorageGeneration::NoValue());
}
it->second.generation_number = data.next_generation_number++;
it->second.value = *std::move(value);
return GenerationNow(it->second.generation());
}
Future<const void> MemoryDriver::DeleteRange(KeyRange range) {
auto& data = this->data();
absl::WriterMutexLock lock(&data.mutex);
if (!range.empty()) {
auto it_range = data.Find(range);
data.values.erase(it_range.first, it_range.second);
}
return absl::OkStatus();
}
void MemoryDriver::ListImpl(ListOptions options, ListReceiver receiver) {
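  // Matching entries are collected under the lock and emitted afterwards so
  // the receiver never runs with the mutex held.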
auto& data = this->data();
std::atomic<bool> cancelled{false};
execution::set_starting(receiver, [&cancelled] {
cancelled.store(true, std::memory_order_relaxed);
});
std::vector<ListEntry> entries;
{
absl::ReaderMutexLock lock(&data.mutex);
auto it_range = data.Find(options.range);
for (auto it = it_range.first; it != it_range.second; ++it) {
if (cancelled.load(std::memory_order_relaxed)) break;
std::string_view key = it->first;
entries.push_back(ListEntry{
std::string(
key.substr(std::min(options.strip_prefix_length, key.size()))),
ListEntry::checked_size(it->second.value.size()),
});
}
}
for (auto& entry : entries) {
if (cancelled.load(std::memory_order_relaxed)) break;
execution::set_value(receiver, std::move(entry));
}
execution::set_done(receiver);
execution::set_stopping(receiver);
}
absl::Status MemoryDriver::ReadModifyWrite(
internal::OpenTransactionPtr& transaction, size_t& phase, Key key,
ReadModifyWriteSource& source) {
if (!spec_.atomic) {
return Driver::ReadModifyWrite(transaction, phase, std::move(key), source);
}
return internal_kvstore::AddReadModifyWrite<TransactionNode>(
this, transaction, phase, std::move(key), source);
}
absl::Status MemoryDriver::TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction, KeyRange range) {
if (!spec_.atomic) {
return Driver::TransactionalDeleteRange(transaction, std::move(range));
}
return internal_kvstore::AddDeleteRange<TransactionNode>(this, transaction,
std::move(range));
}
Result<kvstore::Spec> ParseMemoryUrl(std::string_view url) {
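  // Accepts "memory://<path>"; query strings and fragments are rejected.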
auto parsed = internal::ParseGenericUri(url);
assert(parsed.scheme == tensorstore::MemoryDriverSpec::id);
if (!parsed.query.empty()) {
return absl::InvalidArgumentError("Query string not supported");
}
if (!parsed.fragment.empty()) {
return absl::InvalidArgumentError("Fragment identifier not supported");
}
auto driver_spec = internal::MakeIntrusivePtr<MemoryDriverSpec>();
driver_spec->data_.memory_key_value_store =
Context::Resource<MemoryKeyValueStoreResource>::DefaultSpec();
return {std::in_place, std::move(driver_spec),
internal::PercentDecode(parsed.authority_and_path)};
}
}
kvstore::DriverPtr GetMemoryKeyValueStore(bool atomic) {
auto ptr = internal::MakeIntrusivePtr<MemoryDriver>();
ptr->spec_.memory_key_value_store =
Context::Default().GetResource<MemoryKeyValueStoreResource>().value();
ptr->spec_.atomic = atomic;
return ptr;
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(tensorstore::MemoryDriver)
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::MemoryDriverSpec>
registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
url_scheme_registration{tensorstore::MemoryDriverSpec::id,
tensorstore::ParseMemoryUrl};
} | #include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::Context;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::serialization::SerializationRoundTrip;
TEST(MemoryKeyValueStoreTest, Basic) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(MemoryKeyValueStoreTest, DeletePrefix) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRange) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeToEnd) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(MemoryKeyValueStoreTest, DeleteRangeFromBeginning) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
#if 0
TEST(MemoryKeyValueStoreTest, CopyRange) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreCopyRange(store);
}
#endif
TEST(MemoryKeyValueStoreTest, List) {
auto store = tensorstore::GetMemoryKeyValueStore();
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST(MemoryKeyValueStoreTest, Open) {
auto context = Context::Default();
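  // Stores opened from the same context share one underlying map; binding a
  // fresh "memory_key_value_store" resource yields an isolated store.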
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "memory"}}, context).result());
TENSORSTORE_ASSERT_OK(kvstore::Write(store, "key", absl::Cord("value")));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open({{"driver", "memory"}}, context).result());
EXPECT_THAT(kvstore::Read(store2, "key").result(),
MatchesKvsReadResult(absl::Cord("value")));
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto other_context, Context::FromJson({{"memory_key_value_store",
::nlohmann::json::object_t{}}},
context));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store3,
kvstore::Open({{"driver", "memory"}}, other_context).result());
EXPECT_THAT(kvstore::Read(store3, "key").result(),
MatchesKvsReadResultNotFound());
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store, kvstore::Open({{"driver", "memory"}}, context).result());
EXPECT_EQ("value", kvstore::Read(store, "key").value().value);
}
}
TEST(MemoryKeyValueStoreTest, ListWithPath) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store,
kvstore::Open({{"driver", "memory"}, {"path", "p/"}}, context).result());
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST(MemoryKeyValueStoreTest, SpecRoundtrip) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {
{"driver", "memory"},
};
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(MemoryKeyValueStoreTest, SpecRoundtripWithContextSpec) {
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.spec_request_options.Set(tensorstore::unbind_context).IgnoreError();
options.full_spec = {
{"driver", "memory"},
{"memory_key_value_store", "memory_key_value_store#a"},
{"context",
{
{"memory_key_value_store#a", ::nlohmann::json::object_t()},
}},
};
options.check_data_persists = false;
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(MemoryKeyValueStoreTest, InvalidSpec) {
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "memory"}, {"extra", "key"}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(MemoryKeyValueStoreTest, BoundSpec) {
auto context = tensorstore::Context::Default();
::nlohmann::json json_spec{{"driver", "memory"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
kvstore::Spec::FromJson(json_spec));
TENSORSTORE_ASSERT_OK(spec.BindContext(context));
std::string bound_spec_cache_key;
tensorstore::internal::EncodeCacheKey(&bound_spec_cache_key, spec.driver);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store, kvstore::Open(spec).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_spec,
store.spec(tensorstore::retain_context));
std::string store_cache_key;
tensorstore::internal::EncodeCacheKey(&store_cache_key, store.driver);
EXPECT_EQ(bound_spec_cache_key, store_cache_key);
new_spec.StripContext();
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{false}),
::testing::Optional(json_spec));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2, kvstore::Open(json_spec, context).result());
std::string store2_cache_key;
tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
EXPECT_EQ(store_cache_key, store2_cache_key);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto store2,
kvstore::Open(
{{"driver", "memory"},
{"context",
{{"memory_key_value_store#a", "memory_key_value_store"}}},
{"memory_key_value_store", "memory_key_value_store#a"}},
context)
.result());
std::string store2_cache_key;
tensorstore::internal::EncodeCacheKey(&store2_cache_key, store2.driver);
EXPECT_EQ(store_cache_key, store2_cache_key);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
kvstore::Open(json_spec).result());
std::string store3_cache_key;
tensorstore::internal::EncodeCacheKey(&store3_cache_key, store3.driver);
EXPECT_NE(store_cache_key, store3_cache_key);
}
}
TEST(MemoryKeyValueStoreTest, OpenCache) {
auto context = tensorstore::Context::Default();
::nlohmann::json json_spec{{"driver", "memory"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
kvstore::Open(json_spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
kvstore::Open(json_spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store3,
kvstore::Open(json_spec).result());
EXPECT_EQ(store1.driver.get(), store2.driver.get());
EXPECT_NE(store1.driver.get(), store3.driver.get());
std::string cache_key1, cache_key3;
tensorstore::internal::EncodeCacheKey(&cache_key1, store1.driver);
tensorstore::internal::EncodeCacheKey(&cache_key3, store3.driver);
EXPECT_NE(cache_key1, cache_key3);
}
TEST(MemoryKeyValueStoreTest, ContextBinding) {
auto context1 = Context::Default();
auto context2 = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto base_spec, kvstore::Spec::FromJson({{"driver", "memory"}}));
auto base_spec1 = base_spec;
TENSORSTORE_ASSERT_OK(base_spec1.Set(context1));
EXPECT_THAT(
base_spec1.ToJson(),
::testing::Optional(MatchesJson(
{{"driver", "memory"},
{"context",
{{"memory_key_value_store", ::nlohmann::json::object_t()}}}})));
auto base_spec2 = base_spec;
TENSORSTORE_ASSERT_OK(base_spec2.Set(context2));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store1,
kvstore::Open(base_spec, context1).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store2,
kvstore::Open(base_spec, context2).result());
ASSERT_NE(store1.driver, store2.driver);
EXPECT_THAT(kvstore::Open(base_spec1).result(), ::testing::Optional(store1));
EXPECT_THAT(kvstore::Open(base_spec2).result(), ::testing::Optional(store2));
auto base_spec3 = base_spec1;
TENSORSTORE_ASSERT_OK(base_spec3.Set(context2));
EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store1));
TENSORSTORE_ASSERT_OK(base_spec3.Set(tensorstore::strip_context, context2));
EXPECT_THAT(kvstore::Open(base_spec3).result(), ::testing::Optional(store2));
}
TEST(MemoryKeyValueStoreTest, SpecSerialization) {
::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
kvstore::Spec::FromJson(json_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
SerializationRoundTrip(spec));
EXPECT_THAT(spec_roundtripped.ToJson(),
::testing::Optional(MatchesJson(json_spec)));
}
TEST(MemoryKeyValueStoreTest, KvStoreSerialization) {
::nlohmann::json json_spec{{"driver", "memory"}, {"path", "abc/"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open(json_spec).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_roundtripped,
SerializationRoundTrip(store));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_roundtripped,
store_roundtripped.spec());
EXPECT_THAT(spec_roundtripped.ToJson(),
::testing::Optional(MatchesJson(json_spec)));
}
TEST(MemoryKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip({{"driver", "memory"}},
"memory:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "memory"}, {"path", "abc/"}}, "memory:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "memory"}, {"path", "abc def/"}}, "memory:
}
TEST(MemoryKeyValueStoreTest, InvalidUri) {
  EXPECT_THAT(kvstore::Spec::FromUrl("memory://abc?query"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
  EXPECT_THAT(kvstore::Spec::FromUrl("memory://abc#fragment"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/memory/memory_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/memory/memory_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0d24ce96-9a8f-48fc-a5df-5aaec4cdd83a | cpp | google/cel-cpp | source | common/source.cc | common/source_test.cc | #include "common/source.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/overload.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "internal/unicode.h"
#include "internal/utf8.h"
namespace cel {
SourcePosition SourceContentView::size() const {
return static_cast<SourcePosition>(absl::visit(
absl::Overload(
[](absl::Span<const char> view) { return view.size(); },
[](absl::Span<const uint8_t> view) { return view.size(); },
[](absl::Span<const char16_t> view) { return view.size(); },
[](absl::Span<const char32_t> view) { return view.size(); }),
view_));
}
bool SourceContentView::empty() const {
return absl::visit(
absl::Overload(
[](absl::Span<const char> view) { return view.empty(); },
[](absl::Span<const uint8_t> view) { return view.empty(); },
[](absl::Span<const char16_t> view) { return view.empty(); },
[](absl::Span<const char32_t> view) { return view.empty(); }),
view_);
}
char32_t SourceContentView::at(SourcePosition position) const {
ABSL_DCHECK_GE(position, 0);
ABSL_DCHECK_LT(position, size());
return absl::visit(
absl::Overload(
[position =
static_cast<size_t>(position)](absl::Span<const char> view) {
return static_cast<char32_t>(static_cast<uint8_t>(view[position]));
},
[position =
static_cast<size_t>(position)](absl::Span<const uint8_t> view) {
return static_cast<char32_t>(view[position]);
},
[position =
static_cast<size_t>(position)](absl::Span<const char16_t> view) {
return static_cast<char32_t>(view[position]);
},
[position =
static_cast<size_t>(position)](absl::Span<const char32_t> view) {
return static_cast<char32_t>(view[position]);
}),
view_);
}
std::string SourceContentView::ToString(SourcePosition begin,
SourcePosition end) const {
ABSL_DCHECK_GE(begin, 0);
ABSL_DCHECK_LE(end, size());
ABSL_DCHECK_LE(begin, end);
return absl::visit(
absl::Overload(
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char> view) {
view = view.subspan(begin, end - begin);
return std::string(view.data(), view.size());
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const uint8_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 2);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char16_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 3);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
},
[begin = static_cast<size_t>(begin),
end = static_cast<size_t>(end)](absl::Span<const char32_t> view) {
view = view.subspan(begin, end - begin);
std::string result;
result.reserve(view.size() * 4);
for (const auto& code_point : view) {
internal::Utf8Encode(result, code_point);
}
result.shrink_to_fit();
return result;
}),
view_);
}
void SourceContentView::AppendToString(std::string& dest) const {
absl::visit(absl::Overload(
[&dest](absl::Span<const char> view) {
dest.append(view.data(), view.size());
},
[&dest](absl::Span<const uint8_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
},
[&dest](absl::Span<const char16_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
},
[&dest](absl::Span<const char32_t> view) {
for (const auto& code_point : view) {
internal::Utf8Encode(dest, code_point);
}
}),
view_);
}
namespace common_internal {
class SourceImpl : public Source {
public:
SourceImpl(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets)
: description_(std::move(description)),
line_offsets_(std::move(line_offsets)) {}
absl::string_view description() const final { return description_; }
absl::Span<const SourcePosition> line_offsets() const final {
return absl::MakeConstSpan(line_offsets_);
}
private:
const std::string description_;
const absl::InlinedVector<SourcePosition, 1> line_offsets_;
};
namespace {
class AsciiSource final : public SourceImpl {
public:
AsciiSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char> text_;
};
class Latin1Source final : public SourceImpl {
public:
Latin1Source(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<uint8_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<uint8_t> text_;
};
class BasicPlaneSource final : public SourceImpl {
public:
BasicPlaneSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char16_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char16_t> text_;
};
class SupplementalPlaneSource final : public SourceImpl {
public:
SupplementalPlaneSource(std::string description,
absl::InlinedVector<SourcePosition, 1> line_offsets,
std::vector<char32_t> text)
: SourceImpl(std::move(description), std::move(line_offsets)),
text_(std::move(text)) {}
ContentView content() const override {
return MakeContentView(absl::MakeConstSpan(text_));
}
private:
const std::vector<char32_t> text_;
};
template <typename T>
struct SourceTextTraits;
template <>
struct SourceTextTraits<absl::string_view> {
using iterator_type = absl::string_view;
static iterator_type Begin(absl::string_view text) { return text; }
static void Advance(iterator_type& it, size_t n) { it.remove_prefix(n); }
static void AppendTo(std::vector<uint8_t>& out, absl::string_view text,
size_t n) {
const auto* in = reinterpret_cast<const uint8_t*>(text.data());
out.insert(out.end(), in, in + n);
}
static std::vector<char> ToVector(absl::string_view in) {
std::vector<char> out;
out.reserve(in.size());
out.insert(out.end(), in.begin(), in.end());
return out;
}
};
template <>
struct SourceTextTraits<absl::Cord> {
using iterator_type = absl::Cord::CharIterator;
static iterator_type Begin(const absl::Cord& text) {
return text.char_begin();
}
static void Advance(iterator_type& it, size_t n) {
absl::Cord::Advance(&it, n);
}
static void AppendTo(std::vector<uint8_t>& out, const absl::Cord& text,
size_t n) {
auto it = text.char_begin();
while (n > 0) {
auto str = absl::Cord::ChunkRemaining(it);
size_t to_append = std::min(n, str.size());
const auto* in = reinterpret_cast<const uint8_t*>(str.data());
out.insert(out.end(), in, in + to_append);
n -= to_append;
absl::Cord::Advance(&it, to_append);
}
}
static std::vector<char> ToVector(const absl::Cord& in) {
std::vector<char> out;
out.reserve(in.size());
for (const auto& chunk : in.Chunks()) {
out.insert(out.end(), chunk.begin(), chunk.end());
}
return out;
}
};
template <typename T>
absl::StatusOr<SourcePtr> NewSourceImpl(std::string description, const T& text,
const size_t text_size) {
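  // Single pass over the UTF-8 input: record line offsets and keep the text
  // in the narrowest storage that fits, upgrading (via the labels below)
  // from ASCII to Latin-1 to the basic multilingual plane to full 32-bit
  // code points as wider characters appear.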
if (ABSL_PREDICT_FALSE(
text_size >
static_cast<size_t>(std::numeric_limits<int32_t>::max()))) {
return absl::InvalidArgumentError("expression larger than 2GiB limit");
}
using Traits = SourceTextTraits<T>;
size_t index = 0;
typename Traits::iterator_type it = Traits::Begin(text);
SourcePosition offset = 0;
char32_t code_point;
size_t code_units;
std::vector<uint8_t> data8;
std::vector<char16_t> data16;
std::vector<char32_t> data32;
absl::InlinedVector<SourcePosition, 1> line_offsets;
while (index < text_size) {
std::tie(code_point, code_units) = cel::internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
cel::internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0x7f) {
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
if (code_point <= 0xff) {
data8.reserve(text_size);
Traits::AppendTo(data8, text, index);
data8.push_back(static_cast<uint8_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto latin1;
}
if (code_point <= 0xffff) {
data16.reserve(text_size);
for (size_t offset = 0; offset < index; offset++) {
data16.push_back(static_cast<uint8_t>(text[offset]));
}
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto basic;
}
data32.reserve(text_size);
for (size_t offset = 0; offset < index; offset++) {
data32.push_back(static_cast<char32_t>(text[offset]));
}
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<AsciiSource>(
std::move(description), std::move(line_offsets), Traits::ToVector(text));
latin1:
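  // Every code point seen so far fits in 8 bits.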
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0xff) {
data8.push_back(static_cast<uint8_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
if (code_point <= 0xffff) {
data16.reserve(text_size);
for (const auto& value : data8) {
data16.push_back(value);
}
std::vector<uint8_t>().swap(data8);
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto basic;
}
data32.reserve(text_size);
for (const auto& value : data8) {
data32.push_back(value);
}
std::vector<uint8_t>().swap(data8);
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<Latin1Source>(
std::move(description), std::move(line_offsets), std::move(data8));
basic:
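  // Every code point seen so far fits in 16 bits.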
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
if (code_point <= 0xffff) {
data16.push_back(static_cast<char16_t>(code_point));
Traits::Advance(it, code_units);
index += code_units;
++offset;
continue;
}
data32.reserve(text_size);
for (const auto& value : data16) {
data32.push_back(static_cast<char32_t>(value));
}
std::vector<char16_t>().swap(data16);
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
goto supplemental;
}
line_offsets.push_back(offset + 1);
return std::make_unique<BasicPlaneSource>(
std::move(description), std::move(line_offsets), std::move(data16));
supplemental:
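  // Code points beyond U+FFFF need 32-bit storage; there is no further
  // upgrade.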
while (index < text_size) {
std::tie(code_point, code_units) = internal::Utf8Decode(it);
if (ABSL_PREDICT_FALSE(code_point ==
internal::kUnicodeReplacementCharacter &&
code_units == 1)) {
return absl::InvalidArgumentError("cannot parse malformed UTF-8 input");
}
if (code_point == '\n') {
line_offsets.push_back(offset + 1);
}
data32.push_back(code_point);
Traits::Advance(it, code_units);
index += code_units;
++offset;
}
line_offsets.push_back(offset + 1);
return std::make_unique<SupplementalPlaneSource>(
std::move(description), std::move(line_offsets), std::move(data32));
}
}
}
absl::optional<SourceLocation> Source::GetLocation(
SourcePosition position) const {
if (auto line_and_offset = FindLine(position);
ABSL_PREDICT_TRUE(line_and_offset.has_value())) {
return SourceLocation{line_and_offset->first,
position - line_and_offset->second};
}
return absl::nullopt;
}
absl::optional<SourcePosition> Source::GetPosition(
const SourceLocation& location) const {
if (ABSL_PREDICT_FALSE(location.line < 1 || location.column < 0)) {
return absl::nullopt;
}
if (auto position = FindLinePosition(location.line);
ABSL_PREDICT_TRUE(position.has_value())) {
return *position + location.column;
}
return absl::nullopt;
}
absl::optional<std::string> Source::Snippet(int32_t line) const {
auto content = this->content();
auto start = FindLinePosition(line);
if (ABSL_PREDICT_FALSE(!start.has_value() || content.empty())) {
return absl::nullopt;
}
auto end = FindLinePosition(line + 1);
if (end.has_value()) {
return content.ToString(*start, *end - 1);
}
return content.ToString(*start);
}
std::string Source::DisplayErrorLocation(SourceLocation location) const {
constexpr char32_t kDot = '.';
constexpr char32_t kHat = '^';
constexpr char32_t kWideDot = 0xff0e;
constexpr char32_t kWideHat = 0xff3e;
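  // Full-width dot and caret keep the indicator aligned under snippets that
  // contain wide (multi-byte) characters.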
absl::optional<std::string> snippet = Snippet(location.line);
if (!snippet || snippet->empty()) {
return "";
}
*snippet = absl::StrReplaceAll(*snippet, {{"\t", " "}});
absl::string_view snippet_view(*snippet);
std::string result;
absl::StrAppend(&result, "\n | ", *snippet);
absl::StrAppend(&result, "\n | ");
std::string index_line;
for (int32_t i = 0; i < location.column && !snippet_view.empty(); ++i) {
size_t count;
std::tie(std::ignore, count) = internal::Utf8Decode(snippet_view);
snippet_view.remove_prefix(count);
if (count > 1) {
internal::Utf8Encode(index_line, kWideDot);
} else {
internal::Utf8Encode(index_line, kDot);
}
}
size_t count = 0;
if (!snippet_view.empty()) {
std::tie(std::ignore, count) = internal::Utf8Decode(snippet_view);
}
if (count > 1) {
internal::Utf8Encode(index_line, kWideHat);
} else {
internal::Utf8Encode(index_line, kHat);
}
absl::StrAppend(&result, index_line);
return result;
}
absl::optional<SourcePosition> Source::FindLinePosition(int32_t line) const {
if (ABSL_PREDICT_FALSE(line < 1)) {
return absl::nullopt;
}
if (line == 1) {
return SourcePosition{0};
}
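  // line_offsets()[i] holds the start of line i + 2; line 1 always starts at
  // offset 0.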
const auto line_offsets = this->line_offsets();
if (ABSL_PREDICT_TRUE(line <= static_cast<int32_t>(line_offsets.size()))) {
return line_offsets[static_cast<size_t>(line - 2)];
}
return absl::nullopt;
}
absl::optional<std::pair<int32_t, SourcePosition>> Source::FindLine(
SourcePosition position) const {
if (ABSL_PREDICT_FALSE(position < 0)) {
return absl::nullopt;
}
int32_t line = 1;
const auto line_offsets = this->line_offsets();
for (const auto& line_offset : line_offsets) {
if (line_offset > position) {
break;
}
++line;
}
if (line == 1) {
return std::make_pair(line, SourcePosition{0});
}
return std::make_pair(line, line_offsets[static_cast<size_t>(line) - 2]);
}
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(absl::string_view content,
std::string description) {
return common_internal::NewSourceImpl(std::move(description), content,
content.size());
}
absl::StatusOr<absl::Nonnull<SourcePtr>> NewSource(const absl::Cord& content,
std::string description) {
return common_internal::NewSourceImpl(std::move(description), content,
content.size());
}
} | #include "common/source.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::Optional;
TEST(SourceRange, Default) {
SourceRange range;
EXPECT_EQ(range.begin, -1);
EXPECT_EQ(range.end, -1);
}
TEST(SourceRange, Equality) {
EXPECT_THAT((SourceRange{}), (Eq(SourceRange{})));
EXPECT_THAT((SourceRange{0, 1}), (Ne(SourceRange{0, 0})));
}
TEST(SourceLocation, Default) {
SourceLocation location;
EXPECT_EQ(location.line, -1);
EXPECT_EQ(location.column, -1);
}
TEST(SourceLocation, Equality) {
EXPECT_THAT((SourceLocation{}), (Eq(SourceLocation{})));
EXPECT_THAT((SourceLocation{1, 1}), (Ne(SourceLocation{1, 0})));
}
TEST(StringSource, Description) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->description(), Eq("offset-test"));
}
TEST(StringSource, Content) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->content().ToString(),
Eq("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"));
}
TEST(StringSource, PositionAndLocation) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource("c.d &&\n\t b.c.arg(10) &&\n\t test(10)", "offset-test"));
EXPECT_THAT(source->line_offsets(), ElementsAre(7, 24, 35));
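  // Each offset is the position just past a newline, plus a final entry past
  // the end of the text.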
auto start = source->GetPosition(SourceLocation{int32_t{1}, int32_t{2}});
auto end = source->GetPosition(SourceLocation{int32_t{3}, int32_t{2}});
ASSERT_TRUE(start.has_value());
ASSERT_TRUE(end.has_value());
EXPECT_THAT(source->GetLocation(*start),
Optional(Eq(SourceLocation{int32_t{1}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(*end),
Optional(Eq(SourceLocation{int32_t{3}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(-1), Eq(absl::nullopt));
EXPECT_THAT(source->content().ToString(*start, *end),
Eq("d &&\n\t b.c.arg(10) &&\n\t "));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{0}, int32_t{0}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{1}, int32_t{-1}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{4}, int32_t{0}}),
Eq(absl::nullopt));
}
TEST(StringSource, SnippetSingle) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("hello, world", "one-line-test"));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello, world")));
EXPECT_THAT(source->Snippet(2), Eq(absl::nullopt));
}
TEST(StringSource, SnippetMulti) {
ASSERT_OK_AND_ASSIGN(auto source,
NewSource("hello\nworld\nmy\nbub\n", "four-line-test"));
EXPECT_THAT(source->Snippet(0), Eq(absl::nullopt));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello")));
EXPECT_THAT(source->Snippet(2), Optional(Eq("world")));
EXPECT_THAT(source->Snippet(3), Optional(Eq("my")));
EXPECT_THAT(source->Snippet(4), Optional(Eq("bub")));
EXPECT_THAT(source->Snippet(5), Optional(Eq("")));
EXPECT_THAT(source->Snippet(6), Eq(absl::nullopt));
}
TEST(CordSource, Description) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->description(), Eq("offset-test"));
}
TEST(CordSource, Content) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->content().ToString(),
Eq("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"));
}
TEST(CordSource, PositionAndLocation) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("c.d &&\n\t b.c.arg(10) &&\n\t test(10)"),
"offset-test"));
EXPECT_THAT(source->line_offsets(), ElementsAre(7, 24, 35));
auto start = source->GetPosition(SourceLocation{int32_t{1}, int32_t{2}});
auto end = source->GetPosition(SourceLocation{int32_t{3}, int32_t{2}});
ASSERT_TRUE(start.has_value());
ASSERT_TRUE(end.has_value());
EXPECT_THAT(source->GetLocation(*start),
Optional(Eq(SourceLocation{int32_t{1}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(*end),
Optional(Eq(SourceLocation{int32_t{3}, int32_t{2}})));
EXPECT_THAT(source->GetLocation(-1), Eq(absl::nullopt));
EXPECT_THAT(source->content().ToString(*start, *end),
Eq("d &&\n\t b.c.arg(10) &&\n\t "));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{0}, int32_t{0}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{1}, int32_t{-1}}),
Eq(absl::nullopt));
EXPECT_THAT(source->GetPosition(SourceLocation{int32_t{4}, int32_t{0}}),
Eq(absl::nullopt));
}
TEST(CordSource, SnippetSingle) {
ASSERT_OK_AND_ASSIGN(auto source,
NewSource(absl::Cord("hello, world"), "one-line-test"));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello, world")));
EXPECT_THAT(source->Snippet(2), Eq(absl::nullopt));
}
TEST(CordSource, SnippetMulti) {
ASSERT_OK_AND_ASSIGN(
auto source,
NewSource(absl::Cord("hello\nworld\nmy\nbub\n"), "four-line-test"));
EXPECT_THAT(source->Snippet(0), Eq(absl::nullopt));
EXPECT_THAT(source->Snippet(1), Optional(Eq("hello")));
EXPECT_THAT(source->Snippet(2), Optional(Eq("world")));
EXPECT_THAT(source->Snippet(3), Optional(Eq("my")));
EXPECT_THAT(source->Snippet(4), Optional(Eq("bub")));
EXPECT_THAT(source->Snippet(5), Optional(Eq("")));
EXPECT_THAT(source->Snippet(6), Eq(absl::nullopt));
}
TEST(Source, DisplayErrorLocationBasic) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello' +\n 'world'"));
SourceLocation location{2, 3};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'world'"
"\n | ...^");
}
TEST(Source, DisplayErrorLocationOutOfRange) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello world!'"));
SourceLocation location{3, 3};
EXPECT_EQ(source->DisplayErrorLocation(location), "");
}
TEST(Source, DisplayErrorLocationTabsShortened) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello' +\n\t\t'world!'"));
SourceLocation location{2, 4};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'world!'"
"\n | ....^");
}
TEST(Source, DisplayErrorLocationFullWidth) {
ASSERT_OK_AND_ASSIGN(auto source, NewSource("'Hello'"));
SourceLocation location{1, 2};
EXPECT_EQ(source->DisplayErrorLocation(location),
"\n | 'Hello'"
"\n | ..^");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/source.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/source_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
a035bbb0-9445-464c-9174-e02ca7f9ceaa | cpp | tensorflow/tensorflow | quantize_weights | tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights_test.cc | #include <memory>
#include <optional>
#include <utility>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Verifier.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Interfaces/CallInterfaces.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Rewrite/FrozenRewritePatternSet.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
namespace {
class QuantizeWeightsPass
: public mlir::PassWrapper<QuantizeWeightsPass, OperationPass<ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizeWeightsPass)
explicit QuantizeWeightsPass() : test_mode_(true) { initializeForTest(); }
explicit QuantizeWeightsPass(
const tensorflow::quantization::QuantizationOptions& quant_options)
: test_mode_(false), quant_options_(quant_options) {}
QuantizeWeightsPass(const QuantizeWeightsPass& other) {
test_mode_ = other.test_mode_;
quant_options_ = other.quant_options_;
initializeForTest();
}
StringRef getArgument() const final {
return "quant-quantize-weights";
}
StringRef getDescription() const final {
return "Quantize weights used by quantizable ops.";
}
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect, quant::QuantDialect>();
}
private:
void runOnOperation() override;
bool test_mode_;
tensorflow::quantization::QuantizationOptions quant_options_;
void initializeForTest() {
if (!test_mode_) return;
tensorflow::quantization::QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(
tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8);
auto mutable_quant_method = quant_options_.mutable_quantization_method();
*mutable_quant_method->add_quantization_component_specs() = quant_spec;
}
};
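// Rewrites ConstOps that feed quantizable ops into quantize/dequantize
// pairs according to the weight component spec.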
class QuantizeConstWeights : public OpRewritePattern<TF::ConstOp> {
public:
explicit QuantizeConstWeights(
MLIRContext* context,
const tensorflow::quantization::QuantizationOptions& quantization_options)
: OpRewritePattern<TF::ConstOp>(context),
quant_options_(quantization_options) {}
LogicalResult matchAndRewrite(TF::ConstOp op,
PatternRewriter& rewriter) const override {
auto weight_component_spec = GetWeightComponentSpec(quant_options_);
if (!weight_component_spec) return failure();
if (failed((isQuantizableWeight(op)))) {
return failure();
}
if (failed(quantizeOps(rewriter, op, weight_component_spec.value()))) {
return failure();
}
return success();
}
private:
  bool checkIfAnyUserIsConnectedToTerminator(BlockArgument op) const {
for (const auto& user : op.getUsers()) {
if (user->template hasTrait<OpTrait::IsTerminator>()) return true;
if (auto next_user = dyn_cast_or_null<TF::IdentityOp>(user)) {
return (*(next_user->getResult(0).getUsers().begin()))
->template hasTrait<OpTrait::IsTerminator>();
}
}
return false;
}
bool hasUsageFromQuantizableOp(TF::ConstOp op) const {
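    // Breadth-first walk over transitive uses, following call sites,
    // while-loop bodies, and data-movement ops, looking for a quantizable
    // consumer.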
llvm::SmallVector<mlir::Value> uses_at_current_level{op};
while (!uses_at_current_level.empty()) {
llvm::SmallVector<mlir::Value> next_values_to_visit;
for (auto cur_op : uses_at_current_level) {
for (auto& cur_op_use : cur_op.getUses()) {
Operation* next_op = cur_op_use.getOwner();
int next_op_operand_num = cur_op_use.getOperandNumber();
if (auto call_op = llvm::dyn_cast<mlir::CallOpInterface>(next_op)) {
mlir::func::FuncOp func =
llvm::dyn_cast<mlir::func::FuncOp>(call_op.resolveCallable());
if (!func) continue;
next_values_to_visit.push_back(
func.getArgument(next_op_operand_num));
} else if (auto while_op =
llvm::dyn_cast_or_null<TF::WhileOp>(next_op)) {
func::FuncOp func = while_op.body_function();
auto func_argument = func.getArgument(next_op_operand_num);
          if (checkIfAnyUserIsConnectedToTerminator(func_argument))
next_values_to_visit.push_back(
func.getArgument(next_op_operand_num));
} else if (IsOpWithQuantizableTrait(next_op)) {
return true;
} else if (IsOpWithDataMovementTrait(next_op)) {
next_values_to_visit.insert(next_values_to_visit.end(),
next_op->getResults().begin(),
next_op->getResults().end());
}
}
}
uses_at_current_level.swap(next_values_to_visit);
}
return false;
}
LogicalResult isQuantizableWeight(TF::ConstOp op) const {
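    // A weight qualifies only if its precision is quantizable, it reaches a
    // quantizable op, and it has enough elements to be worth converting.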
if (!IsValueWithQuantizablePrecision(op)) return failure();
if (!hasUsageFromQuantizableOp(op)) return failure();
int num_elements_threshold = quant_options_.min_num_elements_for_weights();
int num_elements = cast<ShapedType>(op.getType()).getNumElements();
if (num_elements < num_elements_threshold) {
op->emitRemark("Quantization is skipped because the op has ")
<< num_elements << " elements which is fewer than the threshold("
<< num_elements_threshold << " elements).";
return failure();
}
return success();
}
LogicalResult quantizeOps(PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec&
weight_component_spec) const {
if (weight_component_spec.tensor_type() ==
tensorflow::quantization::QuantizationComponentSpec::TENSORTYPE_INT_8) {
auto dequantized_val =
ApplyUniformQuantization(rewriter, op, weight_component_spec);
if (!dequantized_val.has_value()) return failure();
op.getOutput().replaceAllUsesWith(dequantized_val.value().getResult(0));
return success();
}
op->emitRemark("Not supported quantization data type.");
return failure();
}
protected:
tensorflow::quantization::QuantizationOptions quant_options_;
};
static PassRegistration<QuantizeWeightsPass> pass;
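// Greedily applies the QuantizeConstWeights pattern to every function in the
// module; a function that fails to converge fails the whole pass.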
void QuantizeWeightsPass::runOnOperation() {
MLIRContext* ctx = &getContext();
auto module_op = getOperation();
RewritePatternSet patterns(ctx);
patterns.add<QuantizeConstWeights>(ctx, quant_options_);
FrozenRewritePatternSet frozen_patterns(std::move(patterns));
for (auto func : module_op.getOps<func::FuncOp>()) {
if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) {
func.emitError() << "quant-quantize-weights failed.";
signalPassFailure();
}
}
}
}
std::unique_ptr<OperationPass<ModuleOp>> CreateQuantizeWeightsPass(
const tensorflow::quantization::QuantizationOptions& quant_options) {
return std::make_unique<QuantizeWeightsPass>(quant_options);
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights.h"
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/init_main.h"
#include "tsl/platform/path.h"
namespace {
std::string* g_test_model_dir = nullptr;
}
namespace mlir {
namespace lite {
namespace toco_legacy {
namespace {
using mlir::TFL::FlatBufferModelAbslError;
using tflite::BuiltinOperator_CONV_2D;
using tflite::BuiltinOperator_CUSTOM;
using tflite::BuiltinOperator_DEQUANTIZE;
using tflite::GetModel;
using tflite::Model;
using tflite::TensorType_FLOAT16;
using tflite::TensorType_FLOAT32;
using tflite::TensorType_INT8;
std::unique_ptr<FlatBufferModelAbslError> ReadTestModel() {
auto model_path = tsl::io::JoinPath(
*g_test_model_dir, ::mlir::lite::internal::kConvModelWith0Plus10Weights);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
std::unique_ptr<FlatBufferModelAbslError> ReadSharedWeightsTestModel() {
auto model_path = tsl::io::JoinPath(
*g_test_model_dir, ::mlir::lite::internal::kModelWithSharedWeights);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
std::unique_ptr<FlatBufferModelAbslError> ReadGatherTestModel() {
auto model_path = tsl::io::JoinPath(
*g_test_model_dir, ::mlir::lite::internal::kQuantizedWithGather);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
std::unique_ptr<FlatBufferModelAbslError> ReadCustomOpTestModel() {
auto model_path = tsl::io::JoinPath(
*g_test_model_dir, ::mlir::lite::internal::kModelWithCustomOp);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
template <typename T>
std::vector<T> GetAsVector(const flatbuffers::Vector<T>* vec) {
return std::vector<T>(vec->begin(), vec->end());
}
class QuantizeWeightsTest : public testing::Test {
protected:
QuantizeWeightsTest() = default;
void LoadBasicModel() {
input_model_ = ReadTestModel();
model_ = input_model_->GetModel();
}
void LoadSharedWeightsModel() {
input_model_ = ReadSharedWeightsTestModel();
model_ = input_model_->GetModel();
}
void LoadGatherTestModel() {
input_model_ = ReadGatherTestModel();
model_ = input_model_->GetModel();
}
void LoadCustomOpTestModel() {
input_model_ = ReadCustomOpTestModel();
model_ = input_model_->GetModel();
}
std::unique_ptr<FlatBufferModelAbslError> input_model_;
const Model* model_;
bool IsModelInputOrOutput(const Model* model, uint32_t tensor_idx) {
    for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size();
++subgraph_idx) {
const auto subgraph = model->subgraphs()->Get(subgraph_idx);
for (size_t i = 0; i < subgraph->inputs()->size(); ++i) {
if (subgraph->inputs()->Get(i) == tensor_idx) {
return true;
}
}
for (size_t i = 0; i < subgraph->outputs()->size(); ++i) {
if (subgraph->outputs()->Get(i) == tensor_idx) {
return true;
}
}
}
return false;
}
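  // Scans the subgraph's operators for the one producing `tensor_idx` and
  // reports its builtin opcode; returns false if the tensor has no producing
  // op (e.g. a model input or a plain constant).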
bool GetProducerOpCode(const Model* model, uint32_t subgraph_idx,
uint32_t tensor_idx,
tflite::BuiltinOperator* op_code) {
const auto subgraph = model->subgraphs()->Get(subgraph_idx);
for (size_t op_idx = 0; op_idx < subgraph->operators()->size(); ++op_idx) {
const auto op = subgraph->operators()->Get(op_idx);
for (size_t i = 0; i < op->outputs()->size(); ++i) {
if (op->outputs()->Get(i) == tensor_idx) {
const uint32_t op_code_idx = op->opcode_index();
*op_code = GetBuiltinCode(model->operator_codes()->Get(op_code_idx));
return true;
}
}
}
return false;
}
};
TEST_F(QuantizeWeightsTest, QuantizationSucceeds) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(
QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
}
TEST_F(QuantizeWeightsTest, WeightsMinNumElements) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
const uint64_t kWeightsMinNumElements = 1000000;
ASSERT_TRUE(QuantizeWeights(&builder, model_, kWeightsMinNumElements,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
subgraph_idx++) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size());
for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
const auto float_tensor = float_graph->tensors()->Get(i);
EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());
EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());
EXPECT_EQ(GetAsVector(quant_tensor->shape()),
GetAsVector(float_tensor->shape()));
EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str());
EXPECT_EQ(quant_tensor->type(), float_tensor->type());
}
}
}
TEST_F(QuantizeWeightsTest, HybridConv) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(
QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
subgraph_idx++) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size());
ASSERT_EQ(quantized_graph->operators()->size(), 1);
const auto op = quantized_graph->operators()->Get(0);
const uint32_t op_code_idx = op->opcode_index();
ASSERT_EQ(GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)),
BuiltinOperator_CONV_2D);
for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
const auto float_tensor = float_graph->tensors()->Get(i);
EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());
EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());
EXPECT_EQ(GetAsVector(quant_tensor->shape()),
GetAsVector(float_tensor->shape()));
EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str());
if (quant_tensor->name()->str() == "conv_bias") {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (IsModelInputOrOutput(output_model, i)) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->buffer() != 0) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8)
<< quant_tensor->name()->str();
auto shape = GetAsVector(quant_tensor->shape());
if (kUseUpdatedHybridSchemeDefault) {
EXPECT_EQ(quant_tensor->quantization()->scale()->size(), shape[0]);
} else {
EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 1);
}
} else {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
}
}
}
}
TEST_F(QuantizeWeightsTest, DequantizeConv) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(internal::QuantizeWeights(&builder, model_, 0,
                                        /*use_hybrid_evaluation=*/false,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size() + 1);
int32_t dequant_input_idx = -1;
int32_t dequant_output_idx = -1;
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) ==
BuiltinOperator_DEQUANTIZE) {
dequant_input_idx = op->inputs()->Get(0);
dequant_output_idx = op->outputs()->Get(0);
}
}
ASSERT_GT(dequant_input_idx, -1);
ASSERT_GT(dequant_output_idx, -1);
for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
if (i == dequant_input_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8);
} else if (i == dequant_output_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (IsModelInputOrOutput(output_model, i)) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->name()->str() == "conv_bias") {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->buffer() != 0) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8);
} else {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
}
}
}
}
TEST_F(QuantizeWeightsTest, DequantizeConvFloat16) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(QuantizeWeights(&builder, model_, BufferType::QUANTIZED_FLOAT16,
kUseUpdatedHybridSchemeDefault,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size() + 2);
int32_t dequant_input_idx = -1;
int32_t dequant_output_idx = -1;
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) ==
BuiltinOperator_DEQUANTIZE) {
dequant_input_idx = op->inputs()->Get(0);
dequant_output_idx = op->outputs()->Get(0);
}
}
ASSERT_GT(dequant_input_idx, -1);
ASSERT_GT(dequant_output_idx, -1);
for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
if (i == dequant_input_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16);
} else if (i == dequant_output_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (IsModelInputOrOutput(output_model, i)) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->name()->str() == "conv_bias") {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16);
} else if (quant_tensor->buffer() != 0) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT16);
} else {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
}
}
}
}
TEST_F(QuantizeWeightsTest, SharedWeights_Hybrid) {
LoadSharedWeightsModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(
QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
uint32_t num_conv_ops = 0;
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
const auto op_code =
GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx));
if (op_code == BuiltinOperator_CONV_2D) {
num_conv_ops++;
const auto weights_tensor =
quantized_graph->tensors()->Get(op->inputs()->Get(1));
EXPECT_EQ(weights_tensor->type(), TensorType_INT8);
}
}
}
EXPECT_EQ(num_conv_ops, 2);
}
TEST_F(QuantizeWeightsTest, SharedWeights_Dequantize) {
LoadSharedWeightsModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(internal::QuantizeWeights(&builder, model_, 0,
                                        /*use_hybrid_evaluation=*/false,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
uint32_t num_conv_ops = 0;
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
const auto op_code =
GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx));
if (op_code == BuiltinOperator_CONV_2D) {
num_conv_ops++;
uint32_t weights_tensor_index = op->inputs()->Get(1);
const auto weights_tensor =
quantized_graph->tensors()->Get(weights_tensor_index);
EXPECT_EQ(weights_tensor->type(), TensorType_FLOAT32);
BuiltinOperator producer_op_code;
ASSERT_TRUE(GetProducerOpCode(output_model, subgraph_idx,
weights_tensor_index, &producer_op_code));
EXPECT_EQ(producer_op_code, BuiltinOperator_DEQUANTIZE);
}
}
}
EXPECT_EQ(num_conv_ops, 2);
}
TEST_F(QuantizeWeightsTest, VerifyGatherQuantization) {
LoadGatherTestModel();
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(
QuantizeWeights(&builder, model_, 0, QuantizerType::OLD_QUANTIZER).ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
const auto op_code =
GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx));
if (op_code == tflite::BuiltinOperator_GATHER) {
uint32_t input_tensor_index = op->inputs()->Get(0);
const auto weights_tensor =
quantized_graph->tensors()->Get(input_tensor_index);
EXPECT_EQ(weights_tensor->type(), TensorType_INT8);
}
}
}
}
TEST_F(QuantizeWeightsTest, VerifyCustomOpQuantizationDequantize) {
LoadCustomOpTestModel();
CustomOpMap custom_op_map;
custom_op_map["CustomTestOp"] = {
.quantizable_input_indices = {1},
.is_hybrid = false,
};
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
const auto quantized_graph = output_model->subgraphs()->Get(0);
ASSERT_EQ(quantized_graph->operators()->size(),
model_->subgraphs()->Get(0)->operators()->size() + 1);
int num_custom_ops_found = 0;
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
const auto op_code =
GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx));
if (op_code == BuiltinOperator_CUSTOM) {
uint32_t weights_tensor_index = op->inputs()->Get(1);
const auto weights_tensor =
quantized_graph->tensors()->Get(weights_tensor_index);
EXPECT_EQ(weights_tensor->type(), TensorType_FLOAT32);
BuiltinOperator producer_op_code;
ASSERT_TRUE(GetProducerOpCode(output_model, 0, weights_tensor_index,
&producer_op_code));
EXPECT_EQ(producer_op_code, BuiltinOperator_DEQUANTIZE);
num_custom_ops_found++;
}
}
EXPECT_EQ(num_custom_ops_found, 1);
}
TEST_F(QuantizeWeightsTest, VerifyCustomOpQuantizationHybrid) {
LoadCustomOpTestModel();
CustomOpMap custom_op_map;
custom_op_map["CustomTestOp"] = {
.quantizable_input_indices = {1},
.is_hybrid = true,
};
flatbuffers::FlatBufferBuilder builder;
ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map,
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
const auto quantized_graph = output_model->subgraphs()->Get(0);
ASSERT_EQ(quantized_graph->operators()->size(),
model_->subgraphs()->Get(0)->operators()->size());
int num_custom_ops_found = 0;
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
const auto op_code =
GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx));
if (op_code == BuiltinOperator_CUSTOM) {
uint32_t weights_tensor_index = op->inputs()->Get(1);
const auto weights_tensor =
quantized_graph->tensors()->Get(weights_tensor_index);
EXPECT_EQ(weights_tensor->type(), TensorType_INT8);
num_custom_ops_found++;
}
}
EXPECT_EQ(num_custom_ops_found, 1);
}
TEST_F(QuantizeWeightsTest, VerifyUpdatedHybridSchemeFalseQuantizationHybrid) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
const CustomOpMap custom_op_map;
ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map,
                              /*use_updated_hybrid_scheme=*/false,
                              /*op_denylist=*/{}, QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
subgraph_idx++) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size());
ASSERT_EQ(quantized_graph->operators()->size(), 1);
const auto op = quantized_graph->operators()->Get(0);
const uint32_t op_code_idx = op->opcode_index();
ASSERT_EQ(GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)),
BuiltinOperator_CONV_2D);
for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
const auto float_tensor = float_graph->tensors()->Get(i);
EXPECT_EQ(quant_tensor->buffer(), float_tensor->buffer());
EXPECT_EQ(quant_tensor->is_variable(), float_tensor->is_variable());
EXPECT_EQ(GetAsVector(quant_tensor->shape()),
GetAsVector(float_tensor->shape()));
EXPECT_EQ(quant_tensor->name()->str(), float_tensor->name()->str());
if (quant_tensor->name()->str() == "conv_bias") {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (IsModelInputOrOutput(output_model, i)) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->buffer() != 0) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8)
<< quant_tensor->name()->str();
auto shape = GetAsVector(quant_tensor->shape());
EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 1);
} else {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
}
}
}
}
TEST_F(QuantizeWeightsTest, DequantizeConvBlocklisted) {
LoadBasicModel();
flatbuffers::FlatBufferBuilder builder;
const CustomOpMap custom_op_map;
ASSERT_TRUE(QuantizeWeights(&builder, model_, 0, custom_op_map,
                              /*use_updated_hybrid_scheme=*/true,
                              /*op_denylist=*/{BuiltinOperator_CONV_2D},
QuantizerType::OLD_QUANTIZER)
.ok());
const uint8_t* buffer = builder.GetBufferPointer();
const Model* output_model = GetModel(buffer);
ASSERT_TRUE(output_model);
ASSERT_EQ(output_model->subgraphs()->size(), model_->subgraphs()->size());
for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size();
++subgraph_idx) {
const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
ASSERT_EQ(quantized_graph->tensors()->size(),
float_graph->tensors()->size() + 1);
int32_t dequant_input_idx = -1;
int32_t dequant_output_idx = -1;
for (size_t i = 0; i < quantized_graph->operators()->size(); ++i) {
const auto op = quantized_graph->operators()->Get(i);
const uint32_t op_code_idx = op->opcode_index();
if (GetBuiltinCode(output_model->operator_codes()->Get(op_code_idx)) ==
BuiltinOperator_DEQUANTIZE) {
dequant_input_idx = op->inputs()->Get(0);
dequant_output_idx = op->outputs()->Get(0);
}
}
ASSERT_GT(dequant_input_idx, -1);
ASSERT_GT(dequant_output_idx, -1);
for (size_t i = 0; i < quantized_graph->tensors()->size(); ++i) {
const auto quant_tensor = quantized_graph->tensors()->Get(i);
if (i == dequant_input_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8);
EXPECT_EQ(quant_tensor->quantization()->scale()->size(), 5);
EXPECT_EQ(quant_tensor->quantization()->quantized_dimension(), 0);
} else if (i == dequant_output_idx) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (IsModelInputOrOutput(output_model, i)) {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->name()->str() == "conv_bias") {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
} else if (quant_tensor->buffer() != 0) {
EXPECT_EQ(quant_tensor->type(), TensorType_INT8);
} else {
EXPECT_EQ(quant_tensor->type(), TensorType_FLOAT32);
}
}
}
}
}
}
}
}
int main(int argc, char** argv) {
std::string model_file;
const std::vector<tsl::Flag> flag_list = {
tsl::Flag("test_model_file", &model_file,
"Path to test tflite model file."),
};
const bool parse_result = tsl::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
std::cerr << "Required test_model_file\n";
std::abort();
}
g_test_model_dir = new std::string(tsl::io::Dirname(model_file));
::tsl::port::InitMain(argv[0], &argc, &argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_weights.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantize_weights_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ca9acea-9d42-468d-b041-88b5b74e6f47 | cpp | tensorflow/tensorflow | semaphore | third_party/xla/xla/pjrt/semaphore.cc | third_party/xla/xla/pjrt/semaphore_test.cc | #include "xla/pjrt/semaphore.h"
#include <cstdint>
#include "absl/synchronization/mutex.h"
#include "tsl/platform/logging.h"
namespace xla {
Semaphore::Semaphore(int64_t capacity)
: value_(capacity), max_capacity_(capacity) {
CHECK_GE(capacity, 0);
}
bool Semaphore::CanAcquire(CanAcquireArgs* args) {
return args->semaphore->value_ >= args->amount;
}
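// Blocks via absl::Condition until at least `amount` units are available,
// then deducts them while still holding the lock.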
void Semaphore::Acquire(int64_t amount) {
CHECK_GE(amount, 0);
CanAcquireArgs args;
args.semaphore = this;
args.amount = amount;
mu_.LockWhen(absl::Condition(&CanAcquire, &args));
value_ -= amount;
mu_.Unlock();
}
bool Semaphore::TryAcquire(int64_t amount) {
CHECK_GE(amount, 0);
absl::MutexLock lock(&mu_);
if (value_ >= amount) {
value_ -= amount;
return true;
}
return false;
}
void Semaphore::Release(int64_t amount) {
CHECK_GE(amount, 0);
absl::MutexLock lock(&mu_);
value_ += amount;
}
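// RAII reservation: the destructor returns the reserved amount to the
// semaphore; moved-from reservations are disarmed by nulling their semaphore
// pointer so they release nothing.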
Semaphore::ScopedReservation::~ScopedReservation() {
if (semaphore_) {
semaphore_->Release(amount_);
}
}
Semaphore::ScopedReservation::ScopedReservation(
ScopedReservation&& other) noexcept {
semaphore_ = other.semaphore_;
amount_ = other.amount_;
other.semaphore_ = nullptr;
}
Semaphore::ScopedReservation& Semaphore::ScopedReservation::operator=(
ScopedReservation&& other) noexcept {
semaphore_ = other.semaphore_;
amount_ = other.amount_;
other.semaphore_ = nullptr;
return *this;
}
Semaphore::ScopedReservation Semaphore::ScopedAcquire(int64_t amount) {
Acquire(amount);
return ScopedReservation(this, amount);
}
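// Usage sketch (illustrative only, not part of this file):
//   Semaphore sem(/*capacity=*/4);
//   {
//     auto r = sem.ScopedAcquire(2);  // blocks until 2 units are free
//   }  // the 2 units are released here by ~ScopedReservation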
} | #include "xla/pjrt/semaphore.h"
#include <gtest/gtest.h>
#include "absl/synchronization/notification.h"
#include "xla/test.h"
#include "tsl/platform/env.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace {
TEST(SemaphoreTest, UnthreadedTests) {
Semaphore semaphore(2);
EXPECT_EQ(semaphore.capacity(), 2);
EXPECT_FALSE(semaphore.TryAcquire(semaphore.capacity() + 1));
EXPECT_TRUE(semaphore.TryAcquire(semaphore.capacity()));
semaphore.Release(semaphore.capacity());
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(2);
semaphore.Release(2);
semaphore.Acquire(1);
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(1);
semaphore.Release(1);
semaphore.Acquire(1);
semaphore.Release(2);
{
auto a = semaphore.ScopedAcquire(1);
EXPECT_EQ(a.amount(), 1);
{ auto b = semaphore.ScopedAcquire(1); }
{ auto c = semaphore.ScopedAcquire(1); }
}
{
auto d = semaphore.ScopedAcquire(2);
EXPECT_EQ(d.amount(), 2);
}
}
TEST(SemaphoreTest, ConcurrentTest) {
tsl::thread::ThreadPool pool(tsl::Env::Default(), "test", 2);
Semaphore semaphore(2);
semaphore.Acquire(1);
absl::Notification a_done;
pool.Schedule([&]() {
semaphore.Acquire(2);
semaphore.Release(2);
a_done.Notify();
});
absl::Notification b_done;
pool.Schedule([&]() {
semaphore.Acquire(1);
semaphore.Release(1);
b_done.Notify();
});
b_done.WaitForNotification();
EXPECT_FALSE(a_done.HasBeenNotified());
semaphore.Release(1);
a_done.WaitForNotification();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/semaphore.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/semaphore_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e5fed20-1d82-45b2-a25a-4ceb840b9908 | cpp | abseil/abseil-cpp | node_hash_map | absl/container/node_hash_map.h | absl/container/node_hash_map_test.cc | #ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
#define ABSL_CONTAINER_NODE_HASH_MAP_H_
#include <cstddef>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <class Key, class Value>
class NodeHashMapPolicy;
}
template <class Key, class Value, class Hash = DefaultHashContainerHash<Key>,
class Eq = DefaultHashContainerEq<Key>,
class Alloc = std::allocator<std::pair<const Key, Value>>>
class ABSL_ATTRIBUTE_OWNER node_hash_map
: public absl::container_internal::raw_hash_map<
absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
Alloc> {
using Base = typename node_hash_map::raw_hash_map;
public:
node_hash_map() {}
using Base::Base;
using Base::begin;
using Base::cbegin;
using Base::cend;
using Base::end;
using Base::capacity;
using Base::empty;
using Base::max_size;
using Base::size;
using Base::clear;
using Base::erase;
using Base::insert;
using Base::insert_or_assign;
using Base::emplace;
using Base::emplace_hint;
using Base::try_emplace;
using Base::extract;
using Base::merge;
using Base::swap;
using Base::rehash;
using Base::reserve;
using Base::at;
using Base::contains;
using Base::count;
using Base::equal_range;
using Base::find;
using Base::operator[];
using Base::bucket_count;
using Base::load_factor;
using Base::max_load_factor;
using Base::get_allocator;
using Base::hash_function;
using Base::key_eq;
};
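// Erases every element for which `pred` returns true and returns the number
// of erased elements.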
template <typename K, typename V, typename H, typename E, typename A,
typename Predicate>
typename node_hash_map<K, V, H, E, A>::size_type erase_if(
node_hash_map<K, V, H, E, A>& c, Predicate pred) {
return container_internal::EraseIf(pred, &c);
}
template <typename K, typename V, typename H, typename E, typename A>
void swap(node_hash_map<K, V, H, E, A>& x,
node_hash_map<K, V, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
return x.swap(y);
}
namespace container_internal {
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(const node_hash_map<K, V, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>&& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
}
namespace container_internal {
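// Policy for a node-based map: every element lives in its own heap
// allocation, which keeps pointers and references to elements stable across
// rehashes.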
template <class Key, class Value>
class NodeHashMapPolicy
: public absl::container_internal::node_slot_policy<
std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
using value_type = std::pair<const Key, Value>;
public:
using key_type = Key;
using mapped_type = Value;
  using init_type = std::pair<key_type, mapped_type>;
template <class Allocator, class... Args>
static value_type* new_element(Allocator* alloc, Args&&... args) {
using PairAlloc = typename absl::allocator_traits<
Allocator>::template rebind_alloc<value_type>;
PairAlloc pair_alloc(*alloc);
value_type* res =
absl::allocator_traits<PairAlloc>::allocate(pair_alloc, 1);
absl::allocator_traits<PairAlloc>::construct(pair_alloc, res,
std::forward<Args>(args)...);
return res;
}
template <class Allocator>
static void delete_element(Allocator* alloc, value_type* pair) {
using PairAlloc = typename absl::allocator_traits<
Allocator>::template rebind_alloc<value_type>;
PairAlloc pair_alloc(*alloc);
absl::allocator_traits<PairAlloc>::destroy(pair_alloc, pair);
absl::allocator_traits<PairAlloc>::deallocate(pair_alloc, pair, 1);
}
template <class F, class... Args>
static decltype(absl::container_internal::DecomposePair(
std::declval<F>(), std::declval<Args>()...))
apply(F&& f, Args&&... args) {
return absl::container_internal::DecomposePair(std::forward<F>(f),
std::forward<Args>(args)...);
}
static size_t element_space_used(const value_type*) {
return sizeof(value_type);
}
static Value& value(value_type* elem) { return elem->second; }
static const Value& value(const value_type* elem) { return elem->second; }
template <class Hash>
static constexpr HashSlotFn get_hash_slot_fn() {
return memory_internal::IsLayoutCompatible<Key, Value>::value
? &TypeErasedDerefAndApplyToSlotFn<Hash, Key>
: nullptr;
}
};
}
namespace container_algorithm_internal {
template <class Key, class T, class Hash, class KeyEqual, class Allocator>
struct IsUnorderedContainer<
absl::node_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/node_hash_map.h"
#include <cstddef>
#include <new>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/tracked.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
using MapTypes = ::testing::Types<
absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const int, int>>>,
absl::node_hash_map<std::string, std::string, StatefulTestingHash,
StatefulTestingEqual,
Alloc<std::pair<const std::string, std::string>>>>;
INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ConstructorTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, LookupTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, MembersTest, MapTypes);
INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ModifiersTest, MapTypes);
using M = absl::node_hash_map<std::string, Tracked<int>>;
TEST(NodeHashMap, Emplace) {
M m;
Tracked<int> t(53);
m.emplace("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
m.emplace(std::string("a"), t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
std::string a("a");
m.emplace(a, t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
const std::string ca("a");
  m.emplace(ca, t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(1, t.num_copies());
m.emplace(std::make_pair("a", t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(2, t.num_copies());
m.emplace(std::make_pair(std::string("a"), t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(3, t.num_copies());
std::pair<std::string, Tracked<int>> p("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(4, t.num_copies());
m.emplace(p);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(4, t.num_copies());
const std::pair<std::string, Tracked<int>> cp("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(5, t.num_copies());
m.emplace(cp);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(5, t.num_copies());
std::pair<const std::string, Tracked<int>> pc("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(6, t.num_copies());
m.emplace(pc);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(6, t.num_copies());
const std::pair<const std::string, Tracked<int>> cpc("a", t);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(cpc);
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(std::piecewise_construct, std::forward_as_tuple("a"),
std::forward_as_tuple(t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
m.emplace(std::piecewise_construct, std::forward_as_tuple(std::string("a")),
std::forward_as_tuple(t));
ASSERT_EQ(0, t.num_moves());
ASSERT_EQ(7, t.num_copies());
}
TEST(NodeHashMap, AssignRecursive) {
struct Tree {
absl::node_hash_map<int, Tree> children;
};
Tree root;
const Tree& child = root.children.emplace().first->second;
root = child;
}
TEST(NodeHashMap, MoveOnlyKey) {
struct Key {
Key() = default;
Key(Key&&) = default;
Key& operator=(Key&&) = default;
};
struct Eq {
bool operator()(const Key&, const Key&) const { return true; }
};
struct Hash {
size_t operator()(const Key&) const { return 0; }
};
absl::node_hash_map<Key, int, Hash, Eq> m;
m[Key()];
}
struct NonMovableKey {
explicit NonMovableKey(int i) : i(i) {}
NonMovableKey(NonMovableKey&&) = delete;
int i;
};
struct NonMovableKeyHash {
using is_transparent = void;
size_t operator()(const NonMovableKey& k) const { return k.i; }
size_t operator()(int k) const { return k; }
};
struct NonMovableKeyEq {
using is_transparent = void;
bool operator()(const NonMovableKey& a, const NonMovableKey& b) const {
return a.i == b.i;
}
bool operator()(const NonMovableKey& a, int b) const { return a.i == b; }
};
TEST(NodeHashMap, MergeExtractInsert) {
absl::node_hash_map<NonMovableKey, int, NonMovableKeyHash, NonMovableKeyEq>
set1, set2;
set1.emplace(std::piecewise_construct, std::make_tuple(7),
std::make_tuple(-7));
set1.emplace(std::piecewise_construct, std::make_tuple(17),
std::make_tuple(-17));
set2.emplace(std::piecewise_construct, std::make_tuple(7),
std::make_tuple(-70));
set2.emplace(std::piecewise_construct, std::make_tuple(19),
std::make_tuple(-190));
auto Elem = [](int key, int value) {
return Pair(Field(&NonMovableKey::i, key), value);
};
EXPECT_THAT(set1, UnorderedElementsAre(Elem(7, -7), Elem(17, -17)));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(19, -190)));
static_assert(!std::is_move_constructible<NonMovableKey>::value, "");
set1.merge(set2);
EXPECT_THAT(set1,
UnorderedElementsAre(Elem(7, -7), Elem(17, -17), Elem(19, -190)));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
auto node = set1.extract(7);
EXPECT_TRUE(node);
EXPECT_EQ(node.key().i, 7);
EXPECT_EQ(node.mapped(), -7);
EXPECT_THAT(set1, UnorderedElementsAre(Elem(17, -17), Elem(19, -190)));
auto insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_FALSE(insert_result.inserted);
EXPECT_TRUE(insert_result.node);
EXPECT_EQ(insert_result.node.key().i, 7);
EXPECT_EQ(insert_result.node.mapped(), -7);
EXPECT_THAT(*insert_result.position, Elem(7, -70));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
node = set1.extract(17);
EXPECT_TRUE(node);
EXPECT_EQ(node.key().i, 17);
EXPECT_EQ(node.mapped(), -17);
EXPECT_THAT(set1, UnorderedElementsAre(Elem(19, -190)));
node.mapped() = 23;
insert_result = set2.insert(std::move(node));
EXPECT_FALSE(node);
EXPECT_TRUE(insert_result.inserted);
EXPECT_FALSE(insert_result.node);
EXPECT_THAT(*insert_result.position, Elem(17, 23));
EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(17, 23)));
}
bool FirstIsEven(std::pair<const int, int> p) { return p.first % 2 == 0; }
TEST(NodeHashMap, EraseIf) {
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return true; }), 5);
EXPECT_THAT(s, IsEmpty());
}
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
EXPECT_EQ(erase_if(s, [](std::pair<const int, int>) { return false; }), 0);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
Pair(4, 4), Pair(5, 5)));
}
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
EXPECT_EQ(erase_if(s,
[](std::pair<const int, int> kvp) {
return kvp.first % 2 == 1;
}),
3);
EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
}
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
EXPECT_EQ(erase_if(s, FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
{
node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
EXPECT_EQ(erase_if(s, &FirstIsEven), 2);
EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
}
}
TEST(NodeHashMap, CForEach) {
node_hash_map<int, int> m;
std::vector<std::pair<int, int>> expected;
for (int i = 0; i < 100; ++i) {
{
SCOPED_TRACE("mutable object iteration");
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
m, [&v](std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<std::pair<int, int>> v;
const node_hash_map<int, int>& cm = m;
absl::container_internal::c_for_each_fast(
cm, [&v](const std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
node_hash_map<int, int>(m),
[&v](std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
m[i] = i;
expected.emplace_back(i, i);
}
}
TEST(NodeHashMap, CForEachMutate) {
node_hash_map<int, int> s;
std::vector<std::pair<int, int>> expected;
for (int i = 0; i < 100; ++i) {
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
s, [&v](std::pair<const int, int>& p) {
v.push_back(p);
p.second++;
});
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
for (auto& p : expected) {
p.second++;
}
EXPECT_THAT(s, UnorderedElementsAreArray(expected));
s[i] = i;
expected.emplace_back(i, i);
}
}
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
node_hash_map<std::string, std::string> map;
map["key1"] = "mapped";
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
}
#endif
TEST(NodeHashMap, RecursiveTypeCompiles) {
struct RecursiveType {
node_hash_map<int, RecursiveType> m;
};
RecursiveType t;
t.m[0] = RecursiveType{};
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/node_hash_map.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/node_hash_map_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
fa345464-a4cd-4aaa-9072-1bca6967a7b8 | cpp | tensorflow/tensorflow | gradients | tensorflow/c/eager/gradients.cc | tensorflow/c/eager/gradients_test.cc | #include "tensorflow/c/eager/gradients.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/gradients_internal.h"
#include "tensorflow/core/common_runtime/eager/attr_builder.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace gradients {
namespace {
int64_t ToId(const AbstractTensorHandle* t) {
return static_cast<int64_t>(reinterpret_cast<uintptr_t>(t));
}
Status ZerosLike(AbstractContext* ctx, AbstractTensorHandle* t,
AbstractTensorHandle** result) {
AbstractOperationPtr op(ctx->CreateOperation());
TF_RETURN_IF_ERROR(op->Reset("ZerosLike", nullptr));
if (isa<tracing::TracingOperation>(op.get())) {
TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingOperation>(op.get())->SetOpName(
absl::StrCat("ZerosLike", ToId(t)).c_str()));
}
TF_RETURN_IF_ERROR(op->AddInput(t));
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs));
*result = outputs[0];
return absl::OkStatus();
}
}
Status GradientRegistry::Register(
const string& op_name, GradientFunctionFactory gradient_function_factory) {
auto iter = registry_.find(op_name);
if (iter != registry_.end()) {
const string error_msg = "Gradient already exists for op: " + op_name + ".";
return errors::AlreadyExists(error_msg);
}
registry_.insert({op_name, gradient_function_factory});
return absl::OkStatus();
}
Status GradientRegistry::Lookup(
const ForwardOperation& op,
std::unique_ptr<GradientFunction>* gradient_function) const {
auto iter = registry_.find(op.op_name);
if (iter == registry_.end()) {
const string error_msg = "No gradient defined for op: " + op.op_name + ".";
return errors::NotFound(error_msg);
}
gradient_function->reset(iter->second(op));
return absl::OkStatus();
}
TapeTensor::TapeTensor(AbstractTensorHandle* handle) : handle_(handle) {
handle_->Ref();
}
TapeTensor::TapeTensor(const TapeTensor& other) {
handle_ = other.handle_;
handle_->Ref();
}
TapeTensor::~TapeTensor() { handle_->Unref(); }
int64_t TapeTensor::GetID() const { return ToId(handle_); }
tensorflow::DataType TapeTensor::GetDType() const {
return handle_->DataType();
}
AbstractTensorHandle* TapeTensor::GetHandle() const { return handle_; }
AbstractTensorHandle* TapeTensor::ZerosLike() const { return nullptr; }
class TapeVSpace
: public eager::VSpace<AbstractTensorHandle, GradientFunction, TapeTensor> {
public:
explicit TapeVSpace(AbstractContext* ctx) : ctx_(ctx) {}
~TapeVSpace() override {}
int64_t NumElements(AbstractTensorHandle* tensor) const override;
AbstractTensorHandle* AggregateGradients(
gtl::ArraySlice<AbstractTensorHandle*> gradient_tensors) const override;
Status CallBackwardFunction(
const string& op_type, GradientFunction* gradient_function,
const std::vector<int64_t>& unneeded_gradients,
gtl::ArraySlice<AbstractTensorHandle*> output_gradients,
absl::Span<AbstractTensorHandle*> result) const override;
Status BuildOnesLike(const TapeTensor& t,
AbstractTensorHandle** result) const override;
int64_t TensorId(AbstractTensorHandle* tensor) const override;
TapeTensor TapeTensorFromGradient(AbstractTensorHandle* g) const override;
void MarkAsResult(AbstractTensorHandle* gradient) const override;
void DeleteGradient(AbstractTensorHandle* gradient) const override;
private:
AbstractContext* ctx_;
};
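// Fixed placeholder element count; the tape appears to use this only as a
// performance heuristic, not for correctness.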
int64_t TapeVSpace::NumElements(AbstractTensorHandle* tensor) const {
return 1;
}
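// Sums a list of gradient tensors with a single AddN op; a lone gradient is
// returned unchanged. Returns nullptr if building or running AddN fails.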
AbstractTensorHandle* TapeVSpace::AggregateGradients(
gtl::ArraySlice<AbstractTensorHandle*> gradient_tensors) const {
if (gradient_tensors.size() == 1) {
return gradient_tensors[0];
}
AbstractOperationPtr op(ctx_->CreateOperation());
Status s = op->Reset("AddN", nullptr);
if (!s.ok()) {
return nullptr;
}
s = op->AddInputList(gradient_tensors);
if (!s.ok()) {
return nullptr;
}
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
s = op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs);
if (!s.ok()) {
return nullptr;
}
return outputs[0];
}
Status TapeVSpace::CallBackwardFunction(
const string& op_type, GradientFunction* gradient_function,
const std::vector<int64_t>& unneeded_gradients,
gtl::ArraySlice<AbstractTensorHandle*> output_gradients,
absl::Span<AbstractTensorHandle*> result) const {
if (gradient_function == nullptr) {
return errors::InvalidArgument(
"Provided null gradient_function for '", op_type, "'.\n",
"If the intent is to treat this op as non-differentiable consider "
"using RegisterNotDifferentiable or "
"NotDifferentiableGradientFunction.");
}
return gradient_function->Compute(ctx_, output_gradients, result);
}
Status TapeVSpace::BuildOnesLike(const TapeTensor& t,
AbstractTensorHandle** result) const {
AbstractOperationPtr op(ctx_->CreateOperation());
TF_RETURN_IF_ERROR(op->Reset("OnesLike", nullptr));
if (isa<tracing::TracingOperation>(op.get())) {
TF_RETURN_IF_ERROR(dyn_cast<tracing::TracingOperation>(op.get())->SetOpName(
absl::StrCat("OnesLike", ToId(t.GetHandle())).c_str()));
}
TF_RETURN_IF_ERROR(op->AddInput(t.GetHandle()));
int num_outputs = 1;
std::vector<AbstractTensorHandle*> outputs(num_outputs);
TF_RETURN_IF_ERROR(
op->Execute(absl::Span<AbstractTensorHandle*>(outputs), &num_outputs));
*result = outputs[0];
return absl::OkStatus();
}
int64_t TapeVSpace::TensorId(AbstractTensorHandle* tensor) const {
return ToId(tensor);
}
TapeTensor TapeVSpace::TapeTensorFromGradient(AbstractTensorHandle* g) const {
return TapeTensor(g);
}
void TapeVSpace::MarkAsResult(AbstractTensorHandle* gradient) const {}
void TapeVSpace::DeleteGradient(AbstractTensorHandle* gradient) const {
gradient->Unref();
}
void Tape::Watch(const AbstractTensorHandle* t) {
GradientTape::Watch(ToId(t));
}
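// Records an op on the tape: captures input ids and dtypes, wraps each output
// in a ref-counted TapeTensor, and takes ownership of `gradient_function`,
// deleting it through the supplied deleter.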
void Tape::RecordOperation(absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle* const> outputs,
GradientFunction* gradient_function,
const string& op_name) {
std::vector<int64_t> input_ids(inputs.size());
std::vector<tensorflow::DataType> input_dtypes(inputs.size());
for (int i = 0; i < inputs.size(); i++) {
input_ids[i] = ToId(inputs[i]);
input_dtypes[i] = inputs[i]->DataType();
}
std::vector<TapeTensor> tape_tensors;
tape_tensors.reserve(outputs.size());
for (auto t : outputs) {
tape_tensors.push_back(TapeTensor(t));
}
GradientTape::RecordOperation(
op_name, tape_tensors, input_ids, input_dtypes,
[gradient_function]() -> GradientFunction* { return gradient_function; },
[](GradientFunction* ptr) {
if (ptr) {
delete ptr;
}
});
}
bool Tape::ShouldRecord(
absl::Span<const AbstractTensorHandle* const> tensors) const {
std::vector<int64_t> tensor_ids(tensors.size());
std::vector<tensorflow::DataType> tensor_dtypes(tensors.size());
for (int i = 0; i < tensors.size(); i++) {
tensor_ids[i] = ToId(tensors[i]);
tensor_dtypes[i] = tensors[i]->DataType();
}
return GradientTape::ShouldRecord(tensor_ids, tensor_dtypes);
}
void Tape::DeleteTrace(const AbstractTensorHandle* t) {
GradientTape::DeleteTrace(ToId(t));
}
std::vector<int64_t> MakeTensorIDList(
absl::Span<AbstractTensorHandle* const> tensors) {
std::vector<int64_t> ids(tensors.size());
for (int i = 0; i < tensors.size(); i++) {
ids[i] = ToId(tensors[i]);
}
return ids;
}
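// Computes gradients of `targets` with respect to `sources`; sources that are
// also targets are collected up front so the tape can handle the pass-through
// case.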
Status Tape::ComputeGradient(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> targets,
absl::Span<AbstractTensorHandle* const> sources,
absl::Span<AbstractTensorHandle* const> output_gradients,
absl::Span<AbstractTensorHandle*> result) {
TapeVSpace vspace(ctx);
std::vector<int64_t> target_tensor_ids = MakeTensorIDList(targets);
std::vector<int64_t> source_tensor_ids = MakeTensorIDList(sources);
tensorflow::gtl::FlatSet<int64_t> sources_set(source_tensor_ids.begin(),
source_tensor_ids.end());
std::unordered_map<int64_t, TapeTensor> sources_that_are_targets;
for (int i = 0; i < target_tensor_ids.size(); ++i) {
int64_t target_id = target_tensor_ids[i];
if (sources_set.find(target_id) != sources_set.end()) {
auto tensor = targets[i];
sources_that_are_targets.insert(
std::make_pair(target_id, TapeTensor(tensor)));
}
}
TF_RETURN_IF_ERROR(GradientTape::ComputeGradient(
vspace, target_tensor_ids, source_tensor_ids, sources_that_are_targets,
      output_gradients, result, /*build_default_zeros_grads=*/false));
return absl::OkStatus();
}
namespace internal {
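// The helpers below mirror the AbstractOperation builder API while also
// recording every input and attribute into the ForwardOperation, so the
// gradient registry can inspect the complete forward call.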
Status Reset(AbstractOperation* op_, const char* op,
const char* raw_device_name, ForwardOperation* forward_op_) {
forward_op_->op_name = op;
forward_op_->attrs.Reset(op);
return op_->Reset(op, raw_device_name);
}
Status AddInput(AbstractOperation* op_, AbstractTensorHandle* input,
ForwardOperation* forward_op_) {
TF_RETURN_IF_ERROR(op_->AddInput(input));
forward_op_->inputs.push_back(input);
return absl::OkStatus();
}
Status AddInputList(AbstractOperation* op_,
absl::Span<AbstractTensorHandle* const> inputs,
ForwardOperation* forward_op_) {
TF_RETURN_IF_ERROR(op_->AddInputList(inputs));
for (auto input : inputs) {
forward_op_->inputs.push_back(input);
}
return absl::OkStatus();
}
Status SetAttrString(AbstractOperation* op_, const char* attr_name,
const char* data, size_t length,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, StringPiece(data, length));
return op_->SetAttrString(attr_name, data, length);
}
Status SetAttrInt(AbstractOperation* op_, const char* attr_name, int64_t value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, static_cast<int64_t>(value));
return op_->SetAttrInt(attr_name, value);
}
Status SetAttrFloat(AbstractOperation* op_, const char* attr_name, float value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrFloat(attr_name, value);
}
Status SetAttrBool(AbstractOperation* op_, const char* attr_name, bool value,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrBool(attr_name, value);
}
Status SetAttrType(AbstractOperation* op_, const char* attr_name,
DataType value, ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name, value);
return op_->SetAttrType(attr_name, value);
}
Status SetAttrShape(AbstractOperation* op_, const char* attr_name,
const int64_t* dims, const int num_dims,
ForwardOperation* forward_op_) {
if (num_dims > TensorShape::MaxDimensions()) {
return errors::InvalidArgument("Value specified for `", attr_name, "` has ",
num_dims,
" dimensions which is over the limit of ",
TensorShape::MaxDimensions(), ".");
}
TensorShapeProto proto;
if (num_dims < 0) {
proto.set_unknown_rank(true);
} else {
for (int d = 0; d < num_dims; ++d) {
proto.add_dim()->set_size(dims[d]);
}
}
forward_op_->attrs.Set(attr_name, proto);
return op_->SetAttrShape(attr_name, dims, num_dims);
}
Status SetAttrFunction(AbstractOperation* op_, const char* attr_name,
const AbstractOperation* value,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunction has not been implemented yet.");
}
Status SetAttrFunctionName(AbstractOperation* op_, const char* attr_name,
const char* value, size_t length,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunctionName has not been implemented "
"yet.");
}
Status SetAttrTensor(AbstractOperation* op_, const char* attr_name,
AbstractTensorInterface* tensor,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrTensor has not been implemented yet.");
}
Status SetAttrStringList(AbstractOperation* op_, const char* attr_name,
const void* const* values, const size_t* lengths,
int num_values, ForwardOperation* forward_op_) {
std::vector<StringPiece> v(num_values);
for (int i = 0; i < num_values; ++i) {
v[i] = StringPiece(static_cast<const char*>(values[i]), lengths[i]);
}
forward_op_->attrs.Set(attr_name, v);
return op_->SetAttrStringList(attr_name, values, lengths, num_values);
}
Status SetAttrFloatList(AbstractOperation* op_, const char* attr_name,
const float* values, int num_values,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const float>(values, num_values));
return op_->SetAttrFloatList(attr_name, values, num_values);
}
Status SetAttrIntList(AbstractOperation* op_, const char* attr_name,
const int64_t* values, int num_values,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(
attr_name, gtl::ArraySlice<const int64_t>(
reinterpret_cast<const int64_t*>(values), num_values));
return op_->SetAttrIntList(attr_name, values, num_values);
}
Status SetAttrTypeList(AbstractOperation* op_, const char* attr_name,
const DataType* values, int num_values,
ForwardOperation* forward_op_) {
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const DataType>(values, num_values));
return op_->SetAttrTypeList(attr_name, values, num_values);
}
Status SetAttrBoolList(AbstractOperation* op_, const char* attr_name,
const unsigned char* values, int num_values,
ForwardOperation* forward_op_) {
std::unique_ptr<bool[]> b(new bool[num_values]);
for (int i = 0; i < num_values; ++i) {
b[i] = values[i];
}
forward_op_->attrs.Set(attr_name,
gtl::ArraySlice<const bool>(b.get(), num_values));
return op_->SetAttrBoolList(attr_name, values, num_values);
}
Status SetAttrShapeList(AbstractOperation* op_, const char* attr_name,
const int64_t** dims, const int* num_dims,
int num_values, ForwardOperation* forward_op_) {
std::unique_ptr<TensorShapeProto[]> proto(new TensorShapeProto[num_values]);
for (int i = 0; i < num_values; ++i) {
const auto num_dims_i = num_dims[i];
if (num_dims_i > TensorShape::MaxDimensions()) {
return errors::InvalidArgument(
strings::StrCat("Value specified for `", attr_name, "` has ",
num_dims_i, " dimensions which is over the limit of ",
TensorShape::MaxDimensions(), "."));
}
if (num_dims_i < 0) {
proto[i].set_unknown_rank(true);
} else {
const int64_t* dims_i = dims[i];
auto proto_i = &proto[i];
for (int d = 0; d < num_dims_i; ++d) {
proto_i->add_dim()->set_size(dims_i[d]);
}
}
}
forward_op_->attrs.Set(
attr_name, gtl::ArraySlice<TensorShapeProto>(proto.get(), num_values));
return op_->SetAttrShapeList(attr_name, dims, num_dims, num_values);
}
Status SetAttrFunctionList(AbstractOperation* op_, const char* attr_name,
absl::Span<const AbstractOperation*> values,
ForwardOperation* forward_op_) {
return tensorflow::errors::Unimplemented(
"SetAttrFunctionList has not been "
"implemented yet.");
}
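// Runs the op, appends its outputs to the ForwardOperation, finalizes the
// attributes, looks up the gradient function in the registry, and records the
// whole call on the tape.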
Status Execute(AbstractOperation* op_, AbstractContext* ctx,
absl::Span<AbstractTensorHandle*> retvals, int* num_retvals,
ForwardOperation* forward_op_, Tape* tape,
const GradientRegistry& registry) {
TF_RETURN_IF_ERROR(op_->Execute(retvals, num_retvals));
for (int i = 0; i < *num_retvals; i++) {
forward_op_->outputs.push_back(retvals[i]);
}
forward_op_->attrs.BuildNodeDef();
std::unique_ptr<GradientFunction> gradient_fn;
TF_RETURN_IF_ERROR(registry.Lookup(*forward_op_, &gradient_fn));
tape->RecordOperation(forward_op_->inputs, retvals, gradient_fn.release(),
op_->Name());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/c/eager/gradients.h"
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api_experimental.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/gradients_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/experimental/gradients/math_grad.h"
#include "tensorflow/c/experimental/gradients/not_differentiable.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/experimental/ops/math_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/c/tf_tensor.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using std::vector;
using tensorflow::TF_StatusPtr;
using tracing::TracingOperation;
class CppGradients
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
};
Status RegisterGradients(GradientRegistry* registry) {
TF_RETURN_IF_ERROR(RegisterNotDifferentiable(registry, "CheckNumerics"));
return absl::OkStatus();
}
TEST_P(CppGradients, TestSetAttrString) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr t;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 1.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
t.reset(x_raw);
}
AbstractOperationPtr check_numerics_op(ctx->CreateOperation());
ForwardOperation forward_op;
Status s = Reset(check_numerics_op.get(), "CheckNumerics",
nullptr, &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
if (isa<TracingOperation>(check_numerics_op.get())) {
s = dyn_cast<TracingOperation>(check_numerics_op.get())
->SetOpName("check_numerics");
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
s = AddInput(check_numerics_op.get(), t.get(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
string message = "This is the way!";
s = SetAttrString(check_numerics_op.get(), "message", message.data(),
message.length(), &forward_op);
ASSERT_EQ(errors::OK, s.code()) << s.message();
int num_retvals = 1;
std::vector<AbstractTensorHandle*> outputs(1);
GradientRegistry registry;
s = RegisterGradients(®istry);
ASSERT_EQ(errors::OK, s.code()) << s.message();
auto tape = std::make_unique<Tape>(false);
s = Execute(check_numerics_op.get(), ctx.get(), absl::MakeSpan(outputs),
&num_retvals, &forward_op, tape.get(), registry);
ASSERT_EQ(errors::OK, s.code()) << s.message();
string read_message;
s = forward_op.attrs.Get("message", &read_message);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_EQ(read_message, message);
}
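// Records a Neg op on the tape with a null gradient function; the subsequent
// ComputeGradient call is expected to fail with a descriptive error.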
Status RecordOperationWithNullGradientFunctionModel(
AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
Tape tape(false);
tape.Watch(inputs[0]);
AbstractTensorHandle* neg_output;
TF_RETURN_IF_ERROR(ops::Neg(ctx, inputs[0], &neg_output, "Neg"));
tape.RecordOperation(inputs, {neg_output}, nullptr, "Neg");
return tape.ComputeGradient(ctx,
{neg_output},
inputs,
{}, outputs);
}
TEST_P(CppGradients, TestRecordOperationWithNullGradientFunctionRaises) {
std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)> status(
TF_NewStatus(), TF_DeleteStatus);
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
std::vector<AbstractTensorHandle*> outputs(1);
Status s = RunModel(RecordOperationWithNullGradientFunctionModel, ctx.get(),
{x.get()}, absl::MakeSpan(outputs),
!std::get<2>(GetParam()));
ASSERT_EQ(error::INVALID_ARGUMENT, s.code());
ASSERT_EQ(
"Provided null gradient_function for 'Neg'.\nIf the intent is to treat "
"this op as non-differentiable consider using RegisterNotDifferentiable "
"or NotDifferentiableGradientFunction.",
s.message());
ASSERT_EQ(nullptr, outputs[0]);
}
INSTANTIATE_TEST_SUITE_P(
    UnifiedCAPI, CppGradients,
    ::testing::Combine(::testing::Values("graphdef", "mlir"),
                       ::testing::Values(false),
                       ::testing::Values(true, false)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradients.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/gradients_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22542dd5-4db0-4aa6-a96e-2d2b524faeed | cpp | tensorflow/tensorflow | graph_def | tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h | tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_GRAPH_DEF_H_
#define TENSORFLOW_COMPILER_MLIR_QUANTIZATION_STABLEHLO_CC_GRAPH_DEF_H_
#include <type_traits>
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
namespace stablehlo::quantization {
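// Applies `func` to every NodeDef in `graph_def`, covering both the top-level
// nodes and the nodes nested inside the FunctionDefs of its function library.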
template <typename FuncT, typename = std::enable_if_t<std::is_invocable_r_v<
void, FuncT, tensorflow::NodeDef&>>>
void MutateNodeDefs(tensorflow::GraphDef& graph_def, FuncT&& func) {
for (tensorflow::NodeDef& node_def : *graph_def.mutable_node()) {
func(node_def);
}
for (tensorflow::FunctionDef& function_def :
*graph_def.mutable_library()->mutable_function()) {
for (tensorflow::NodeDef& node_def : *function_def.mutable_node_def()) {
func(node_def);
}
}
}
}
#endif | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tsl/platform/protobuf.h"
namespace stablehlo::quantization {
namespace {
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
TEST(GraphDefTest, MutateNodeDefsMutatesTopLevelNodeDefs) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
node { name: "foo" }
)pb",
&graph_def));
MutateNodeDefs(graph_def,
[](NodeDef& node_def) { node_def.set_name("bar"); });
ASSERT_THAT(graph_def.node(), SizeIs(1));
EXPECT_THAT(graph_def.node()[0].name(), StrEq("bar"));
}
TEST(GraphDefTest, MutateNodeDefsMutatesFunctionNodeDefs) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(
library { function { node_def { name: "foo" } } }
)pb",
&graph_def));
MutateNodeDefs(graph_def,
[](NodeDef& node_def) { node_def.set_name("bar"); });
ASSERT_THAT(graph_def.library().function(), SizeIs(1));
ASSERT_THAT(graph_def.library().function()[0].node_def(), SizeIs(1));
EXPECT_THAT(graph_def.library().function()[0].node_def()[0].name(),
StrEq("bar"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/graph_def_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f912456d-8bbf-4712-ae1d-caa5d3675f31 | cpp | tensorflow/tensorflow | spmd_partitioner_util | third_party/xla/xla/service/spmd/spmd_partitioner_util.cc | third_party/xla/xla/service/spmd/spmd_partitioner_util_test.cc | #include "xla/service/spmd/spmd_partitioner_util.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/spmd/spmd_partitioner.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace spmd {
namespace {
using hlo_sharding_util::GroupedSharding;
}
bool HasReplicatedSharding(const HloSharding& sharding) {
if (sharding.IsTuple()) {
return absl::c_any_of(sharding.tuple_elements(), HasReplicatedSharding);
}
return sharding.IsReplicated();
}
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
HloComputation::Builder sum_b("add");
auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
if (type == PRED) {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));
} else {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));
}
HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());
return reduction;
}
bool EvenlyPartitions(const Shape& shape, const HloSharding& sharding) {
if (sharding.IsTuple()) {
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
if (!EvenlyPartitions(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}))) {
return false;
}
}
}
if (sharding.IsTileMaximal()) {
return sharding.IsReplicated();
}
for (int64_t i = 0; i < shape.dimensions_size(); ++i) {
if (shape.dimensions(i) % sharding.tile_assignment().dim(i) != 0) {
return false;
}
}
return true;
}
Shape MakePartitionedShape(const Shape& shape, const HloSharding& sharding) {
if (sharding.IsTuple()) {
std::vector<Shape> subshapes;
const int64_t shape_n = ShapeUtil::TupleElementCount(shape);
subshapes.reserve(shape_n);
for (int64_t i = 0; i < shape_n; ++i) {
subshapes.push_back(
MakePartitionedShape(ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i})));
}
return ShapeUtil::MakeTupleShape(subshapes);
}
return sharding.TileShape(shape);
}
int64_t ShapeSizeInBytes(const Shape& shape) {
if (shape.IsTuple()) {
int64_t total_size = 0;
for (int64_t i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
total_size += ShapeSizeInBytes(ShapeUtil::GetTupleElementShape(shape, i));
}
return total_size;
}
return ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type()) *
ShapeUtil::ElementsIn(shape);
}
Shape MakeNonPaddedShapeForGivenPartition(const Shape& shape,
const HloSharding& sharding,
int64_t partition_id) {
if (sharding.IsTuple()) {
std::vector<Shape> subshapes;
const int64_t shape_n = ShapeUtil::TupleElementCount(shape);
subshapes.reserve(shape_n);
for (int64_t i = 0; i < shape_n; ++i) {
subshapes.push_back(MakeNonPaddedShapeForGivenPartition(
ShapeUtil::GetTupleElementShape(shape, i),
sharding.GetSubSharding(shape, {i}), partition_id));
}
return ShapeUtil::MakeTupleShape(subshapes);
}
if (sharding.IsReplicated()) {
return shape;
}
if (sharding.IsTileMaximal()) {
if (partition_id == *sharding.UniqueDevice()) {
return shape;
}
return ShapeUtil::MakeTupleShape({});
}
auto partition_shape = shape;
std::vector<int64_t> tile_offset =
sharding.TileOffsetForDevice(shape, partition_id);
std::vector<int64_t> tile_limit =
sharding.TileLimitForDevice(shape, partition_id);
for (int64_t i = 0; i < tile_offset.size(); ++i) {
if (sharding.UsesDevice(partition_id)) {
partition_shape.set_dimensions(i, tile_limit[i] - tile_offset[i]);
} else {
partition_shape.set_dimensions(i, 0);
}
}
return partition_shape;
}
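// Returns the start offset of the local shard along each dimension of `shape`,
// computed as a table lookup keyed on `partition_id`. Dimensions that are not
// partitioned (or not listed in a non-empty `dims`) get a constant zero offset.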
std::vector<HloInstruction*> MakePartitionOffsets(
const Shape& shape, const HloSharding& sharding,
HloInstruction* partition_id, SpmdBuilder* b,
absl::Span<const int64_t> dims) {
CHECK(!shape.IsTuple());
auto shard_shape = MakePartitionedShape(shape, sharding);
std::vector<HloInstruction*> offsets;
for (int64_t i = 0; i < shape.rank(); ++i) {
if (sharding.tile_assignment().dim(i) == 1 ||
(!dims.empty() && !absl::c_linear_search(dims, i))) {
offsets.push_back(b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32))));
} else {
std::vector<int32_t> offset_array(
sharding.tile_assignment().num_elements());
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
offset_array[device] = indices[i] * shard_shape.dimensions(i);
});
offsets.push_back(
TableLookup<int32_t>(offset_array, S32, partition_id, b));
}
}
return offsets;
}
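// Returns the per-dimension shard ordinal of this partition within the tile
// assignment, ignoring the trailing replication dimension if present.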
std::vector<HloInstruction*> MakeTiledPartitionOrdinals(
const HloSharding& sharding, HloInstruction* partition_id, SpmdBuilder* b) {
CHECK(!sharding.IsTileMaximal());
auto dimensions = sharding.tile_assignment().dimensions();
if (sharding.ReplicateOnLastTileDim()) {
dimensions.remove_suffix(1);
}
auto table_shape = ShapeUtil::MakeShape(S32, dimensions);
return MakePartitionOffsets(table_shape, sharding, partition_id, b);
}
Shape GetPaddedShapeForUnevenPartitioning(const Shape& base_shape,
const HloSharding& sharding) {
if (sharding.IsTileMaximal()) {
return base_shape;
}
if (EvenlyPartitions(base_shape, sharding)) {
return base_shape;
}
auto shard_shape = MakePartitionedShape(base_shape, sharding);
Shape padded_base_shape = base_shape;
for (int64_t i = 0; i < padded_base_shape.rank(); ++i) {
padded_base_shape.set_dimensions(
i, shard_shape.dimensions(i) * sharding.tile_assignment().dim(i));
}
return padded_base_shape;
}
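// Translates the global partition id into the partition's index within its
// device group, via a U32 lookup table covering all devices.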
HloInstruction* GetInGroupPartitionId(
HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b) {
int64_t total_devices = device_groups.size() * device_groups[0].size();
std::vector<uint32_t> in_group_ids(total_devices);
for (uint32_t i = 0; i < device_groups.size(); ++i) {
for (uint32_t j = 0; j < device_groups[i].size(); ++j) {
in_group_ids[device_groups[i][j]] = j;
}
}
return TableLookup<uint32_t>(in_group_ids, U32, partition_id, b);
}
namespace {
bool IsIota(absl::Span<const int64_t> x) {
for (int64_t i = 0; i < x.size(); ++i) {
if (x[i] != i) {
return false;
}
}
return true;
}
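// Wraps `creator` so that collectives run independently within each device
// group: the partition id is remapped to the in-group id, and subgroup /
// source-target lists are expanded from in-group indices to global device ids.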
SPMDCollectiveOpsCreator GetPerGroupCollectiveOpsCreator(
const SPMDCollectiveOpsCreator& creator,
const std::vector<std::vector<int64_t>>& device_groups) {
if (device_groups.size() == 1 && IsIota(device_groups[0])) {
return creator;
}
SPMDCollectiveOpsCreator result;
auto device_groups_ptr =
std::make_shared<const std::vector<std::vector<int64_t>>>(device_groups);
result.create_partition_id = [creator, device_groups_ptr](SpmdBuilder* b) {
return GetInGroupPartitionId(creator.create_partition_id(b),
*device_groups_ptr, b);
};
auto expand_partition_groups =
[device_groups_ptr](
const std::vector<std::vector<int64_t>>& partition_subgroups) {
auto& device_groups = *device_groups_ptr;
if (partition_subgroups.empty()) {
return device_groups;
}
std::vector<std::vector<int64_t>> result(partition_subgroups.size() *
device_groups.size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t i = 0; i < partition_subgroups.size(); ++i) {
result[g * partition_subgroups.size() + i].resize(
partition_subgroups[i].size());
for (int64_t j = 0; j < partition_subgroups[i].size(); ++j) {
result[g * partition_subgroups.size() + i][j] =
device_groups[g][partition_subgroups[i][j]];
}
}
}
return result;
};
result.create_cross_partition_all_reduce =
[creator, expand_partition_groups](
SpmdBuilder* b, HloInstruction* operand, HloComputation* reduction,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id) {
return creator.create_cross_partition_all_reduce(
b, operand, reduction, expand_partition_groups(partition_subgroups),
channel_id);
};
result.create_cross_partition_collective_permute =
[creator, device_groups_ptr](
SpmdBuilder* b, HloInstruction* operand,
std::vector<std::pair<int64_t, int64_t>>& src_dst_pairs,
int64_t next_channel_id) {
auto& device_groups = *device_groups_ptr;
std::vector<std::pair<int64_t, int64_t>> expanded_pairs(
src_dst_pairs.size() * device_groups.size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t i = 0; i < src_dst_pairs.size(); ++i) {
expanded_pairs[g * src_dst_pairs.size() + i] =
std::pair<int64_t, int64_t>{
device_groups[g][src_dst_pairs[i].first],
device_groups[g][src_dst_pairs[i].second]};
}
}
return creator.create_cross_partition_collective_permute(
b, operand, expanded_pairs, next_channel_id);
};
result.create_cross_partition_all_to_all =
[creator, expand_partition_groups](
SpmdBuilder* b, absl::Span<HloInstruction* const> operands,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, std::optional<int64_t> split_dimension) {
return creator.create_cross_partition_all_to_all(
b, operands, expand_partition_groups(partition_subgroups),
channel_id, split_dimension);
};
if (creator.create_cross_partition_all_gather) {
result.create_cross_partition_all_gather =
[creator, expand_partition_groups](
SpmdBuilder* b, HloInstruction* operand, const Shape& ag_shape,
const std::vector<std::vector<int64_t>>& partition_subgroups,
int64_t channel_id, int64_t all_gather_dimension) {
return creator.create_cross_partition_all_gather(
b, operand, ag_shape,
expand_partition_groups(partition_subgroups), channel_id,
all_gather_dimension);
};
}
return result;
}
}
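// Attempts to produce a sharding with the same tile shape as `target_sharding`
// that is reachable from `partial_sharding` by redistributing its replication
// subgroup onto the tile dimensions that need to grow. Returns std::nullopt
// when the device counts, data ranks, or per-dimension tile sizes do not line
// up.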
std::optional<HloSharding> PartialReplicateReshardCompatibleSharding(
const HloSharding& partial_sharding, const HloSharding& target_sharding) {
if (!partial_sharding.ReplicateOnLastTileDim()) {
return std::nullopt;
}
if (partial_sharding.tile_assignment().num_elements() !=
target_sharding.tile_assignment().num_elements()) {
return std::nullopt;
}
const int64_t rank = partial_sharding.TiledDataRank();
if (rank != target_sharding.TiledDataRank()) {
return std::nullopt;
}
std::vector<int64_t> expand_tile_dims_indices(rank, -1);
std::vector<int64_t> expand_tile_sizes;
int64_t num_expand_dims = 0;
for (int64_t dim = 0; dim < rank; dim++) {
int64_t partial_tile_size = partial_sharding.tile_assignment().dim(dim);
int64_t target_tile_size = target_sharding.tile_assignment().dim(dim);
if (target_tile_size % partial_tile_size != 0) {
return std::nullopt;
}
if (target_tile_size > partial_tile_size) {
expand_tile_dims_indices[dim] = num_expand_dims++;
expand_tile_sizes.emplace_back(target_tile_size / partial_tile_size);
}
}
const std::vector<int64_t> shape_dims(
target_sharding.tile_assignment().dimensions().begin(),
target_sharding.tile_assignment().dimensions().begin() + rank);
if (hlo_sharding_util::IsSubTilingOrEqualSharding(
ShapeUtil::MakeShape(F32, shape_dims), target_sharding,
partial_sharding)) {
return target_sharding;
}
std::vector<int64_t> reshape_dimensions(
partial_sharding.tile_assignment().dimensions().begin(),
partial_sharding.tile_assignment().dimensions().begin() + rank);
reshape_dimensions.insert(reshape_dimensions.end(), expand_tile_sizes.begin(),
expand_tile_sizes.end());
std::vector<int> perm;
perm.reserve(rank + expand_tile_sizes.size());
for (int64_t dim = 0; dim < rank; dim++) {
perm.emplace_back(dim);
if (expand_tile_dims_indices[dim] > -1) {
perm.emplace_back(expand_tile_dims_indices[dim] + rank);
}
}
if (target_sharding.ReplicateOnLastTileDim()) {
reshape_dimensions.push_back(
target_sharding.tile_assignment().dimensions().back());
perm.push_back(reshape_dimensions.size() - 1);
}
auto transpose_tile_assignment =
partial_sharding.tile_assignment()
.Reshape(reshape_dimensions)
.Transpose(perm)
.Reshape(target_sharding.tile_assignment().dimensions());
return target_sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(transpose_tile_assignment)
: HloSharding::Tile(transpose_tile_assignment);
}
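// Best-effort data alignment for a tiled-to-partially-replicated reshard:
// along each dimension in `replicate_dims`, exchanges halos so every
// destination shard ends up holding its contiguous region of the padded source
// data. Returns `hlo` unchanged when the padded shapes already agree.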
std::optional<HloInstruction*> TileToPartialReplicateHaloExchange(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& replicate_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b) {
auto padded_src_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, src_sharding);
auto padded_dst_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, dst_sharding);
if (ShapeUtil::Compatible(padded_dst_shape, hlo->shape())) {
return hlo;
}
auto partition_ordinals =
MakeTiledPartitionOrdinals(src_sharding, partition_id, b);
auto result = hlo;
auto hlo_shape = hlo->shape();
for (auto dim : replicate_dims) {
int64_t src_shard_count = src_sharding.tile_assignment().dim(dim);
int64_t dst_shard_count = dst_sharding.tile_assignment().dim(dim);
int64_t src_per_dst_shard_size =
padded_src_shape.dimensions(dim) / dst_shard_count;
int64_t dst_per_shard_size =
padded_dst_shape.dimensions(dim) / dst_shard_count;
if (src_per_dst_shard_size <= dst_per_shard_size || dst_shard_count == 1) {
continue;
}
int64_t replicate_factor = src_shard_count / dst_shard_count;
OffsetCalculation left_halo_size_function = OffsetCalculation(
HloOpcode::kMultiply,
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
0, src_per_dst_shard_size - dst_per_shard_size, 1)),
OffsetCalculation(
MultiplyAddDivideOffsetCalculation(1, 0, replicate_factor)));
OffsetCalculation right_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(0, 0, 1)) -
left_halo_size_function;
result = ExchangeHaloCompact(result, base_shape, left_halo_size_function,
right_halo_size_function, nullptr, dim,
src_sharding, partition_ordinals[dim],
collective_ops_creator, next_channel_id, b);
}
return result;
}
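// Counterpart used when resharding from partial replication to a finer tiling:
// grows each shard along `expand_tile_dims`, exchanging halos where the extra
// data lives on neighboring shards and zero-padding where the dimension was
// previously unsharded.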
std::optional<HloInstruction*> PadFromPartialReplicateShape(
HloInstruction* hlo, const Shape& base_shape,
const HloSharding& src_sharding, const HloSharding& dst_sharding,
const std::vector<int64_t>& expand_tile_dims,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, HloInstruction* partition_id, SpmdBuilder* b) {
auto padded_src_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, src_sharding);
auto padded_dst_shape =
GetPaddedShapeForUnevenPartitioning(base_shape, dst_sharding);
if (ShapeUtil::Compatible(padded_dst_shape, hlo->shape())) {
return hlo;
}
auto partition_ordinals =
MakeTiledPartitionOrdinals(src_sharding, partition_id, b);
HloInstruction* result = hlo;
auto zero = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(hlo->shape().element_type())));
std::vector<int64_t> expand_dims_without_halo_exchange;
for (auto dim : expand_tile_dims) {
int64_t src_shard_count = src_sharding.tile_assignment().dim(dim);
int64_t src_per_shard_size =
padded_src_shape.dimensions(dim) / src_shard_count;
int64_t dst_per_shard_size =
padded_dst_shape.dimensions(dim) / src_shard_count;
if (src_per_shard_size >= dst_per_shard_size) {
continue;
}
if (src_shard_count == 1) {
expand_dims_without_halo_exchange.emplace_back(dim);
continue;
}
OffsetCalculation left_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
src_per_shard_size - dst_per_shard_size, 0, 1));
OffsetCalculation right_halo_size_function =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
dst_per_shard_size - src_per_shard_size,
dst_per_shard_size - src_per_shard_size, 1));
result = ExchangeHaloCompact(result, base_shape, left_halo_size_function,
right_halo_size_function, nullptr, dim,
src_sharding, partition_ordinals[dim],
collective_ops_creator, next_channel_id, b);
}
if (!expand_dims_without_halo_exchange.empty()) {
std::vector<int64_t> zero_padding(result->shape().rank());
PaddingConfig pad_config = window_util::MakeSymmetricPadding(zero_padding);
auto padded_shape = result->shape();
for (auto dim : expand_dims_without_halo_exchange) {
pad_config.mutable_dimensions(dim)->set_edge_padding_low(0);
pad_config.mutable_dimensions(dim)->set_edge_padding_high(
padded_dst_shape.dimensions(dim) - padded_src_shape.dimensions(dim));
padded_shape.set_dimensions(dim, result->shape().dimensions(dim) +
padded_dst_shape.dimensions(dim) -
padded_src_shape.dimensions(dim));
}
result = b->AddInstruction(
HloInstruction::CreatePad(padded_shape, result, zero, pad_config));
}
return result;
}
std::optional<int64_t> UniqueTiledDim(const HloSharding& sharding) {
if (sharding.IsTileMaximal()) {
return std::nullopt;
}
int64_t dim = -1;
int64_t rank = sharding.ReplicateOnLastTileDim()
? sharding.tile_assignment().num_dimensions() - 1
: sharding.tile_assignment().num_dimensions();
for (int64_t i = 0; i < rank; ++i) {
if (sharding.tile_assignment().dim(i) > 1) {
if (dim != -1) {
return std::nullopt;
}
dim = i;
}
}
CHECK_NE(dim, -1);
return dim;
}
MultiplyAddDivideOffsetCalculation::MultiplyAddDivideOffsetCalculation(
int64_t multiplier, int64_t offset, int64_t divisor)
: multiplier_(multiplier), offset_(offset), divisor_(divisor) {
CHECK_GT(divisor_, 0);
Simplify();
}
OffsetCalculation MultiplyAddDivideOffsetCalculation::operator-(
const MultiplyAddDivideOffsetCalculation& other) const {
if (divisor_ == 1 && other.divisor_ == 1) {
return OffsetCalculation(MultiplyAddDivideOffsetCalculation(
multiplier_ - other.multiplier_, offset_ - other.offset_, 1));
}
return OffsetCalculation(HloOpcode::kSubtract, *this, other);
}
OffsetCalculation MultiplyAddDivideOffsetCalculation::operator+(
const MultiplyAddDivideOffsetCalculation& other) const {
if (divisor_ == 1 && other.divisor_ == 1) {
return OffsetCalculation(MultiplyAddDivideOffsetCalculation(
multiplier_ + other.multiplier_, offset_ + other.offset_, 1));
}
return OffsetCalculation(HloOpcode::kAdd, *this, other);
}
void MultiplyAddDivideOffsetCalculation::Simplify() {
if (divisor_ != 1 && multiplier_ % divisor_ == 0 &&
(offset_ % divisor_ == 0 || offset_ * multiplier_ > 0)) {
multiplier_ /= divisor_;
offset_ /= divisor_;
divisor_ = 1;
}
}
int64_t MultiplyAddDivideOffsetCalculation::Calculate(
int64_t shard_ordinal) const {
return (shard_ordinal * multiplier_ + offset_) / divisor_;
}
HloInstruction* MultiplyAddDivideOffsetCalculation::Calculate(
HloInstruction* shard_ordinal, SpmdBuilder* b) const {
auto scalar_shape = ShapeUtil::MakeShape(S32, {});
if (multiplier_ == 0) {
return b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(offset_ / divisor_)));
}
HloInstruction* result = shard_ordinal;
if (multiplier_ != 1) {
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, shard_ordinal,
b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(multiplier_)))));
}
if (offset_ != 0) {
auto offset = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(offset_)));
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, result, offset));
}
if (divisor_ != 1) {
auto divisor = b->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(divisor_)));
result = b->AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kDivide, result, divisor));
}
return result;
}
int64_t MultiplyAddDivideOffsetCalculation::MaxInRange(
int64_t start_ordinal, int64_t limit_ordinal) const {
int64_t max = Calculate(start_ordinal);
for (int64_t i = start_ordinal + 1; i < limit_ordinal; ++i) {
max = std::max(max, Calculate(i));
}
return max;
}
OffsetCalculation& OffsetCalculation::operator=(
const OffsetCalculation& other) {
opcode_ = other.opcode_;
copy_from_ = other.copy_from_;
if (opcode_ != HloOpcode::kCopy) {
lhs_ = std::make_unique<OffsetCalculation>(*other.lhs_);
rhs_ = std::make_unique<OffsetCalculation>(*other.rhs_);
}
return *this;
}
bool OffsetCalculation::IsConstant() const {
if (opcode_ == HloOpcode::kCopy) {
return copy_from_.IsConstant();
}
if (opcode_ == HloOpcode::kSubtract && *lhs_ == *rhs_) {
return true;
}
return lhs_->IsConstant() && rhs_->IsConstant();
}
OffsetCalculation OffsetCalculation::operator-(
const OffsetCalculation& other) const {
if (opcode_ == HloOpcode::kCopy && other.opcode_ == HloOpcode::kCopy) {
return copy_from_ - other.copy_from_;
}
return OffsetCalculation(HloOpcode::kSubtract, *this, other);
}
OffsetCalculation OffsetCalculation::operator+(
const OffsetCalculation& other) const {
if (opcode_ == HloOpcode::kCopy && other.opcode_ == HloOpcode::kCopy) {
return copy_from_ + other.copy_from_;
}
return OffsetCalculation(HloOpcode::kAdd, *this, other);
}
bool OffsetCalculation::operator==(const OffsetCalculation& other) const {
if (opcode_ != other.opcode_) {
return false;
}
if (opcode_ == HloOpcode::kCopy) {
return copy_from_ == other.copy_from_;
}
return *lhs_ == *other.lhs_ && *rhs_ == *other.rhs_;
}
int64_t OffsetCalculation::Calculate(int64_t shard_ordinal) const {
switch (opcode_) {
case HloOpcode::kAdd:
return lhs_->Calculate(shard_ordinal) + rhs_->Calculate(shard_ordinal);
case HloOpcode::kCopy:
return copy_from_.Calculate(shard_ordinal);
case HloOpcode::kSubtract:
return lhs_->Calculate(shard_ordinal) - rhs_->Calculate(shard_ordinal);
case HloOpcode::kMultiply:
return lhs_->Calculate(shard_ordinal) * rhs_->Calculate(shard_ordinal);
default:
LOG(FATAL) << "Should not happen";
}
}
HloInstruction* OffsetCalculation::Calculate(HloInstruction* shard_ordinal,
SpmdBuilder* b) const {
if (opcode_ == HloOpcode::kCopy) {
return copy_from_.Calculate(shard_ordinal, b);
}
auto lhs = lhs_->Calculate(shard_ordinal, b);
auto rhs = rhs_->Calculate(shard_ordinal, b);
return b->AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), opcode_, lhs, rhs));
}
int64_t OffsetCalculation::MaxInRange(int64_t start_ordinal,
int64_t limit_ordinal) const {
if (IsConstant()) {
return Calculate(start_ordinal);
}
if (opcode_ == HloOpcode::kCopy) {
return std::max(Calculate(start_ordinal), Calculate(limit_ordinal - 1));
}
int64_t max = Calculate(start_ordinal);
for (int64_t i = start_ordinal + 1; i < limit_ordinal; ++i) {
max = std::max(max, Calculate(i));
}
return max;
}
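// Single-dimension halo exchange: gathers left halos from lower-ordinal shards
// and right halos from higher-ordinal shards via collective-permutes, keeps
// the still-needed slice of the local shard, and concatenates the pieces along
// `dim`. Returns std::nullopt when a halo exceeds a full shard and the result
// would be no smaller than the unpartitioned data.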
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo, const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function, int64_t dim,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
int64_t input_shard_size = hlo->shape().dimensions(dim);
int64_t shard_count = target.tile_assignment().dim(dim);
std::vector<HloInstruction*> concat_pieces;
int64_t max_left_halo_size =
left_halo_size_function.MaxInRange(1, shard_count);
int64_t max_right_halo_size =
right_halo_size_function.MaxInRange(0, shard_count - 1);
if (max_left_halo_size + max_right_halo_size + input_shard_size >=
input_shard_size * shard_count &&
(max_left_halo_size > input_shard_size ||
max_right_halo_size > input_shard_size)) {
return std::nullopt;
}
const int64_t left_bound =
-left_halo_size_function.MaxInRange(0, shard_count);
const int64_t right_bound =
input_shard_size + right_halo_size_function.MaxInRange(0, shard_count);
if (left_bound >= right_bound) {
return std::nullopt;
}
for (int64_t i = CeilOfRatio(max_left_halo_size, input_shard_size) - 1;
i >= 0 && (-i - 1) * input_shard_size < right_bound; --i) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
target.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
if (indices[dim] > i) {
std::vector<int64_t> source_indices(indices.begin(), indices.end());
source_indices[dim] -= i + 1;
source_target_pairs.emplace_back(
target.tile_assignment()(source_indices), device);
}
});
int64_t halo_size_including_skips =
std::min(max_left_halo_size - input_shard_size * i, input_shard_size);
int64_t halo_right_skips =
std::max<int64_t>(-i * input_shard_size - right_bound, 0);
int64_t halo_size = halo_size_including_skips - halo_right_skips;
auto halo_shape = hlo->shape();
auto source_halo_slice = hlo;
if (halo_size != hlo->shape().dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] =
hlo->shape().dimensions(dim) - halo_size_including_skips;
std::vector<int64_t> halo_limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
halo_limit_indices[dim] -= halo_right_skips;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
auto left_halo =
collective_ops_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
concat_pieces.push_back(left_halo);
}
if (left_bound < input_shard_size && right_bound > 0) {
int64_t self_start = std::max<int64_t>(0, left_bound);
int64_t self_limit = std::min<int64_t>(input_shard_size, right_bound);
if (self_start == 0 && self_limit == input_shard_size) {
concat_pieces.push_back(hlo);
} else {
auto self_shape = hlo->shape();
self_shape.set_dimensions(dim, self_limit - self_start);
std::vector<int64_t> start_indices(self_shape.rank(), 0);
start_indices[dim] = self_start;
std::vector<int64_t> limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
limit_indices[dim] = self_limit;
std::vector<int64_t> slice_strides(self_shape.rank(), 1);
concat_pieces.push_back(b->AddInstruction(HloInstruction::CreateSlice(
self_shape, hlo, start_indices, limit_indices, slice_strides)));
}
}
int64_t skipped_right_halos =
std::min<int64_t>(std::max<int64_t>(left_bound - input_shard_size, 0),
std::max<int64_t>(max_right_halo_size, 0)) /
input_shard_size;
for (int64_t i = skipped_right_halos;
i < CeilOfRatio(max_right_halo_size, input_shard_size); ++i) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
target.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t device) {
if (indices[dim] > i) {
std::vector<int64_t> target_indices(indices.begin(), indices.end());
target_indices[dim] -= i + 1;
source_target_pairs.emplace_back(
device, target.tile_assignment()(target_indices));
}
});
int64_t halo_size_including_skips =
std::min(max_right_halo_size - input_shard_size * i, input_shard_size);
int64_t halo_left_skips =
std::max<int64_t>(left_bound - (i + 1) * input_shard_size, 0);
int64_t halo_size = halo_size_including_skips - halo_left_skips;
auto halo_shape = hlo->shape();
HloInstruction* source_halo_slice = hlo;
if (halo_size != halo_shape.dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] = halo_left_skips;
std::vector<int64_t> halo_limit_indices(halo_shape.dimensions().begin(),
halo_shape.dimensions().end());
halo_limit_indices[dim] += halo_left_skips;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
auto right_halo =
collective_ops_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
concat_pieces.push_back(right_halo);
}
auto concat = concat_pieces[0];
if (concat_pieces.size() > 1) {
auto concat_shape = hlo->shape();
int64_t concat_dim_size = 0;
for (auto piece : concat_pieces) {
concat_dim_size += piece->shape().dimensions(dim);
}
concat_shape.set_dimensions(dim, concat_dim_size);
concat = b->AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, concat_pieces, dim));
}
return concat;
}
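// Memory-compact halo exchange for the case where halos are large relative to
// the shard: instead of concatenating whole neighbor slices, it enumerates the
// exact [start, limit) pieces each shard's window needs, batches them into a
// small number of collective-permutes, and assembles each window with selects,
// pads, and a final dynamic-slice keyed on the shard ordinal.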
HloInstruction* ExchangeHaloCompact(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
HloInstruction* pad_value, int64_t dim, const HloSharding& sharding,
HloInstruction* shard_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
int64_t input_shard_size = hlo->shape().dimensions(dim);
int64_t shard_count = sharding.tile_assignment().dim(dim);
auto grouped =
hlo_sharding_util::GroupShardingOnAllDimsExcept(sharding, {dim});
auto g_creator = GetPerGroupCollectiveOpsCreator(collective_ops_creator,
grouped.device_groups);
  const bool ignore_pad_value = pad_value == nullptr;
  if (ignore_pad_value) {
pad_value = CreateR0WithType(hlo->shape().element_type(), 0, b);
}
struct Halo {
int64_t my_index;
int64_t start;
int64_t limit;
int64_t cp_idx;
int64_t halo_offset;
int64_t halo_at_shard;
};
std::vector<std::vector<Halo>> halos(shard_count);
constexpr int64_t kPaddingShard = -2;
constexpr int64_t kSelfShard = -1;
int64_t max_window_size = 0;
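  // First pass: split every shard's window [start, limit) into contiguous
  // pieces, each of which is either padding (outside the base shape) or a
  // sub-range of some source shard.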
for (int64_t i = 0; i < shard_count; ++i) {
const int64_t start =
i * input_shard_size - left_halo_size_function.Calculate(i);
int64_t next_start = start;
const int64_t limit =
(i + 1) * input_shard_size + right_halo_size_function.Calculate(i);
max_window_size = std::max(max_window_size, limit - start);
while (next_start < limit) {
Halo& halo = halos[i].emplace_back();
halo.my_index = i;
halo.halo_offset = next_start - start;
halo.start = next_start % input_shard_size;
if (halo.start < 0) {
halo.start += input_shard_size;
}
int64_t size = limit - next_start;
if (next_start < 0 || next_start >= base_shape.dimensions(dim)) {
if (next_start < 0) {
size = std::min(size, 0 - next_start);
}
VLOG(3) << "Halo for shard i " << i << ": pad, size " << size;
halo.limit = halo.start + size;
halo.cp_idx = kPaddingShard;
next_start += size;
continue;
}
size = std::min(input_shard_size - halo.start, size);
halo.limit = halo.start + size;
int64_t shard = next_start / input_shard_size;
halo.halo_at_shard = shard;
halo.cp_idx = kSelfShard;
next_start += size;
VLOG(3) << "Halo for shard i " << i << ": shard " << shard << ", size "
<< size << ", start " << halo.start;
}
}
std::vector<std::vector<std::pair<int64_t, int64_t>>> src_to_dst(shard_count);
{
std::vector<std::vector<Halo>> halos2(shard_count);
std::vector<int64_t> next_halo_idx(halos2.size(), 0);
while (true) {
bool all_padding = true;
bool empty = true;
for (int64_t i = 0; i < halos.size(); ++i) {
if (next_halo_idx[i] >= halos[i].size()) {
continue;
}
if (halos[i][next_halo_idx[i]].cp_idx != kPaddingShard) {
all_padding = false;
}
empty = false;
}
if (empty) {
break;
}
for (int64_t i = 0; i < halos.size(); ++i) {
if (next_halo_idx[i] >= halos[i].size()) {
continue;
}
Halo& h = halos[i][next_halo_idx[i]];
halos2[i].push_back(h);
Halo& new_h = halos2[i].back();
if (!all_padding && h.cp_idx == kPaddingShard &&
h.limit > input_shard_size) {
new_h.limit = input_shard_size;
h.start = 0;
h.limit -= input_shard_size;
VLOG(3) << "Split padding halo for shard i " << i << ": size "
<< new_h.limit - new_h.start;
} else {
next_halo_idx[i] += 1;
}
if (h.cp_idx != kPaddingShard && h.halo_at_shard != i) {
src_to_dst[h.halo_at_shard].emplace_back(i, halos2[i].size() - 1);
}
}
}
halos = std::move(halos2);
}
for (int64_t i = 0; i < src_to_dst.size(); ++i) {
absl::c_stable_sort(src_to_dst[i],
[&](const std::pair<int64_t, int64_t>& a,
const std::pair<int64_t, int64_t>& b) {
return halos[a.first][a.second].halo_offset <
halos[b.first][b.second].halo_offset;
});
}
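  // Batch the source->destination transfers into collective-permutes: each
  // iteration takes at most one pending piece per source shard (skipping
  // pieces whose destination is already used or whose range cannot be merged
  // into the current batch) and issues one collective-permute over the union
  // range [start, limit).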
std::vector<std::pair<HloInstruction*, int64_t>> cps;
std::vector<int64_t> next_dst_idx(src_to_dst.size(), 0);
while (true) {
std::vector<std::pair<int64_t, int64_t>> source_target_pairs;
std::vector<bool> dst_seen(shard_count, false);
int64_t start = input_shard_size;
int64_t limit = 0;
for (int64_t i = 0; i < src_to_dst.size(); ++i) {
if (src_to_dst[i].size() <= next_dst_idx[i]) {
continue;
}
const auto& halo_idx = src_to_dst[i][next_dst_idx[i]];
Halo& halo = halos[halo_idx.first][halo_idx.second];
if (!source_target_pairs.empty() &&
(dst_seen[halo.my_index] ||
(start > halo.limit && limit == input_shard_size &&
halo.start == 0) ||
(limit < halo.start && start == 0 &&
halo.limit == input_shard_size))) {
continue;
}
halo.cp_idx = cps.size();
dst_seen[halo.my_index] = true;
source_target_pairs.emplace_back(i, halo.my_index);
start = std::min(start, halo.start);
limit = std::max(limit, halo.limit);
next_dst_idx[i] += 1;
}
if (source_target_pairs.empty()) {
break;
}
CHECK_LT(start, limit);
const int64_t halo_size = limit - start;
Shape halo_shape = hlo->shape();
HloInstruction* source_halo_slice = hlo;
if (halo_size != hlo->shape().dimensions(dim)) {
halo_shape.set_dimensions(dim, halo_size);
std::vector<int64_t> halo_start_indices(halo_shape.rank(), 0);
halo_start_indices[dim] = start;
std::vector<int64_t> halo_limit_indices(hlo->shape().dimensions().begin(),
hlo->shape().dimensions().end());
halo_limit_indices[dim] = limit;
std::vector<int64_t> halo_slice_strides(halo_shape.rank(), 1);
source_halo_slice = b->AddInstruction(
HloInstruction::CreateSlice(halo_shape, hlo, halo_start_indices,
halo_limit_indices, halo_slice_strides));
}
HloInstruction* cp = g_creator.create_cross_partition_collective_permute(
b, source_halo_slice, source_target_pairs, (*next_channel_id)++);
VLOG(3) << "Halo collective-permute created: " << cp->ToString();
cps.emplace_back(cp, start);
}
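  // Second pass: assemble each shard's window. For every halo position, pick
  // the matching piece per shard (a collective-permute result, a slice of the
  // local data, or broadcast padding), pad the pieces to a common size, select
  // by shard ordinal, and concatenate.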
std::vector<HloInstruction*> concat_pieces;
Shape concat_shape = hlo->shape();
concat_shape.set_dimensions(dim, 0);
int64_t self_piece_start = input_shard_size;
bool all_padding = true;
for (int64_t current_halo_idx = 0; true; ++current_halo_idx) {
int64_t max_size = 0;
constexpr int64_t kUnseen = -5;
std::vector<int64_t> cp_index(halos.size(), kUnseen);
int64_t min_self_start = input_shard_size;
int64_t max_self_limit = 0;
for (int64_t i = 0; i < halos.size(); ++i) {
if (current_halo_idx >= halos[i].size()) {
continue;
}
const Halo& halo = halos[i][current_halo_idx];
cp_index[i] = halo.cp_idx;
if (halo.cp_idx >= 0) {
max_size =
std::max(max_size, cps[cp_index[i]].first->shape().dimensions(dim));
} else if (halo.cp_idx == kSelfShard) {
min_self_start = std::min(min_self_start, halo.start);
max_self_limit = std::max(max_self_limit, halo.limit);
max_size = std::max(max_size, max_self_limit - min_self_start);
} else {
max_size = std::max(max_size, halo.limit - halo.start);
}
}
if (absl::c_all_of(cp_index, [&](int64_t idx) { return idx == kUnseen; })) {
break;
}
min_self_start -= max_size - (max_self_limit - min_self_start);
min_self_start = std::max<int64_t>(min_self_start, 0);
if (current_halo_idx == 0) {
self_piece_start = min_self_start;
}
concat_shape.set_dimensions(dim, max_size + concat_shape.dimensions(dim));
Shape piece_shape = hlo->shape();
piece_shape.set_dimensions(dim, max_size);
HloInstruction* padding = b->AddInstruction(
HloInstruction::CreateBroadcast(piece_shape, pad_value, {}));
std::vector<HloInstruction*> unique_pieces;
std::vector<int64_t> slices_cache(cps.size() + 2, kUnseen);
std::vector<int32_t> piece_index(halos.size());
for (int64_t i = 0; i < halos.size(); ++i) {
HloInstruction* piece;
int64_t cache_idx = cp_index[i];
if (cp_index[i] >= 0) {
all_padding = false;
piece = cps[cp_index[i]].first;
} else if (cp_index[i] == kSelfShard) {
if (hlo->shape().dimensions(dim) == max_size) {
piece = hlo;
} else {
std::vector<int64_t> starts(piece_shape.rank(), 0);
starts[dim] = min_self_start;
std::vector<int64_t> limits(piece_shape.dimensions().begin(),
piece_shape.dimensions().end());
std::vector<int64_t> strides(piece_shape.rank(), 1);
limits[dim] += min_self_start;
piece = b->AddInstruction(HloInstruction::CreateSlice(
piece_shape, hlo, starts, limits, strides));
}
cache_idx = cps.size();
all_padding = false;
} else {
piece = padding;
cache_idx = cps.size() + 1;
}
if (slices_cache[cache_idx] != kUnseen) {
piece_index[i] = slices_cache[cache_idx];
continue;
}
if (piece->shape().dimensions(dim) != max_size) {
PaddingConfig pc;
for (int64_t k = 0; k < piece_shape.rank(); ++k) {
auto pc_dim = pc.add_dimensions();
pc_dim->set_interior_padding(0);
pc_dim->set_edge_padding_low(0);
pc_dim->set_edge_padding_high(0);
if (k != dim) {
continue;
}
int64_t padding_size = max_size - piece->shape().dimensions(dim);
if (concat_pieces.empty()) {
pc_dim->set_edge_padding_low(padding_size);
} else {
pc_dim->set_edge_padding_high(padding_size);
}
}
piece = b->AddInstruction(
HloInstruction::CreatePad(piece_shape, piece, pad_value, pc));
}
piece_index[i] = unique_pieces.size();
unique_pieces.push_back(piece);
slices_cache[cache_idx] = piece_index[i];
}
HloInstruction* selector =
TableLookup<int32_t>(piece_index, S32, shard_ordinal, b);
int64_t init_piece = 0;
if (unique_pieces.size() > 1 && unique_pieces[init_piece] == padding) {
init_piece = 1;
}
HloInstruction* selected = unique_pieces[init_piece];
for (int64_t i = init_piece + 1; i < unique_pieces.size(); ++i) {
if (unique_pieces[i] == padding) {
continue;
}
HloInstruction* pred = b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeScalarShape(PRED), selector,
CreateR0WithType(S32, i, b), ComparisonDirection::kEq));
pred = b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeShape(PRED, selected->shape().dimensions()), pred,
{}));
selected = b->AddInstruction(
HloInstruction::CreateTernary(selected->shape(), HloOpcode::kSelect,
pred, unique_pieces[i], selected));
}
concat_pieces.push_back(selected);
}
if (all_padding) {
concat_shape.set_dimensions(dim, max_window_size);
return b->AddInstruction(
HloInstruction::CreateBroadcast(concat_shape, pad_value, {}));
}
CHECK_GE(concat_shape.dimensions(dim), max_window_size);
HloInstruction* concat;
if (concat_pieces.size() == 1) {
concat = concat_pieces[0];
} else {
concat = b->AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, concat_pieces, dim));
}
std::vector<int32_t> slice_offset(halos.size(), 0);
std::vector<int32_t> non_padding_starts(halos.size(), 0);
std::vector<int32_t> non_padding_limits(halos.size(), 0);
const int64_t first_piece_size = concat_pieces[0]->shape().dimensions(dim);
int64_t padded_concat_size = concat_shape.dimensions(dim);
for (int64_t i = 0; i < halos.size(); ++i) {
if (halos[i].empty()) {
continue;
}
const Halo& halo = halos[i][0];
for (int64_t j = 0; j < halos[i].size(); ++j) {
if (halos[i][j].cp_idx != kPaddingShard) {
break;
}
non_padding_starts[i] += halos[i][j].limit - halos[i][j].start;
}
non_padding_limits[i] = left_halo_size_function.Calculate(i) +
right_halo_size_function.Calculate(i) +
input_shard_size;
int64_t high_padding = right_halo_size_function.Calculate(i) +
input_shard_size * (i + 1) -
base_shape.dimensions(dim);
if (high_padding > 0) {
non_padding_limits[i] -= high_padding;
}
if (halo.cp_idx >= 0) {
slice_offset[i] = halo.start - cps[halo.cp_idx].second +
first_piece_size -
cps[halo.cp_idx].first->shape().dimensions(dim);
} else if (halo.cp_idx == kSelfShard) {
slice_offset[i] = halo.start - self_piece_start;
} else {
slice_offset[i] = first_piece_size - (halo.limit - halo.start);
}
padded_concat_size =
std::max(padded_concat_size, slice_offset[i] + max_window_size);
}
if (padded_concat_size > concat_shape.dimensions(dim)) {
PaddingConfig pc;
for (int64_t k = 0; k < concat_shape.rank(); ++k) {
auto pc_dim = pc.add_dimensions();
pc_dim->set_interior_padding(0);
pc_dim->set_edge_padding_low(0);
pc_dim->set_edge_padding_high(0);
if (k != dim) {
continue;
}
pc_dim->set_edge_padding_high(padded_concat_size -
concat_shape.dimensions(dim));
}
concat_shape.set_dimensions(dim, padded_concat_size);
concat = b->AddInstruction(
HloInstruction::CreatePad(concat_shape, concat, pad_value, pc));
}
if (concat_shape.dimensions(dim) > max_window_size) {
Shape result_shape = concat_shape;
result_shape.set_dimensions(dim, max_window_size);
std::vector<HloInstruction*> offsets(result_shape.rank(),
CreateR0WithType(S32, 0, b));
offsets[dim] = TableLookup<int32_t>(slice_offset, S32, shard_ordinal, b);
concat = b->AddInstruction(HloInstruction::CreateDynamicSlice(
result_shape, concat, offsets, result_shape.dimensions()));
}
  if (ignore_pad_value) {
return concat;
}
HloInstruction* iota = b->AddInstruction(HloInstruction::CreateIota(
ShapeUtil::ChangeElementType(concat->shape(), S32), dim));
HloInstruction* valid_limit =
b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(concat->shape(), S32),
TableLookup<int32_t>(non_padding_limits, S32, shard_ordinal, b), {}));
HloInstruction* mask = b->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(concat->shape(), PRED), iota, valid_limit,
ComparisonDirection::kLt));
if (absl::c_any_of(non_padding_starts,
[](const int32_t s) { return s > 0; })) {
HloInstruction* valid_start =
b->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(concat->shape(), S32),
TableLookup<int32_t>(non_padding_starts, S32, shard_ordinal, b),
{}));
mask = b->AddInstruction(HloInstruction::CreateBinary(
mask->shape(), HloOpcode::kAnd, mask,
b->AddInstruction(HloInstruction::CreateCompare(
mask->shape(), iota, valid_start, ComparisonDirection::kGe))));
}
HloInstruction* padding = b->AddInstruction(
HloInstruction::CreateBroadcast(concat->shape(), pad_value, {}));
return b->AddInstruction(HloInstruction::CreateTernary(
concat->shape(), HloOpcode::kSelect, mask, concat, padding));
}
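// Applies the single-dimension halo exchange along every dimension in turn.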
std::optional<HloInstruction*> ExchangeHalo(
HloInstruction* hlo,
std::vector<OffsetCalculation> left_halo_size_functions,
std::vector<OffsetCalculation> right_halo_size_functions,
const HloSharding& target,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b) {
CHECK(left_halo_size_functions.size() == hlo->shape().rank());
CHECK(right_halo_size_functions.size() == hlo->shape().rank());
HloInstruction* visiting_hlo = hlo;
for (int dim = 0; dim < hlo->shape().rank(); ++dim) {
auto concat = ExchangeHalo(visiting_hlo, left_halo_size_functions[dim],
right_halo_size_functions[dim], dim, target,
collective_ops_creator, next_channel_id, b);
if (!concat) {
return std::nullopt;
}
visiting_hlo = *concat;
}
return visiting_hlo;
}
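// Halo exchange on `dim` followed by fix-up: pads or dynamic-slices the
// concatenated result to `shard_size_with_halo`, and, if requested, masks
// elements that fall into the explicit padding of the full shape with
// `pad_value`. Dispatches to ExchangeHaloCompact when the maximum halo is much
// larger than the windowed shard itself.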
std::optional<HloInstruction*> ExchangeHaloAndGetValidData(
HloInstruction* hlo, const Shape& base_shape,
const OffsetCalculation& left_halo_size_function,
const OffsetCalculation& right_halo_size_function,
int64_t explicit_left_padding_on_full_shape, int64_t padded_full_shape_size,
int64_t shard_size_with_halo, int64_t dim, const HloSharding& target,
HloInstruction* offset_on_padded_shape, HloInstruction* pad_value,
HloInstruction* partition_ordinal,
const SPMDCollectiveOpsCreator& collective_ops_creator,
int64_t* next_channel_id, SpmdBuilder* b, bool mask_invalid_region,
bool force_mask_in_compact) {
int64_t shard_count = target.tile_assignment().dim(dim);
if (explicit_left_padding_on_full_shape ==
left_halo_size_function.Calculate(0)) {
int64_t max_halo =
std::max(left_halo_size_function.MaxInRange(0, shard_count),
right_halo_size_function.MaxInRange(0, shard_count));
int64_t max_shard_size =
hlo->shape().dimensions(dim) +
(left_halo_size_function + right_halo_size_function)
.MaxInRange(0, shard_count);
if (max_shard_size == shard_size_with_halo &&
max_halo > 2 * shard_size_with_halo) {
if (max_shard_size * 2 >= shard_count * hlo->shape().dimensions(dim)) {
return std::nullopt;
}
return ExchangeHaloCompact(
hlo, base_shape, left_halo_size_function, right_halo_size_function,
mask_invalid_region || force_mask_in_compact ? pad_value : nullptr,
dim, target, partition_ordinal, collective_ops_creator,
next_channel_id, b);
}
}
auto halo_exchange_result =
ExchangeHalo(hlo, left_halo_size_function, right_halo_size_function, dim,
target, collective_ops_creator, next_channel_id, b);
if (!halo_exchange_result) {
return std::nullopt;
}
auto concat = *halo_exchange_result;
int64_t max_left_halo_size =
left_halo_size_function.MaxInRange(1, shard_count);
int64_t max_left_halo_or_padding_size =
std::max(max_left_halo_size, explicit_left_padding_on_full_shape);
auto start_offset_on_padded_concat_calculation =
OffsetCalculation(MultiplyAddDivideOffsetCalculation(
0, max_left_halo_or_padding_size, 1)) -
left_halo_size_function;
int64_t extra_left_padding =
std::max(int64_t{0}, max_left_halo_or_padding_size -
std::max(int64_t{0}, max_left_halo_size));
int64_t extra_right_padding =
start_offset_on_padded_concat_calculation.MaxInRange(0, shard_count) +
shard_size_with_halo - concat->shape().dimensions(dim) -
extra_left_padding;
extra_right_padding = std::max(int64_t{0}, extra_right_padding);
if (extra_left_padding > 0 || extra_right_padding > 0) {
PaddingConfig padding_config;
auto padded_concat_shape = concat->shape();
for (int64_t i = 0; i < base_shape.rank(); ++i) {
auto padding_config_dim = padding_config.add_dimensions();
padding_config_dim->set_interior_padding(0);
padding_config_dim->set_edge_padding_low(0);
padding_config_dim->set_edge_padding_high(0);
if (i != dim) {
continue;
}
padding_config_dim->set_edge_padding_low(extra_left_padding);
padding_config_dim->set_edge_padding_high(extra_right_padding);
padded_concat_shape.set_dimensions(dim, concat->shape().dimensions(dim) +
extra_left_padding +
extra_right_padding);
}
concat = b->AddInstruction(HloInstruction::CreatePad(
padded_concat_shape, concat, pad_value, padding_config));
}
auto valid_slice = concat;
if (shard_size_with_halo != concat->shape().dimensions(dim)) {
CHECK_LT(shard_size_with_halo, concat->shape().dimensions(dim));
auto slice_shape = concat->shape();
slice_shape.set_dimensions(dim, shard_size_with_halo);
if (left_halo_size_function.IsConstant() &&
left_halo_size_function.Calculate(0) ==
explicit_left_padding_on_full_shape) {
std::vector<int64_t> start_indices(slice_shape.rank(), 0);
std::vector<int64_t> strides(slice_shape.rank(), 1);
valid_slice = b->AddInstruction(
HloInstruction::CreateSlice(slice_shape, concat, start_indices,
slice_shape.dimensions(), strides));
} else {
auto zero = b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
std::vector<HloInstruction*> slice_offsets(base_shape.rank(), zero);
slice_offsets[dim] = start_offset_on_padded_concat_calculation.Calculate(
partition_ordinal, b);
valid_slice = b->AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape, concat, slice_offsets, slice_shape.dimensions()));
}
}
if (!mask_invalid_region) {
return valid_slice;
}
int64_t total_right_padding = padded_full_shape_size -
base_shape.dimensions(dim) -
explicit_left_padding_on_full_shape;
if (explicit_left_padding_on_full_shape > 0 || total_right_padding > 0) {
auto index_shape = ShapeUtil::ChangeElementType(valid_slice->shape(), S32);
auto iota = b->AddInstruction(HloInstruction::CreateIota(index_shape, dim));
auto broadcast_start_index_in_padded_shape =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape, offset_on_padded_shape, {}));
auto index_in_padded_shape = b->AddInstruction(
HloInstruction::CreateBinary(index_shape, HloOpcode::kAdd, iota,
broadcast_start_index_in_padded_shape));
auto mask_shape = ShapeUtil::ChangeElementType(index_shape, PRED);
std::vector<HloInstruction*> predicates;
if (explicit_left_padding_on_full_shape > 0) {
auto valid_index_start =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape,
b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
explicit_left_padding_on_full_shape))),
{}));
predicates.push_back(b->AddInstruction(HloInstruction::CreateCompare(
mask_shape, index_in_padded_shape, valid_index_start,
ComparisonDirection::kGe)));
}
if (total_right_padding > 0) {
auto valid_index_limit =
b->AddInstruction(HloInstruction::CreateBroadcast(
index_shape,
b->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
base_shape.dimensions(dim) +
explicit_left_padding_on_full_shape))),
{}));
predicates.push_back(b->AddInstruction(HloInstruction::CreateCompare(
mask_shape, index_in_padded_shape, valid_index_limit,
ComparisonDirection::kLt)));
}
CHECK(!predicates.empty());
auto is_valid =
predicates.size() == 2
? b->AddInstruction(HloInstruction::CreateBinary(
mask_shape, HloOpcode::kAnd, predicates[0], predicates[1]))
: predicates[0];
if (pad_value->shape().element_type() !=
valid_slice->shape().element_type()) {
pad_value = b->AddInstruction(HloInstruction::CreateConvert(
ShapeUtil::MakeShape(valid_slice->shape().element_type(),
pad_value->shape().dimensions()),
pad_value));
}
auto masking_value = b->AddInstruction(
HloInstruction::CreateBroadcast(valid_slice->shape(), pad_value, {}));
valid_slice = b->AddInstruction(
HloInstruction::CreateTernary(valid_slice->shape(), HloOpcode::kSelect,
is_valid, valid_slice, masking_value));
}
return valid_slice;
}
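// Reshards `original` so that, for each dimension in `dims`, the padding
// needed to make the base shape evenly tileable sits entirely on the left.
// Returns nullptr if the windowed reshard is not supported.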
HloInstruction* HaloExchangeToPadOnLeft(PartitionedHlo& original,
absl::Span<const int64_t> dims) {
if (original.sharding().IsTileMaximal()) {
return original.hlo();
}
Window window;
for (int64_t i = 0; i < original.base_shape().rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
int64_t low_padding = 0;
if (absl::c_linear_search(dims, i)) {
low_padding = RoundUpTo(original.base_shape().dimensions(i),
original.sharding().tile_assignment().dim(i)) -
original.base_shape().dimensions(i);
}
dim->set_padding_low(low_padding);
dim->set_padding_high(0);
dim->set_base_dilation(1);
}
auto reshard_window = original.ReshardAsWindowedInput(
window, original.sharding(),
CreateZero(ShapeUtil::MakeShape(original.base_shape().element_type(), {}),
original.state().b),
/*mask_invalid_region=*/false);
if (!reshard_window.has_value()) {
return nullptr;
}
CHECK(!reshard_window->dynamic_slice_index_on_output.has_value());
return reshard_window->sharded_input;
}
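// Returns true if `comp` implements the canonical NaN-safe greater-than
// comparison (the bitcast-based total order) over F32 or BF16 keys.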
bool IsNanSafeGt(HloComputation* comp) {
namespace m = match;
auto match_bitcast_f32 = [](int64_t parameter_number) {
auto param = m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
auto match_bitcast_bf16 = [](int64_t parameter_number) {
auto param = m::Convert(m::Parameter(parameter_number)
.WithShape(m::Shape().WithElementType(BF16)))
.WithShape(m::Shape().WithElementType(F32));
auto param_s32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(S32));
auto param_u32 =
m::BitcastConvert(param).WithShape(m::Shape().WithElementType(U32));
return m::Select(
m::Lt(param_s32, m::ConstantScalar(0)),
m::BitcastConvert(
m::Subtract(m::ConstantScalar(std::numeric_limits<int32_t>::max()),
param_u32))
.WithShape(m::Shape().WithElementType(S32)),
param_s32);
};
if (comp->root_instruction()->opcode() == HloOpcode::kSelect) {
return Match(comp->root_instruction()->operand(2),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction()->operand(2),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1)));
}
return Match(comp->root_instruction(),
m::Gt(match_bitcast_f32(0), match_bitcast_f32(1))) ||
Match(comp->root_instruction(),
m::Gt(match_bitcast_bf16(0), match_bitcast_bf16(1)));
}
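// Recognizes a TopK-style sort (value/iota operands, NaN-safe comparator,
// users that slice the first k elements) partitioned only along the sort
// dimension, and returns k when k fits within a single partition.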
std::optional<int64_t> GetKValueInTopKWhenPartitionSortDim(
HloInstruction* hlo) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(hlo);
if (sort == nullptr || sort->operand_count() != 2) {
return std::nullopt;
}
if (!IsNanSafeGt(sort->to_apply())) {
return std::nullopt;
}
HloInstruction* data = sort->mutable_operand(0);
HloIotaInstruction* iota =
DynCast<HloIotaInstruction>(sort->mutable_operand(1));
const PrimitiveType element_type = data->shape().element_type();
if (iota == nullptr || iota->shape().element_type() != S32 ||
iota->opcode() != HloOpcode::kIota ||
iota->iota_dimension() != sort->sort_dimension()) {
return std::nullopt;
}
const int64_t sort_dim = sort->sort_dimension();
if (element_type != F32 && element_type != BF16 && element_type != S32 &&
element_type != U32) {
return std::nullopt;
}
bool supported = true;
std::optional<int64_t> k;
for (HloInstruction* gte : sort->users()) {
if (gte->opcode() != HloOpcode::kGetTupleElement) {
supported = false;
break;
}
const HloInstruction* slice = gte->users()[0];
if (slice->opcode() != HloOpcode::kSlice) {
supported = false;
break;
}
if (absl::c_any_of(slice->slice_starts(), [](int64_t x) { return x != 0; }) ||
absl::c_any_of(slice->slice_strides(), [](int64_t x) { return x != 1; })) {
supported = false;
break;
}
for (int64_t dim = 0; dim < data->shape().dimensions_size(); dim++) {
if (dim == sort_dim) {
continue;
}
if (slice->slice_limits(dim) !=
slice->operand(0)->shape().dimensions(dim)) {
supported = false;
break;
}
}
if (!k.has_value()) {
k = slice->slice_limits(sort_dim);
} else if (k != slice->slice_limits(sort_dim)) {
supported = false;
break;
}
}
if (k == std::nullopt || !supported) {
return std::nullopt;
}
if (!data->has_sharding()) {
return std::nullopt;
}
const HloSharding& sharding = sort->operand(0)->sharding();
if (sharding.IsTileMaximal()) {
return std::nullopt;
}
for (int64_t dim = 0; dim < sort->shape().tuple_shapes(0).dimensions_size();
++dim) {
if (sharding.tile_assignment().dim(dim) > 1) {
if (dim != sort_dim) {
return std::nullopt;
}
}
}
const int64_t shard_count = sharding.tile_assignment().dim(sort_dim);
if (shard_count <= 1) {
return std::nullopt;
}
const int64_t input_size = hlo->operand(0)->shape().dimensions(sort_dim);
const int64_t per_partition_size = CeilOfRatio(input_size, shard_count);
if (k.value() >= per_partition_size) {
return std::nullopt;
}
return k;
}
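// Emits a slice keeping the first k elements of `hlo` along `slice_dim`.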
HloInstruction* SliceFirstK(HloInstruction* hlo, SpmdBuilder* builder,
int64_t slice_dim, int64_t k) {
const Shape& hlo_shape = hlo->shape();
auto hlo_dims = hlo_shape.dimensions();
std::vector<int64_t> start_indices(hlo_shape.dimensions_size(), 0);
std::vector<int64_t> limit_indices(hlo_dims.begin(), hlo_dims.end());
std::vector<int64_t> strides(hlo_shape.dimensions_size(), 1);
limit_indices[slice_dim] = k;
auto output_shape = hlo_shape;
output_shape.set_dimensions(slice_dim, k);
return builder->AddInstruction(HloInstruction::CreateSlice(
output_shape, hlo, start_indices, limit_indices, strides));
}
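// Number of shards along `dim`; 1 for maximal tiling or when dim is -1.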
int64_t ShardCountAtDim(const HloSharding& sharding, int64_t dim) {
if (sharding.IsTileMaximal()) {
return 1;
}
if (dim == -1) {
return 1;
}
return sharding.tile_assignment().dim(dim);
}
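// Pairs up mismatched tile dimensions of `source` and `target` so that the
// reshard can be expressed as all-to-alls, one per returned (source, target)
// dimension pair; returns nullopt if the tile sizes cannot be matched up.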
std::optional<std::vector<std::pair<int64_t, int64_t>>>
GetReshardAllToAllSourceTargetDims(const HloSharding& source,
const HloSharding& target) {
if (source.IsTileMaximal() || target.IsTileMaximal() ||
source.tile_assignment().num_dimensions() !=
target.tile_assignment().num_dimensions() ||
source.NumTiles() != target.NumTiles()) {
return std::nullopt;
}
std::map<int64_t, std::vector<int64_t>> source_size_to_dim;
std::map<int64_t, std::vector<int64_t>> target_size_to_dim;
for (int64_t i = 0; i < source.tile_assignment().num_dimensions(); ++i) {
if (source.tile_assignment().dim(i) == target.tile_assignment().dim(i)) {
continue;
}
source_size_to_dim[source.tile_assignment().dim(i)].push_back(i);
target_size_to_dim[target.tile_assignment().dim(i)].push_back(i);
}
if (source_size_to_dim.empty() ||
source_size_to_dim.size() != target_size_to_dim.size()) {
return std::nullopt;
}
for (const auto& entry : source_size_to_dim) {
auto target_it = target_size_to_dim.find(entry.first);
if (target_it == target_size_to_dim.end() ||
target_it->second.size() != entry.second.size()) {
return std::nullopt;
}
}
std::vector<std::pair<int64_t, int64_t>> result;
auto remove_entry = [](int64_t size, int64_t dim,
std::map<int64_t, std::vector<int64_t>>& size_to_dim) {
size_to_dim[size].erase(
std::remove_if(size_to_dim[size].begin(), size_to_dim[size].end(),
[dim](int64_t a) { return a == dim; }),
size_to_dim[size].end());
if (size_to_dim[size].empty()) {
size_to_dim.erase(size);
}
};
while (!source_size_to_dim.empty()) {
int64_t source_size = source_size_to_dim.begin()->first;
int64_t i = source_size_to_dim.begin()->second.back();
int64_t target_i_size = target.tile_assignment().dim(i);
if (target_i_size == source_size) {
remove_entry(source_size, i, source_size_to_dim);
remove_entry(source_size, i, target_size_to_dim);
continue;
}
auto j_it = source_size_to_dim[target_i_size].begin();
int64_t j = *j_it;
if (source_size == 1) {
while (target.tile_assignment().dim(j) == 1) {
if (++j_it == source_size_to_dim[target_i_size].end()) {
break;
}
j = *j_it;
}
} else if (target_i_size % source_size == 0) {
while (target.tile_assignment().dim(j) != source_size) {
if (++j_it == source_size_to_dim[target_i_size].end()) {
break;
}
j = *j_it;
}
} else {
return std::nullopt;
}
result.emplace_back(j, i);
remove_entry(target_i_size, i, target_size_to_dim);
source_size_to_dim.begin()->second.back() = j;
remove_entry(target_i_size, j, source_size_to_dim);
}
return result;
}
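// A collective-permute suffices when the two shardings have identical tile
// shapes and replication and differ only in device assignment.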
bool CanReshardWithCollectivePermute(const HloSharding& source,
const HloSharding& target) {
return !source.IsTileMaximal() && !target.IsTileMaximal() &&
source.tile_assignment().dimensions() ==
target.tile_assignment().dimensions() &&
source.ReplicateOnLastTileDim() == target.ReplicateOnLastTileDim() &&
source.tile_assignment() != target.tile_assignment();
}
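// Remaps `grouped_sharding` onto the device groups of `reference`: when each
// of its groups maps to a unique reference group, devices within each group
// are permuted to match; the result always adopts the reference's groups.
// Returns nullopt only if `requires_compatibility` and no single consistent
// permutation exists.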
std::optional<GroupedSharding> AlignGroupsWithInternal(
GroupedSharding grouped_sharding, const GroupedSharding& reference,
bool requires_compatibility, bool ignore_group_order) {
auto get_permutation = [](absl::Span<const int64_t> src,
absl::Span<const int64_t> dst) {
CHECK_EQ(src.size(), dst.size());
absl::flat_hash_map<int64_t, int64_t> dst_reverse_map(dst.size());
for (int64_t i = 0; i < dst.size(); ++i) {
dst_reverse_map[dst[i]] = i;
}
std::vector<int64_t> permutation(src.size());
for (int64_t i = 0; i < src.size(); ++i) {
auto it = dst_reverse_map.find(src[i]);
CHECK(it != dst_reverse_map.end());
permutation[i] = it->second;
}
return permutation;
};
CHECK_EQ(grouped_sharding.device_groups.size(),
reference.device_groups.size());
std::vector<int64_t> device_to_ref_group(reference.device_groups.size() *
reference.device_groups[0].size());
for (int64_t g = 0; g < reference.device_groups.size(); ++g) {
for (int64_t device : reference.device_groups[g]) {
device_to_ref_group[device] = g;
}
}
auto unique_ref_dev_group =
[&](absl::Span<const int64_t> devices) -> int64_t {
int64_t ref_g = -1;
for (int64_t device : devices) {
if (ref_g == -1) {
ref_g = device_to_ref_group[device];
} else if (ref_g != device_to_ref_group[device]) {
return -1;
}
}
return ref_g;
};
bool matching_groups = true;
std::vector<int64_t> original_src_to_ref_permutation;
for (int64_t g = 0; g < grouped_sharding.device_groups.size(); ++g) {
int64_t ref_g = unique_ref_dev_group(grouped_sharding.device_groups[g]);
if (ref_g < 0 || (!ignore_group_order && g != ref_g)) {
if (requires_compatibility) {
return std::nullopt;
}
matching_groups = false;
break;
}
if (g == 0) {
original_src_to_ref_permutation = get_permutation(
grouped_sharding.device_groups[g], reference.device_groups[ref_g]);
} else if (requires_compatibility) {
if (original_src_to_ref_permutation !=
get_permutation(grouped_sharding.device_groups[g],
reference.device_groups[ref_g])) {
return std::nullopt;
}
}
}
if (matching_groups && !grouped_sharding.sharding.IsTileMaximal()) {
auto tiles = [&] {
auto array =
grouped_sharding.sharding.tile_assignment().shared_array_clone();
array->Each([&](absl::Span<const int64_t> indices, int64_t* device) {
*device = original_src_to_ref_permutation[*device];
});
return TileAssignment(std::move(array));
}();
grouped_sharding.sharding =
grouped_sharding.sharding.ReplicateOnLastTileDim()
? HloSharding::PartialTile(tiles)
: HloSharding::Tile(tiles);
}
grouped_sharding.device_groups = reference.device_groups;
return grouped_sharding;
}
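// Without the compatibility requirement alignment cannot fail, so the
// optional result is dereferenced unconditionally.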
GroupedSharding AlignGroupsWith(GroupedSharding grouped_sharding,
const GroupedSharding& reference,
bool ignore_group_order) {
return *AlignGroupsWithInternal(std::move(grouped_sharding), reference,
/*requires_compatibility=*/false, ignore_group_order);
}
std::optional<GroupedSharding> AlignGroupsWithIfCompatible(
GroupedSharding grouped_sharding, const GroupedSharding& reference) {
return AlignGroupsWithInternal(std::move(grouped_sharding), reference,
/*requires_compatibility=*/true, /*ignore_group_order=*/false);
}
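// Aligns `sharding` with `reference` by grouping both on the given dims and
// aligning the resulting device groups.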
HloSharding AlignShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> sharding_dims,
const HloSharding& reference,
absl::Span<const int64_t> reference_dims) {
auto sharding_grouped =
hlo_sharding_util::GroupShardingOnDims(sharding, sharding_dims);
auto reference_grouped =
hlo_sharding_util::GroupShardingOnDims(reference, reference_dims);
return hlo_sharding_util::UngroupSharding(
AlignGroupsWith(sharding_grouped, reference_grouped));
}
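// Per-group view of the base shape: each in-rank group dimension is divided
// (rounding up) by its group count.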
Shape GetPerGroupBaseShape(const GroupedSharding& grouped_sharding,
const Shape& original_base_shape) {
auto result = original_base_shape;
for (int64_t i = 0; i < grouped_sharding.group_dims.size(); ++i) {
int64_t dim = grouped_sharding.group_dims[i];
if (dim >= original_base_shape.rank()) {
continue;
}
int64_t groups = grouped_sharding.group_dim_sizes[i];
result.set_dimensions(dim, CeilOfRatio(result.dimensions(dim), groups));
}
return result;
}
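// Builds a partitioning state scoped to `device_groups`: per-group collective
// creators, in-group partition ids, and a reshard cache keyed by the group
// assignment.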
PartitionedHlo::PartitioningState CreatePerGroupPartitioningState(
const PartitionedHlo::PartitioningState& state,
const std::vector<std::vector<int64_t>>& device_groups, SpmdBuilder* b) {
auto result = state;
result.collective_ops_creator = GetPerGroupCollectiveOpsCreator(
state.collective_ops_creator, device_groups);
result.partition_id =
GetInGroupPartitionId(state.partition_id, device_groups, b);
std::vector<std::string> per_group_strings(device_groups.size());
for (int64_t i = 0; i < per_group_strings.size(); ++i) {
per_group_strings[i] = absl::StrJoin(device_groups[i], ",");
}
auto& grouped_cache =
state.reshard_cache->groupd_caches[absl::StrJoin(per_group_strings, ";")];
if (!grouped_cache) {
grouped_cache = std::make_unique<PartitionedHlo::ReshardCache>();
}
result.reshard_cache = grouped_cache.get();
return result;
}
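// Extracts the current group's shard of a replicated value: looks up the
// caller's group id, pads to an even group-level tiling, then dynamic-slices.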
HloInstruction* PerGroupSliceFromReplicated(
HloInstruction* replicated, HloInstruction* partition_id,
const std::vector<std::vector<int64_t>>& device_groups,
absl::Span<const int64_t> group_dims,
absl::Span<const int64_t> group_dim_sizes, SpmdBuilder* b) {
std::vector<uint32_t> group_ids(device_groups.size() *
device_groups[0].size());
for (int64_t g = 0; g < device_groups.size(); ++g) {
for (int64_t device : device_groups[g]) {
group_ids[device] = g;
}
}
auto group_id = TableLookup<uint32_t>(group_ids, U32, partition_id, b);
std::vector<int64_t> group_level_tile_dims(replicated->shape().rank(), 1);
for (int64_t i = 0; i < group_dims.size(); ++i) {
group_level_tile_dims[group_dims[i]] = group_dim_sizes[i];
}
auto group_level_tile = [&] {
absl::InlinedVector<int, 6> perm_dims(group_dims.begin(), group_dims.end());
absl::c_sort(perm_dims);
absl::InlinedVector<int, 6> perm_dim_map(group_level_tile_dims.size(), -1);
for (int i = 0; i < perm_dims.size(); ++i) {
perm_dim_map[perm_dims[i]] = i;
}
absl::InlinedVector<int, 6> transpose_perm(group_dims.size());
for (int i = 0; i < group_dims.size(); ++i) {
transpose_perm[i] = perm_dim_map[group_dims[i]];
CHECK_NE(transpose_perm[i], -1);
}
return TileAssignment(group_level_tile_dims, group_dim_sizes,
transpose_perm);
}();
auto group_level_sharding = HloSharding::Tile(std::move(group_level_tile));
auto padded_hlo = PadBaseShapeBeforeUnevenTiledSharding(
replicated, group_level_sharding, b);
auto shard_shape =
MakePartitionedShape(replicated->shape(), group_level_sharding);
return b->AddInstruction(HloInstruction::CreateDynamicSlice(
shard_shape, padded_hlo,
MakePartitionOffsets(replicated->shape(), group_level_sharding, group_id,
b),
shard_shape.dimensions()));
}
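// Finds the tile dimensions on which all devices within each group share the
// same index, i.e. candidate group dims for `sharding`; nullopt when the
// groups do not correspond to any such set of dimensions.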
std::optional<std::vector<int64_t>> FindMatchingPartitionedDimsForGrouping(
const HloSharding& sharding,
const std::vector<std::vector<int64_t>>& device_groups) {
if (sharding.IsTileMaximal() || device_groups.size() < 2) {
return std::nullopt;
}
const int64_t num_devices = sharding.tile_assignment().num_elements();
if (num_devices != device_groups.size() * device_groups[0].size()) {
return std::nullopt;
}
std::vector<int64_t> dims;
if (device_groups[0].size() < 2) {
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (sharding.tile_assignment().dim(i) > 1) {
dims.push_back(i);
}
}
return dims;
}
std::vector<std::vector<int64_t>> device_to_index(
num_devices,
std::vector<int64_t>(sharding.tile_assignment().num_dimensions()));
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> index, int64_t device) {
device_to_index[device].assign(index.begin(), index.end());
});
int64_t group_count = 1;
for (int64_t i = 0; i < sharding.tile_assignment().num_dimensions(); ++i) {
if (device_to_index[device_groups[0][0]][i] ==
device_to_index[device_groups[0][1]][i]) {
dims.push_back(i);
group_count *= sharding.tile_assignment().dim(i);
}
}
if (group_count != device_groups.size()) {
return std::nullopt;
}
for (const auto& group : device_groups) {
for (int64_t i = 1; i < group.size(); ++i) {
if (absl::c_any_of(dims, [&](const int64_t dim) {
return device_to_index[group[i]][dim] !=
device_to_index[group[0]][dim];
})) {
return std::nullopt;
}
}
}
return dims;
}
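// Derives a sharding for `target_shape` whose tiling along `target_dims`
// matches `source_sharding` along `source_dims`, partially replicating any
// leftover tiles.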
HloSharding CreateMatchingShardingOnDims(
const Shape& target_shape, const HloSharding& source_sharding,
absl::Span<const int64_t> target_dims,
absl::Span<const int64_t> source_dims) {
CHECK(target_dims.size() == source_dims.size())
<< "Expected 1:1 match between parallel dimensions";
if (source_sharding.IsReplicated()) {
return HloSharding::Replicate();
}
absl::InlinedVector<int64_t, 4> tile_dims(target_shape.dimensions_size(), 1);
int num_tiles = 1;
for (int i = 0, end = target_dims.size(); i < end; ++i) {
num_tiles *= source_sharding.tile_assignment().dim(source_dims[i]);
tile_dims[target_dims[i]] =
source_sharding.tile_assignment().dim(source_dims[i]);
}
bool to_be_partially_replicated = false;
if (num_tiles != source_sharding.tile_assignment().num_elements()) {
CHECK_EQ(source_sharding.tile_assignment().num_elements() % num_tiles, 0);
to_be_partially_replicated = true;
tile_dims.push_back(source_sharding.tile_assignment().num_elements() /
num_tiles);
}
auto tgt_tile_assignment =
source_sharding.tile_assignment().Reshape(tile_dims);
if (to_be_partially_replicated) {
return AlignShardingOnDims(HloSharding::PartialTile(tgt_tile_assignment),
target_dims, source_sharding, source_dims);
} else {
return AlignShardingOnDims(HloSharding::Tile(tgt_tile_assignment),
target_dims, source_sharding, source_dims);
}
}
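// Computes compatible shardings for a gather/scatter operand and its indices
// along their parallel dimensions, repartitioning the less-partitioned side
// out of its replicated tiles; nullopt if neither side is partitioned on
// those dims or the tilings cannot be reconciled.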
std::optional<GatherScatterParallelDimSharding>
GatherScatterOperandsShardedAcrossParallelDims(
const HloInstruction& operand, const HloInstruction& indices,
const hlo_sharding_util::GatherScatterParallelDims& parallel_dims) {
const auto& indices_parallel_dims = parallel_dims.indices_parallel_dims;
const auto& operand_parallel_dims = parallel_dims.operand_parallel_dims;
if (indices_parallel_dims.size() != operand_parallel_dims.size()) {
return std::nullopt;
}
auto new_index_shard = indices.sharding();
auto new_operand_shard = operand.sharding();
int idx_parallel_tiles_num = new_index_shard.NumTiles(indices_parallel_dims);
int op_parallel_tiles_num = new_operand_shard.NumTiles(operand_parallel_dims);
if (idx_parallel_tiles_num == 1 && op_parallel_tiles_num == 1) {
return std::nullopt;
}
if (new_index_shard.IsReplicated()) {
return GatherScatterParallelDimSharding{
CreateMatchingShardingOnDims(indices.shape(), new_operand_shard,
indices_parallel_dims,
operand_parallel_dims),
new_operand_shard};
}
if (new_operand_shard.IsReplicated()) {
return GatherScatterParallelDimSharding{
new_index_shard, CreateMatchingShardingOnDims(
operand.shape(), new_index_shard,
operand_parallel_dims, indices_parallel_dims)};
}
if (idx_parallel_tiles_num != op_parallel_tiles_num) {
auto to_adjust_dims = operand_parallel_dims;
auto target_dims = indices_parallel_dims;
HloSharding* target = &new_index_shard;
HloSharding* to_adjust = &new_operand_shard;
if (idx_parallel_tiles_num < op_parallel_tiles_num) {
std::swap(to_adjust_dims, target_dims);
std::swap(to_adjust, target);
}
if (!to_adjust->ReplicateOnLastTileDim()) {
return std::nullopt;
}
std::vector<int64_t> new_tile_assignment_dims(
to_adjust->tile_assignment().dimensions().begin(),
to_adjust->tile_assignment().dimensions().end());
for (int i = 0; i < to_adjust_dims.size(); ++i) {
int64_t target_dim = target->tile_assignment().dim(target_dims[i]);
int64_t to_adjust_dim =
to_adjust->tile_assignment().dim(to_adjust_dims[i]);
if (target_dim < to_adjust_dim) {
return std::nullopt;
}
if (target_dim == to_adjust_dim) {
continue;
}
int64_t ratio = target_dim / to_adjust_dim;
if (target_dim % to_adjust_dim != 0 ||
new_tile_assignment_dims.back() % ratio != 0) {
return std::nullopt;
}
new_tile_assignment_dims[to_adjust_dims[i]] *= ratio;
new_tile_assignment_dims.back() /= ratio;
}
CHECK_GE(new_tile_assignment_dims.back(), 1);
bool to_partially_replicate = true;
if (new_tile_assignment_dims.back() == 1) {
new_tile_assignment_dims.pop_back();
to_partially_replicate = false;
}
auto new_tile_assignment =
to_adjust->tile_assignment().Reshape(new_tile_assignment_dims);
if (to_partially_replicate) {
*to_adjust =
AlignShardingOnDims(HloSharding::PartialTile(new_tile_assignment),
to_adjust_dims, *target, target_dims);
} else {
*to_adjust = AlignShardingOnDims(HloSharding::Tile(new_tile_assignment),
to_adjust_dims, *target, target_dims);
}
}
std::vector<int64_t> operand_shard_tile_dims(
new_operand_shard.tile_assignment().dimensions().begin(),
new_operand_shard.tile_assignment().dimensions().end());
for (int i = 0; i < indices_parallel_dims.size(); ++i) {
operand_shard_tile_dims[operand_parallel_dims[i]] =
new_index_shard.tile_assignment().dim(indices_parallel_dims[i]);
}
auto operand_shard_tiles =
new_operand_shard.tile_assignment().Reshape(operand_shard_tile_dims);
new_operand_shard = AlignShardingOnDims(
new_operand_shard.ReplicateOnLastTileDim()
? HloSharding::PartialTile(operand_shard_tiles)
: HloSharding::Tile(operand_shard_tiles),
operand_parallel_dims, new_index_shard, indices_parallel_dims);
return GatherScatterParallelDimSharding{new_index_shard, new_operand_shard};
}
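// Matches concat(slice(x), slice(x)) along the concat dimension implementing
// a rotate-right of x; returns the rotation amount (the left slice's size),
// or -1 if the pattern does not match.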
int64_t FindRotateRightPattern(const HloInstruction* concat,
const HloInstruction* lhs,
const HloInstruction* rhs) {
if (lhs->opcode() != HloOpcode::kSlice ||
rhs->opcode() != HloOpcode::kSlice ||
lhs->operand(0) != rhs->operand(0)) {
return -1;
}
const HloInstruction* to_rotate = lhs->operand(0);
if (!ShapeUtil::Compatible(to_rotate->shape(), concat->shape()) ||
concat->sharding() != to_rotate->sharding()) {
return -1;
}
const int64_t dim = concat->concatenate_dimension();
if (lhs->slice_strides(dim) != 1 || rhs->slice_strides(dim) != 1 ||
lhs->slice_starts(dim) != rhs->slice_limits(dim)) {
return -1;
}
return lhs->shape().dimensions(dim);
}
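// Matches concat(lhs, mid, rhs) where lhs/rhs are (possibly elementwise-
// modified) slices of mid, i.e. a pad-with-wrap; records the slice start
// offsets and the skipped elementwise modifiers.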
std::optional<PadWithWrapPattern> FindPadWithWrapPattern(
const HloInstruction* concat, const HloInstruction* lhs,
const HloInstruction* mid, const HloInstruction* rhs) {
if (!lhs || !mid || !rhs) {
return std::nullopt;
}
auto skip_elementwise_ops = [&](const HloInstruction* inst) {
std::vector<const HloInstruction*> modifiers;
while (inst->IsElementwise() && inst->operand_count() == 1 &&
inst->user_count() == 1) {
if (inst->opcode() != HloOpcode::kCopy) {
modifiers.push_back(inst);
}
inst = inst->operand(0);
}
return std::make_pair(modifiers, inst);
};
PadWithWrapPattern pad_pattern;
auto skip_result = skip_elementwise_ops(lhs);
pad_pattern.lhs_modifiers = std::move(skip_result.first);
lhs = skip_result.second;
skip_result = skip_elementwise_ops(rhs);
pad_pattern.rhs_modifiers = std::move(skip_result.first);
rhs = skip_result.second;
const int64_t dim = concat->concatenate_dimension();
if (lhs->opcode() != HloOpcode::kSlice ||
rhs->opcode() != HloOpcode::kSlice || lhs->operand(0) != mid ||
rhs->operand(0) != mid || lhs->slice_strides(dim) != 1 ||
rhs->slice_strides(dim) != 1 || lhs->sharding() != mid->sharding() ||
rhs->sharding() != mid->sharding() ||
lhs->sharding() != concat->sharding()) {
return std::nullopt;
}
pad_pattern.lhs_slice_start = lhs->slice_starts(dim);
pad_pattern.rhs_slice_start = rhs->slice_starts(dim);
return pad_pattern;
}
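// Reshards `to_reshard` as a windowed input whose window encodes the slice:
// stride per dimension, negated starts as low padding, and limits relative to
// the base shape as high padding.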
std::optional<PartitionedHlo::WindowedInputShardReturnValue>
ReshardDataForSlicing(absl::Span<const int64_t> strides,
absl::Span<const int64_t> starts,
absl::Span<const int64_t> limits,
PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b) {
Window window;
for (int64_t i = 0; i < starts.size(); ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(1);
dim->set_stride(strides[i]);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_padding_low(-starts[i]);
dim->set_padding_high(limits[i] - to_reshard.base_shape().dimensions(i));
dim->set_base_dilation(1);
}
return to_reshard.ReshardAsWindowedInput(
window, target_sharding,
CreateZero(
ShapeUtil::MakeShape(to_reshard.hlo()->shape().element_type(), {}),
b),
/*mask_invalid_region=*/false);
}
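// Applies the per-shard slice implied by a windowed reshard, skipping the
// slice when it would be a no-op.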
HloInstruction* SliceDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
absl::Span<const int64_t> strides, const Shape& base_shape,
const HloSharding& target_sharding, SpmdBuilder* b) {
std::vector<int64_t> start_indices(strides.size());
std::vector<int64_t> limit_indices(strides.size());
bool need_slice = false;
for (int64_t i = 0; i < strides.size(); ++i) {
auto dim = reshard_operand.shard_window.dimensions(i);
start_indices[i] = -dim.padding_low();
limit_indices[i] = reshard_operand.sharded_input->shape().dimensions(i) +
dim.padding_high();
if (start_indices[i] != 0 || strides[i] != 1 ||
limit_indices[i] !=
reshard_operand.sharded_input->shape().dimensions(i)) {
need_slice = true;
}
}
if (need_slice) {
auto shard_shape = MakePartitionedShape(base_shape, target_sharding);
return b->AddInstruction(
HloInstruction::CreateSlice(shard_shape, reshard_operand.sharded_input,
start_indices, limit_indices, strides));
}
return reshard_operand.sharded_input;
}
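// Reshards for a pad op by encoding the padding config into a window; masking
// is requested only where a padded dimension has multiple shards and either
// the pad value is nonzero or the dimension is unevenly tiled.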
std::optional<PartitionedHlo::WindowedInputShardReturnValue> ReshardDataForPad(
HloInstruction* pad_value, PaddingConfig pc, PartitionedHlo to_reshard,
const HloSharding& target_sharding, SpmdBuilder* b) {
Window window;
bool needs_masking = false;
const bool pad_value_is_zero =
pad_value->IsConstant() && pad_value->literal().IsZero({});
for (int64_t i = 0; i < to_reshard.hlo()->shape().rank(); ++i) {
WindowDimension* dim = window.add_dimensions();
auto pd = pc.dimensions(i);
dim->set_size(1);
dim->set_stride(1);
dim->set_window_dilation(1);
dim->set_window_reversal(false);
dim->set_padding_low(pd.edge_padding_low());
dim->set_padding_high(pd.edge_padding_high());
dim->set_base_dilation(pd.interior_padding() + 1);
const int64_t shard_count = target_sharding.tile_assignment().dim(i);
needs_masking |= shard_count > 1 &&
(pd.edge_padding_low() > 0 || pd.edge_padding_high() > 0 ||
pd.interior_padding() > 0) &&
(!pad_value_is_zero ||
to_reshard.base_shape().dimensions(i) % shard_count != 0);
}
return to_reshard.ReshardAsWindowedInput(
window, target_sharding, pad_value,
/*mask_invalid_region=*/needs_masking, /*force_mask_in_compact=*/true);
}
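// Applies the per-shard pad implied by a windowed reshard (edge padding from
// the window, interior padding from base dilation), skipping no-op pads.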
HloInstruction* PadDataFromWindowReshard(
const PartitionedHlo::WindowedInputShardReturnValue& reshard_operand,
HloInstruction* pad_value, SpmdBuilder* b) {
PaddingConfig sharded_padding_config;
bool need_pad = false;
for (int64_t i = 0; i < reshard_operand.sharded_input->shape().rank(); ++i) {
auto dim = sharded_padding_config.add_dimensions();
const auto& wd = reshard_operand.shard_window.dimensions(i);
dim->set_edge_padding_low(wd.padding_low());
dim->set_edge_padding_high(wd.padding_high());
dim->set_interior_padding(wd.base_dilation() - 1);
if (wd.padding_low() != 0 || wd.padding_high() != 0 ||
wd.base_dilation() != 1) {
need_pad = true;
}
}
auto sharded_data = reshard_operand.sharded_input;
if (need_pad) {
auto sharded_data_shape =
ShapeInference::InferPadShape(sharded_data->shape(), pad_value->shape(),
sharded_padding_config)
.value();
return b->AddInstruction(HloInstruction::CreatePad(
sharded_data_shape, sharded_data, pad_value, sharded_padding_config));
}
return sharded_data;
}
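// Enumerates partition groups that replicate across `replication_dims`:
// partitions agreeing on all other tile indices land in the same group.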
std::vector<std::vector<int64_t>> GetPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims) {
int64_t group_size = 1;
for (int64_t i : replication_dims) {
group_size *= sharding.tile_assignment().dim(i);
}
std::vector<std::vector<int64_t>> partition_groups(
sharding.tile_assignment().num_elements() / group_size);
sharding.tile_assignment().Each(
[&](absl::Span<const int64_t> indices, int64_t partition) {
int64_t group_id = 0;
for (int64_t i = 0; i < indices.size(); ++i) {
if (!absl::c_linear_search(replication_dims, i)) {
group_id *= sharding.tile_assignment().dim(i);
group_id += indices[i];
}
}
partition_groups[group_id].push_back(partition);
});
return partition_groups;
}
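// Iota-based variant of the above: expresses the replication groups as an
// IotaReplicaGroupList by transposing the replicated dims to be minor-most;
// nullopt when the tile assignment is not an iota or does not cover every
// partition.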
std::optional<IotaReplicaGroupList> GetIotaPartitionGroupsForReplication(
const HloSharding& sharding, absl::Span<const int64_t> replication_dims,
int64_t num_partitions) {
if (!sharding.tile_assignment().iota().has_value()) {
return std::nullopt;
}
if (sharding.tile_assignment().num_elements() != num_partitions) {
return std::nullopt;
}
int64_t group_size = 1;
for (int64_t i : replication_dims) {
group_size *= sharding.tile_assignment().dim(i);
}
int64_t num_replica_groups =
sharding.tile_assignment().num_elements() / group_size;
std::vector<int> transpose_dims(sharding.tile_assignment().num_dimensions());
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
std::vector<int> replication_dims_sorted(replication_dims.begin(),
replication_dims.end());
std::sort(replication_dims_sorted.begin(), replication_dims_sorted.end());
for (int64_t i : replication_dims_sorted) {
auto it = std::find(transpose_dims.begin(), transpose_dims.end(), i);
if (it != transpose_dims.end()) {
transpose_dims.erase(it);
transpose_dims.push_back(i);
}
}
auto transpose_iota_tile_assignment =
sharding.tile_assignment().iota()->Transpose(transpose_dims);
if (!transpose_iota_tile_assignment.has_value()) {
return std::nullopt;
}
return IotaReplicaGroupList(num_replica_groups, group_size,
transpose_iota_tile_assignment->reshape_dims(),
transpose_iota_tile_assignment->transpose_perm());
}
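// Expands a per-partition group list to all replicas by prepending a replica
// dimension; CHECK-fails if the groups do not exactly cover num_partitions.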
CollectiveDeviceList ExpandPartitionGroupListAcrossReplicas(
IotaReplicaGroupList partition_group_list, int num_replicas,
int num_partitions) {
int partition_group_count = partition_group_list.num_replica_groups();
int partition_group_size = partition_group_list.num_devices_per_group();
CHECK_EQ((partition_group_count * partition_group_size), num_partitions);
int replica_group_count = partition_group_count * num_replicas;
std::vector<int64_t> new_reshape_dims(
partition_group_list.reshape_dims().begin(),
partition_group_list.reshape_dims().end());
new_reshape_dims.insert(new_reshape_dims.begin(), num_replicas);
std::vector<int> new_transpose_dims;
new_transpose_dims.push_back(0);
for (int64_t dim : partition_group_list.transpose_perm()) {
new_transpose_dims.push_back(dim + 1);
}
return CollectiveDeviceList(
IotaReplicaGroupList(replica_group_count, partition_group_size,
new_reshape_dims, new_transpose_dims));
}
}
} | #include "xla/service/spmd/spmd_partitioner_util.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
namespace xla {
namespace spmd {
namespace {
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding1) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}));
const std::vector<HloSharding> target_shardings = {
HloSharding::IotaTile({2, 2}),
HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0})};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[1]);
}
partial_sharding =
HloSharding::PartialTile(TileAssignment({1, 2, 2}, {2, 2}, {1, 0}));
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_shardings[0]);
}
}
TEST(SPMDPartitionerUtilTest, PartialReplicateReshardCompatibleSharding2) {
HloSharding partial_sharding =
HloSharding::PartialTile(TileAssignment({2, 2, 8}));
const std::vector<HloSharding> target_shardings = {
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 3, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 2, 1, 4, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 2, 4})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 3, 1, 4, 2})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 2, 3})),
HloSharding::PartialTile(
TileAssignment({4, 4, 2}, {2, 2, 2, 2, 2}, {0, 4, 1, 3, 2}))};
for (const auto& target_sharding : target_shardings) {
auto result = PartialReplicateReshardCompatibleSharding(partial_sharding,
target_sharding);
EXPECT_EQ(result, target_sharding);
}
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {1});
std::vector<std::vector<int64_t>> expected_partition_groups = {
{0, 2}, {1, 3}, {4, 6}, {5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::vector<std::vector<int64_t>> actual_partition_groups =
GetPartitionGroupsForReplication(sharding, {0, 2});
std::vector<std::vector<int64_t>> expected_partition_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
EXPECT_THAT(actual_partition_groups,
testing::ContainerEq(expected_partition_groups));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {1}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 4);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 2);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(2, 2, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilTest, GetIotaPartitionGroupsForReplication2) {
HloSharding sharding = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {0, 2, 1});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(sharding, {0, 2}, 8);
EXPECT_TRUE(actual_partition_group_list.has_value());
EXPECT_EQ(actual_partition_group_list->num_replica_groups(), 2);
EXPECT_EQ(actual_partition_group_list->num_devices_per_group(), 4);
EXPECT_THAT(actual_partition_group_list->reshape_dims(),
testing::ElementsAre(4, 2));
EXPECT_THAT(actual_partition_group_list->transpose_perm(),
testing::ElementsAre(1, 0));
}
TEST(SPMDPartitionerUtilTest,
GetIotaPartitionGroupsForReplicationSkipWhenNotUsingAllPartitions) {
HloSharding simple_sharding = HloSharding::IotaTile({2, 2, 2});
std::optional<IotaReplicaGroupList> actual_partition_group_list =
GetIotaPartitionGroupsForReplication(simple_sharding, {1}, 16);
EXPECT_FALSE(actual_partition_group_list.has_value());
}
TEST(SPMDPartitionerUtilTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
IotaReplicaGroupList expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 50)
.iota_replica_group_list()
.value();
EXPECT_EQ(expanded_partition_group_list.num_replica_groups(), 20);
EXPECT_EQ(expanded_partition_group_list.num_devices_per_group(), 5);
EXPECT_THAT(expanded_partition_group_list.reshape_dims(),
testing::ElementsAre(4, 5, 5));
EXPECT_THAT(expanded_partition_group_list.transpose_perm(),
testing::ElementsAre(0, 2, 1));
}
TEST(SPMDPartitionerUtilDeathTest, ExpandPartitionGroupListAcrossReplicas) {
IotaReplicaGroupList partition_group_list =
IotaReplicaGroupList(10, 5, {2, 5, 5}, {0, 2, 1});
ASSERT_DEATH(
{
auto expanded_partition_group_list =
ExpandPartitionGroupListAcrossReplicas(partition_group_list, 2, 60);
},
"Check failed: \\(partition_group_count \\* partition_group_size\\) == "
"num_partitions \\(50 vs\\. 60\\)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/spmd_partitioner_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02e32eea-6457-4f34-9a05-e15a3b90742b | cpp | tensorflow/tensorflow | func | tensorflow/compiler/mlir/quantization/common/func.cc | tensorflow/compiler/mlir/quantization/common/func_test.cc | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/cc/saved_model/signature_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
namespace mlir::quant {
namespace {
using ::tensorflow::kDefaultServingSignatureDefKey;
using ::tensorflow::kImportModelDefaultGraphFuncName;
bool IsPublicFuncOp(func::FuncOp func_op) {
return SymbolTable::getSymbolVisibility(&*func_op) ==
SymbolTable::Visibility::Public;
}
}
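// Returns the module's entry point: the public default graph function if
// present, else the public default serving signature function, else nullptr.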
func::FuncOp FindMainFuncOp(ModuleOp module_op) {
if (const auto main_func_op = module_op.lookupSymbol<func::FuncOp>(
kImportModelDefaultGraphFuncName);
main_func_op != nullptr && IsPublicFuncOp(main_func_op)) {
return main_func_op;
}
if (const auto serving_default_func_op =
module_op.lookupSymbol<func::FuncOp>(kDefaultServingSignatureDefKey);
serving_default_func_op != nullptr &&
IsPublicFuncOp(serving_default_func_op)) {
return serving_default_func_op;
}
return nullptr;
}
} | #include "tensorflow/compiler/mlir/quantization/common/func.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir::quant {
namespace {
using ::testing::IsNull;
using ::testing::NotNull;
using FindMainFuncOpTest = ::mlir::quant::QuantizationTestBase;
TEST_F(FindMainFuncOpTest, ReturnsMainFuncOp) {
constexpr absl::string_view kModuleWithMainFunc = R"mlir(
module {
func.func @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithMainFunc);
EXPECT_THAT(*module_op, NotNull());
func::FuncOp main_func_op = FindMainFuncOp(*module_op);
EXPECT_THAT(main_func_op, NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateMainFunc = R"mlir(
module {
func.func private @main() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsServingDefaultFuncOp) {
constexpr absl::string_view kModuleWithServingDefaultFunc = R"mlir(
module {
func.func @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), NotNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenServingDefaultFuncOpIsPrivate) {
constexpr absl::string_view kModuleWithPrivateServingDefaultFunc = R"mlir(
module {
func.func private @serving_default() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithPrivateServingDefaultFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
TEST_F(FindMainFuncOpTest, ReturnsNullWhenMainFuncNotFound) {
constexpr absl::string_view kModuleWithNoMainFunc = R"mlir(
module {
func.func @foo() -> () {
return
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleWithNoMainFunc);
EXPECT_THAT(*module_op, NotNull());
EXPECT_THAT(FindMainFuncOp(*module_op), IsNull());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/func.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/func_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca383435-df24-445b-95ff-5b14437f8c41 | cpp | abseil/abseil-cpp | overload | absl/functional/overload.h | absl/functional/overload_test.cc | #ifndef ABSL_FUNCTIONAL_OVERLOAD_H_
#define ABSL_FUNCTIONAL_OVERLOAD_H_
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
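// An overload set built by inheriting the call operators of each functor;
// with class template argument deduction this supports absl::Overload{...},
// e.g. as a visitor for absl::visit / std::visit.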
template <typename... T>
struct Overload final : T... {
using T::operator()...;
constexpr explicit Overload(T... ts) : T(std::move(ts))... {}
};
template <typename... T>
Overload(T...) -> Overload<T...>;
#else
namespace functional_internal {
template <typename T>
constexpr bool kDependentFalse = false;
}
template <typename Dependent = int, typename... T>
auto Overload(T&&...) {
static_assert(functional_internal::kDependentFalse<Dependent>,
"Overload is only usable with C++17 or above.");
}
#endif
ABSL_NAMESPACE_END
}
#endif | #include "absl/functional/overload.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/variant.h"
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#include "gtest/gtest.h"
namespace {
TEST(OverloadTest, DispatchConsidersTypeWithAutoFallback) {
auto overloaded = absl::Overload{
[](int v) { return absl::StrCat("int ", v); },
[](double v) { return absl::StrCat("double ", v); },
[](const char* v) { return absl::StrCat("const char* ", v); },
[](auto v) { return absl::StrCat("auto ", v); },
};
EXPECT_EQ("int 1", overloaded(1));
EXPECT_EQ("double 2.5", overloaded(2.5));
EXPECT_EQ("const char* hello", overloaded("hello"));
EXPECT_EQ("auto 1.5", overloaded(1.5f));
}
TEST(OverloadTest, DispatchConsidersNumberOfArguments) {
auto overloaded = absl::Overload{
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; },
};
EXPECT_EQ(3, overloaded(2));
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ("none", overloaded());
}
TEST(OverloadTest, SupportsConstantEvaluation) {
auto overloaded = absl::Overload{
[](int a) { return a + 1; },
[](int a, int b) { return a * b; },
[]() -> absl::string_view { return "none"; },
};
static_assert(overloaded() == "none");
static_assert(overloaded(2) == 3);
static_assert(overloaded(3, 7) == 21);
}
TEST(OverloadTest, PropogatesDefaults) {
auto overloaded = absl::Overload{
[](int a, int b = 5) { return a * b; },
[](double c) { return c; },
};
EXPECT_EQ(21, overloaded(3, 7));
EXPECT_EQ(35, overloaded(7));
EXPECT_EQ(2.5, overloaded(2.5));
}
TEST(OverloadTest, AmbiguousWithDefaultsNotInvocable) {
auto overloaded = absl::Overload{
[](int a, int b = 5) { return a * b; },
[](int c) { return c; },
};
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
static_assert(std::is_invocable_v<decltype(overloaded), int, int>);
}
TEST(OverloadTest, AmbiguousDuplicatesNotInvocable) {
auto overloaded = absl::Overload{
[](int a) { return a; },
[](int c) { return c; },
};
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionNotInvocable) {
auto overloaded = absl::Overload{
[](uint16_t a) { return a; },
[](uint64_t c) { return c; },
};
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, AmbiguousConversionWithAutoNotInvocable) {
auto overloaded = absl::Overload{
[](auto a) { return a; },
[](auto c) { return c; },
};
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
TEST(OverloadTest, AmbiguousConversionWithAutoAndTemplateNotInvocable) {
auto overloaded = absl::Overload{
[](auto a) { return a; },
[]<class T>(T c) { return c; },
};
static_assert(!std::is_invocable_v<decltype(overloaded), int>);
}
TEST(OverloadTest, DispatchConsidersTypeWithTemplateFallback) {
auto overloaded = absl::Overload{
[](int a) { return a; },
[]<class T>(T c) { return c * 2; },
};
EXPECT_EQ(7, overloaded(7));
EXPECT_EQ(14.0, overloaded(7.0));
}
#endif
TEST(OverloadTest, DispatchConsidersSfinae) {
auto overloaded = absl::Overload{
[](auto a) -> decltype(a + 1) { return a + 1; },
};
static_assert(std::is_invocable_v<decltype(overloaded), int>);
static_assert(!std::is_invocable_v<decltype(overloaded), std::string>);
}
TEST(OverloadTest, VariantVisitDispatchesCorrectly) {
absl::variant<int, double, std::string> v(1);
auto overloaded = absl::Overload{
[](int) -> absl::string_view { return "int"; },
[](double) -> absl::string_view { return "double"; },
[](const std::string&) -> absl::string_view { return "string"; },
};
EXPECT_EQ("int", absl::visit(overloaded, v));
v = 1.1;
EXPECT_EQ("double", absl::visit(overloaded, v));
v = "hello";
EXPECT_EQ("string", absl::visit(overloaded, v));
}
TEST(OverloadTest, VariantVisitWithAutoFallbackDispatchesCorrectly) {
absl::variant<std::string, int32_t, int64_t> v(int32_t{1});
auto overloaded = absl::Overload{
[](const std::string& s) { return s.size(); },
[](const auto& s) { return sizeof(s); },
};
EXPECT_EQ(4, absl::visit(overloaded, v));
v = int64_t{1};
EXPECT_EQ(8, absl::visit(overloaded, v));
v = std::string("hello");
EXPECT_EQ(5, absl::visit(overloaded, v));
}
TEST(OverloadTest, UseWithParentheses) {
const auto overloaded =
absl::Overload([](const std::string& s) { return s.size(); },
[](const auto& s) { return sizeof(s); });
absl::variant<std::string, int32_t, int64_t> v(int32_t{1});
EXPECT_EQ(4, absl::visit(overloaded, v));
v = int64_t{1};
EXPECT_EQ(8, absl::visit(overloaded, v));
v = std::string("hello");
EXPECT_EQ(5, absl::visit(overloaded, v));
}
TEST(OverloadTest, HasConstexprConstructor) {
constexpr auto overloaded = absl::Overload{
[](int v) { return absl::StrCat("int ", v); },
[](double v) { return absl::StrCat("double ", v); },
[](const char* v) { return absl::StrCat("const char* ", v); },
[](auto v) { return absl::StrCat("auto ", v); },
};
EXPECT_EQ("int 1", overloaded(1));
EXPECT_EQ("double 2.5", overloaded(2.5));
EXPECT_EQ("const char* hello", overloaded("hello"));
EXPECT_EQ("auto 1.5", overloaded(1.5f));
}
}
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/overload.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/functional/overload_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
7e1f6c5e-90d3-42ee-80fe-610b0813dc09 | cpp | abseil/abseil-cpp | mocking_bit_gen | absl/random/mocking_bit_gen.h | absl/random/mocking_bit_gen_test.cc | #ifndef ABSL_RANDOM_MOCKING_BIT_GEN_H_
#define ABSL_RANDOM_MOCKING_BIT_GEN_H_
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/fast_type_id.h"
#include "absl/container/flat_hash_map.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/mock_helpers.h"
#include "absl/random/random.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
class BitGenRef;
namespace random_internal {
template <typename>
struct DistributionCaller;
class MockHelpers;
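// A URBG that is substitutable for absl::BitGen in tests: distribution calls
// can be intercepted with googletest expectations, while unmocked calls fall
// through to a real absl::BitGen.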
template <bool EnableValidation>
class MockingBitGenImpl {
public:
MockingBitGenImpl() = default;
~MockingBitGenImpl() = default;
using result_type = absl::BitGen::result_type;
static constexpr result_type(min)() { return (absl::BitGen::min)(); }
static constexpr result_type(max)() { return (absl::BitGen::max)(); }
result_type operator()() { return gen_(); }
private:
template <typename ResultT, typename... Args>
static auto GetMockFnType(ResultT, std::tuple<Args...>)
-> ::testing::MockFunction<ResultT(Args...)>;
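// Calls the mock with an unpacked argument tuple, then runs the validator on
// the result before returning it.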
template <typename MockFnType, typename ValidatorT, typename ResultT,
typename Tuple>
struct MockFnCaller;
template <typename MockFnType, typename ValidatorT, typename ResultT,
typename... Args>
struct MockFnCaller<MockFnType, ValidatorT, ResultT, std::tuple<Args...>> {
MockFnType* fn;
inline ResultT operator()(Args... args) {
ResultT result = fn->Call(args...);
ValidatorT::Validate(result, args...);
return result;
}
};
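// Type-erased holder for a MockFunction, letting heterogeneous distributions
// share one map keyed by type id.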
class FunctionHolder {
public:
virtual ~FunctionHolder() = default;
virtual void Apply(void* args_tuple, void* result) = 0;
};
template <typename MockFnType, typename ValidatorT, typename ResultT,
typename ArgTupleT>
class FunctionHolderImpl final : public FunctionHolder {
public:
void Apply(void* args_tuple, void* result) final {
*static_cast<ResultT*>(result) = absl::apply(
MockFnCaller<MockFnType, ValidatorT, ResultT, ArgTupleT>{&mock_fn_},
*static_cast<ArgTupleT*>(args_tuple));
}
MockFnType mock_fn_;
};
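// Returns the MockFunction registered for the distribution's type id,
// creating it on first use with a strictness (Nice/Naggy/Strict) matching
// that of the mocking bit generator itself.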
template <typename ResultT, typename ArgTupleT, typename SelfT,
typename ValidatorT>
auto RegisterMock(SelfT&, base_internal::FastTypeIdType type, ValidatorT)
-> decltype(GetMockFnType(std::declval<ResultT>(),
std::declval<ArgTupleT>()))& {
using ActualValidatorT =
std::conditional_t<EnableValidation, ValidatorT, NoOpValidator>;
using MockFnType = decltype(GetMockFnType(std::declval<ResultT>(),
std::declval<ArgTupleT>()));
using WrappedFnType = absl::conditional_t<
std::is_same<SelfT, ::testing::NiceMock<MockingBitGenImpl>>::value,
::testing::NiceMock<MockFnType>,
absl::conditional_t<
std::is_same<SelfT, ::testing::NaggyMock<MockingBitGenImpl>>::value,
::testing::NaggyMock<MockFnType>,
absl::conditional_t<
std::is_same<SelfT,
::testing::StrictMock<MockingBitGenImpl>>::value,
::testing::StrictMock<MockFnType>, MockFnType>>>;
using ImplT =
FunctionHolderImpl<WrappedFnType, ActualValidatorT, ResultT, ArgTupleT>;
auto& mock = mocks_[type];
if (!mock) {
mock = absl::make_unique<ImplT>();
}
return static_cast<ImplT*>(mock.get())->mock_fn_;
}
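// Dispatches a distribution call to its registered mock via the type-erased
// argument tuple; returns false when no mock is registered, so the caller
// can fall back to the real distribution.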
inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
void* result) {
auto it = mocks_.find(type);
if (it == mocks_.end()) return false;
it->second->Apply(args_tuple, result);
return true;
}
absl::flat_hash_map<base_internal::FastTypeIdType,
std::unique_ptr<FunctionHolder>>
mocks_;
absl::BitGen gen_;
template <typename>
friend struct ::absl::random_internal::DistributionCaller;
friend class ::absl::BitGenRef;
friend class ::absl::random_internal::MockHelpers;
};
}
using MockingBitGen = random_internal::MockingBitGenImpl<true>;
using UnvalidatedMockingBitGen ABSL_DEPRECATED("Use MockingBitGen instead") =
random_internal::MockingBitGenImpl<false>;
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/mocking_bit_gen.h"
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/mock_distributions.h"
#include "absl/random/random.h"
namespace {
using ::testing::_;
using ::testing::Ne;
using ::testing::Return;
TEST(BasicMocking, AllDistributionsAreOverridable) {
absl::MockingBitGen gen;
EXPECT_NE(absl::Uniform<int>(gen, 1, 1000000), 20);
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
.WillOnce(Return(20));
EXPECT_EQ(absl::Uniform<int>(gen, 1, 1000000), 20);
EXPECT_NE(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
EXPECT_CALL(absl::MockUniform<double>(), Call(gen, 0.0, 100.0))
.WillOnce(Return(5.0));
EXPECT_EQ(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
EXPECT_NE(absl::Exponential<double>(gen, 1.0), 42);
EXPECT_CALL(absl::MockExponential<double>(), Call(gen, 1.0))
.WillOnce(Return(42));
EXPECT_EQ(absl::Exponential<double>(gen, 1.0), 42);
EXPECT_NE(absl::Poisson<int>(gen, 1.0), 500);
EXPECT_CALL(absl::MockPoisson<int>(), Call(gen, 1.0)).WillOnce(Return(500));
EXPECT_EQ(absl::Poisson<int>(gen, 1.0), 500);
EXPECT_NE(absl::Bernoulli(gen, 0.000001), true);
EXPECT_CALL(absl::MockBernoulli(), Call(gen, 0.000001))
.WillOnce(Return(true));
EXPECT_EQ(absl::Bernoulli(gen, 0.000001), true);
EXPECT_NE(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
EXPECT_CALL(absl::MockZipf<int>(), Call(gen, 1000000, 2.0, 1.0))
.WillOnce(Return(1221));
EXPECT_EQ(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
EXPECT_NE(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
EXPECT_CALL(absl::MockGaussian<double>(), Call(gen, 0.0, 1.0))
.WillOnce(Return(0.001));
EXPECT_EQ(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
EXPECT_NE(absl::LogUniform<int>(gen, 0, 1000000, 2), 500000);
EXPECT_CALL(absl::MockLogUniform<int>(), Call(gen, 0, 1000000, 2))
.WillOnce(Return(500000));
EXPECT_EQ(absl::LogUniform<int>(gen, 0, 1000000, 2), 500000);
}
TEST(BasicMocking, OnDistribution) {
absl::MockingBitGen gen;
EXPECT_NE(absl::Uniform<int>(gen, 1, 1000000), 20);
ON_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
.WillByDefault(Return(20));
EXPECT_EQ(absl::Uniform<int>(gen, 1, 1000000), 20);
EXPECT_NE(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
ON_CALL(absl::MockUniform<double>(), Call(gen, 0.0, 100.0))
.WillByDefault(Return(5.0));
EXPECT_EQ(absl::Uniform<double>(gen, 0.0, 100.0), 5.0);
EXPECT_NE(absl::Exponential<double>(gen, 1.0), 42);
ON_CALL(absl::MockExponential<double>(), Call(gen, 1.0))
.WillByDefault(Return(42));
EXPECT_EQ(absl::Exponential<double>(gen, 1.0), 42);
EXPECT_NE(absl::Poisson<int>(gen, 1.0), 500);
ON_CALL(absl::MockPoisson<int>(), Call(gen, 1.0)).WillByDefault(Return(500));
EXPECT_EQ(absl::Poisson<int>(gen, 1.0), 500);
EXPECT_NE(absl::Bernoulli(gen, 0.000001), true);
ON_CALL(absl::MockBernoulli(), Call(gen, 0.000001))
.WillByDefault(Return(true));
EXPECT_EQ(absl::Bernoulli(gen, 0.000001), true);
EXPECT_NE(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
ON_CALL(absl::MockZipf<int>(), Call(gen, 1000000, 2.0, 1.0))
.WillByDefault(Return(1221));
EXPECT_EQ(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
EXPECT_NE(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
ON_CALL(absl::MockGaussian<double>(), Call(gen, 0.0, 1.0))
.WillByDefault(Return(0.001));
EXPECT_EQ(absl::Gaussian<double>(gen, 0.0, 1.0), 0.001);
EXPECT_NE(absl::LogUniform<int>(gen, 0, 1000000, 2), 2040);
ON_CALL(absl::MockLogUniform<int>(), Call(gen, 0, 1000000, 2))
.WillByDefault(Return(2040));
EXPECT_EQ(absl::LogUniform<int>(gen, 0, 1000000, 2), 2040);
}
TEST(BasicMocking, GMockMatchers) {
absl::MockingBitGen gen;
EXPECT_NE(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
ON_CALL(absl::MockZipf<int>(), Call(gen, 1000000, 2.0, 1.0))
.WillByDefault(Return(1221));
EXPECT_EQ(absl::Zipf<int>(gen, 1000000, 2.0, 1.0), 1221);
}
TEST(BasicMocking, OverridesWithMultipleGMockExpectations) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 10000))
.WillOnce(Return(20))
.WillOnce(Return(40))
.WillOnce(Return(60));
EXPECT_EQ(absl::Uniform(gen, 1, 10000), 20);
EXPECT_EQ(absl::Uniform(gen, 1, 10000), 40);
EXPECT_EQ(absl::Uniform(gen, 1, 10000), 60);
}
TEST(BasicMocking, DefaultArgument) {
absl::MockingBitGen gen;
ON_CALL(absl::MockExponential<double>(), Call(gen, 1.0))
.WillByDefault(Return(200));
EXPECT_EQ(absl::Exponential<double>(gen), 200);
EXPECT_EQ(absl::Exponential<double>(gen, 1.0), 200);
}
TEST(BasicMocking, MultipleGenerators) {
auto get_value = [](absl::BitGenRef gen_ref) {
return absl::Uniform(gen_ref, 1, 1000000);
};
absl::MockingBitGen unmocked_generator;
absl::MockingBitGen mocked_with_3;
absl::MockingBitGen mocked_with_11;
EXPECT_CALL(absl::MockUniform<int>(), Call(mocked_with_3, 1, 1000000))
.WillOnce(Return(3))
.WillRepeatedly(Return(17));
EXPECT_CALL(absl::MockUniform<int>(), Call(mocked_with_11, 1, 1000000))
.WillOnce(Return(11))
.WillRepeatedly(Return(17));
int unmocked_value = get_value(unmocked_generator);
EXPECT_NE(unmocked_value, 3);
EXPECT_NE(unmocked_value, 11);
EXPECT_EQ(get_value(mocked_with_3), 3);
EXPECT_EQ(get_value(mocked_with_11), 11);
EXPECT_NE(get_value(mocked_with_3), 3);
EXPECT_NE(get_value(mocked_with_11), 11);
}
TEST(BasicMocking, MocksNotTriggeredForIncorrectTypes) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<uint32_t>(), Call(gen))
.WillRepeatedly(Return(42));
bool uint16_always42 = true;
for (int i = 0; i < 10000; i++) {
EXPECT_EQ(absl::Uniform<uint32_t>(gen), 42);
uint16_always42 = uint16_always42 && absl::Uniform<uint16_t>(gen) == 42;
}
EXPECT_FALSE(uint16_always42);
}
TEST(BasicMocking, FailsOnUnsatisfiedMocks) {
EXPECT_NONFATAL_FAILURE(
[]() {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockExponential<double>(), Call(gen, 1.0))
.WillOnce(Return(3.0));
}(),
"unsatisfied and active");
}
TEST(OnUniform, RespectsUniformIntervalSemantics) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<int>(),
Call(absl::IntervalClosed, gen, 1, 1000000))
.WillOnce(Return(301));
EXPECT_NE(absl::Uniform(gen, 1, 1000000), 301);
EXPECT_EQ(absl::Uniform(absl::IntervalClosed, gen, 1, 1000000), 301);
}
TEST(OnUniform, RespectsNoArgUnsignedShorthand) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<uint32_t>(), Call(gen)).WillOnce(Return(42));
EXPECT_EQ(absl::Uniform<uint32_t>(gen), 42);
}
TEST(RepeatedlyModifier, ForceSnakeEyesForManyDice) {
auto roll_some_dice = [](absl::BitGenRef gen_ref) {
std::vector<int> results(16);
for (auto& r : results) {
r = absl::Uniform(absl::IntervalClosed, gen_ref, 1, 6);
}
return results;
};
std::vector<int> results;
absl::MockingBitGen gen;
results = roll_some_dice(gen);
EXPECT_LT(std::accumulate(std::begin(results), std::end(results), 0),
results.size() * 6);
ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalClosed, gen, 1, 6))
.WillByDefault(Return(6));
results = roll_some_dice(gen);
EXPECT_EQ(std::accumulate(std::begin(results), std::end(results), 0),
results.size() * 6);
}
TEST(WillOnce, DistinctCounters) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1, 1000000))
.Times(3)
.WillRepeatedly(Return(1));
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 1000001, 2000000))
.Times(3)
.WillRepeatedly(Return(1000001));
EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
EXPECT_EQ(absl::Uniform(gen, 1000001, 2000000), 1000001);
EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 1);
}
TEST(TimesModifier, ModifierSaturatesAndExpires) {
EXPECT_NONFATAL_FAILURE(
[]() {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, 0, 1000000))
.Times(3)
.WillRepeatedly(Return(15))
.RetiresOnSaturation();
EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
EXPECT_EQ(absl::Uniform(gen, 0, 1000000), 15);
EXPECT_NE(absl::Uniform(gen, 0, 1000000), 15);
}(),
"");
}
TEST(TimesModifier, Times0) {
absl::MockingBitGen gen;
EXPECT_CALL(absl::MockBernoulli(), Call(gen, 0.0)).Times(0);
EXPECT_CALL(absl::MockPoisson<int>(), Call(gen, 1.0)).Times(0);
}
TEST(AnythingMatcher, MatchesAnyArgument) {
using testing::_;
{
absl::MockingBitGen gen;
ON_CALL(absl::MockUniform<int>(), Call(absl::IntervalClosed, gen, _, 1000))
.WillByDefault(Return(11));
ON_CALL(absl::MockUniform<int>(),
Call(absl::IntervalClosed, gen, _, Ne(1000)))
.WillByDefault(Return(99));
EXPECT_EQ(absl::Uniform(absl::IntervalClosed, gen, 10, 1000000), 99);
EXPECT_EQ(absl::Uniform(absl::IntervalClosed, gen, 10, 1000), 11);
}
{
absl::MockingBitGen gen;
ON_CALL(absl::MockUniform<int>(), Call(gen, 1, _))
.WillByDefault(Return(25));
ON_CALL(absl::MockUniform<int>(), Call(gen, Ne(1), _))
.WillByDefault(Return(99));
EXPECT_EQ(absl::Uniform(gen, 3, 1000000), 99);
EXPECT_EQ(absl::Uniform(gen, 1, 1000000), 25);
}
{
absl::MockingBitGen gen;
ON_CALL(absl::MockUniform<int>(), Call(gen, _, _))
.WillByDefault(Return(145));
EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145);
EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145);
}
}
TEST(AnythingMatcher, WithWillByDefault) {
using testing::_;
absl::MockingBitGen gen;
std::vector<int> values = {11, 22, 33, 44, 55, 66, 77, 88, 99, 1010};
ON_CALL(absl::MockUniform<size_t>(), Call(gen, 0, _))
.WillByDefault(Return(0));
for (int i = 0; i < 100; i++) {
auto& elem = values[absl::Uniform(gen, 0u, values.size())];
EXPECT_EQ(elem, 11);
}
}
TEST(BasicMocking, WillByDefaultWithArgs) {
using testing::_;
absl::MockingBitGen gen;
ON_CALL(absl::MockPoisson<int>(), Call(gen, _))
.WillByDefault([](double lambda) {
return static_cast<int>(std::rint(lambda * 10));
});
EXPECT_EQ(absl::Poisson<int>(gen, 1.7), 17);
EXPECT_EQ(absl::Poisson<int>(gen, 0.03), 0);
}
TEST(MockingBitGen, InSequenceSucceedsInOrder) {
absl::MockingBitGen gen;
testing::InSequence seq;
EXPECT_CALL(absl::MockPoisson<int>(), Call(gen, 1.0)).WillOnce(Return(3));
EXPECT_CALL(absl::MockPoisson<int>(), Call(gen, 2.0)).WillOnce(Return(4));
EXPECT_EQ(absl::Poisson<int>(gen, 1.0), 3);
EXPECT_EQ(absl::Poisson<int>(gen, 2.0), 4);
}
TEST(MockingBitGen, NiceMock) {
::testing::NiceMock<absl::MockingBitGen> gen;
ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
EXPECT_EQ(absl::Uniform(gen, 10, 1000), 145);
EXPECT_EQ(absl::Uniform(gen, 100, 1000), 145);
}
TEST(MockingBitGen, NaggyMock) {
::testing::NaggyMock<absl::MockingBitGen> gen;
ON_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillByDefault(Return(145));
ON_CALL(absl::MockPoisson<int>(), Call(gen, _)).WillByDefault(Return(3));
EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
}
TEST(MockingBitGen, StrictMock_NotEnough) {
EXPECT_NONFATAL_FAILURE(
[]() {
::testing::StrictMock<absl::MockingBitGen> gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _))
.WillOnce(Return(145));
}(),
"unsatisfied and active");
}
TEST(MockingBitGen, StrictMock_TooMany) {
::testing::StrictMock<absl::MockingBitGen> gen;
EXPECT_CALL(absl::MockUniform<int>(), Call(gen, _, _)).WillOnce(Return(145));
EXPECT_EQ(absl::Uniform(gen, 1, 1000), 145);
EXPECT_NONFATAL_FAILURE(
[&]() { EXPECT_EQ(absl::Uniform(gen, 0, 1000), 0); }(),
"over-saturated and active");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/mocking_bit_gen.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/mocking_bit_gen_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
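A note on the pattern every test above relies on: absl::MockingBitGen is substitutable for a real bit generator only where the code under test is parameterized on absl::BitGenRef. The sketch below illustrates that wiring; it is not part of the dataset, and RollDie is a hypothetical name.

// Illustrative only: code written against absl::BitGenRef accepts either a
// real absl::BitGen or an absl::MockingBitGen. RollDie is hypothetical.
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/mocking_bit_gen.h"
#include "absl/random/random.h"

int RollDie(absl::BitGenRef gen_ref) {
  return absl::Uniform(absl::IntervalClosed, gen_ref, 1, 6);
}

TEST(RollDieSketch, CanBeForcedToSix) {
  absl::MockingBitGen gen;
  // Interval-tagged calls are matched with the tag as the first Call
  // argument, exactly as in the tests above.
  EXPECT_CALL(absl::MockUniform<int>(),
              Call(absl::IntervalClosed, gen, 1, 6))
      .WillOnce(::testing::Return(6));
  EXPECT_EQ(RollDie(gen), 6);
}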
213a7c82-6f11-4429-9428-dd1bc9636c7c | cpp | google/quiche | certificate_view | quiche/quic/core/crypto/certificate_view.cc | quiche/quic/core/crypto/certificate_view_test.cc | #include "quiche/quic/core/crypto/certificate_view.h"
#include <algorithm>
#include <cstdint>
#include <istream>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "openssl/base.h"
#include "openssl/bytestring.h"
#include "openssl/digest.h"
#include "openssl/ec.h"
#include "openssl/ec_key.h"
#include "openssl/evp.h"
#include "openssl/nid.h"
#include "openssl/rsa.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/boring_utils.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_time_utils.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
namespace {
using ::quiche::QuicheTextUtils;
constexpr uint8_t kX509Version[] = {0x02, 0x01, 0x02};
constexpr uint8_t kSubjectAltNameOid[] = {0x55, 0x1d, 0x11};
PublicKeyType PublicKeyTypeFromKey(EVP_PKEY* public_key) {
switch (EVP_PKEY_id(public_key)) {
case EVP_PKEY_RSA:
return PublicKeyType::kRsa;
case EVP_PKEY_EC: {
const EC_KEY* key = EVP_PKEY_get0_EC_KEY(public_key);
if (key == nullptr) {
return PublicKeyType::kUnknown;
}
const EC_GROUP* group = EC_KEY_get0_group(key);
if (group == nullptr) {
return PublicKeyType::kUnknown;
}
const int curve_nid = EC_GROUP_get_curve_name(group);
switch (curve_nid) {
case NID_X9_62_prime256v1:
return PublicKeyType::kP256;
case NID_secp384r1:
return PublicKeyType::kP384;
default:
return PublicKeyType::kUnknown;
}
}
case EVP_PKEY_ED25519:
return PublicKeyType::kEd25519;
default:
return PublicKeyType::kUnknown;
}
}
}  // namespace
PublicKeyType PublicKeyTypeFromSignatureAlgorithm(
uint16_t signature_algorithm) {
switch (signature_algorithm) {
case SSL_SIGN_RSA_PSS_RSAE_SHA256:
return PublicKeyType::kRsa;
case SSL_SIGN_ECDSA_SECP256R1_SHA256:
return PublicKeyType::kP256;
case SSL_SIGN_ECDSA_SECP384R1_SHA384:
return PublicKeyType::kP384;
case SSL_SIGN_ED25519:
return PublicKeyType::kEd25519;
default:
return PublicKeyType::kUnknown;
}
}
QUICHE_EXPORT QuicSignatureAlgorithmVector
SupportedSignatureAlgorithmsForQuic() {
return QuicSignatureAlgorithmVector{
SSL_SIGN_ED25519, SSL_SIGN_ECDSA_SECP256R1_SHA256,
SSL_SIGN_ECDSA_SECP384R1_SHA384, SSL_SIGN_RSA_PSS_RSAE_SHA256};
}
namespace {
std::string AttributeNameToString(const CBS& oid_cbs) {
absl::string_view oid = CbsToStringPiece(oid_cbs);
if (oid.length() == 3 && absl::StartsWith(oid, "\x55\x04")) {
    // These map the X.520 attribute-type OIDs under arc 2.5.4
    // (0x55 0x04 ...): 3=CN, 6=C, 7=L, 8=ST, 10=O, 11=OU.
    switch (oid[2]) {
      case '\x3':
        return "CN";
      case '\x7':
        return "L";
      case '\x8':
        return "ST";
      case '\xa':
        return "O";
      case '\xb':
        return "OU";
      case '\x6':
        return "C";
    }
}
bssl::UniquePtr<char> oid_representation(CBS_asn1_oid_to_text(&oid_cbs));
if (oid_representation == nullptr) {
return absl::StrCat("(", absl::BytesToHexString(oid), ")");
}
return std::string(oid_representation.get());
}
}  // namespace
std::optional<std::string> X509NameAttributeToString(CBS input) {
CBS name, value;
unsigned value_tag;
if (!CBS_get_asn1(&input, &name, CBS_ASN1_OBJECT) ||
!CBS_get_any_asn1(&input, &value, &value_tag) || CBS_len(&input) != 0) {
return std::nullopt;
}
return absl::StrCat(AttributeNameToString(name), "=",
absl::CHexEscape(CbsToStringPiece(value)));
}
namespace {
template <unsigned inner_tag, char separator,
std::optional<std::string> (*parser)(CBS)>
std::optional<std::string> ParseAndJoin(CBS input) {
std::vector<std::string> pieces;
while (CBS_len(&input) != 0) {
CBS attribute;
if (!CBS_get_asn1(&input, &attribute, inner_tag)) {
return std::nullopt;
}
std::optional<std::string> formatted = parser(attribute);
if (!formatted.has_value()) {
return std::nullopt;
}
pieces.push_back(*formatted);
}
return absl::StrJoin(pieces, std::string({separator}));
}
std::optional<std::string> RelativeDistinguishedNameToString(CBS input) {
return ParseAndJoin<CBS_ASN1_SEQUENCE, '+', X509NameAttributeToString>(input);
}
std::optional<std::string> DistinguishedNameToString(CBS input) {
return ParseAndJoin<CBS_ASN1_SET, ',', RelativeDistinguishedNameToString>(
input);
}
}  // namespace
std::string PublicKeyTypeToString(PublicKeyType type) {
switch (type) {
case PublicKeyType::kRsa:
return "RSA";
case PublicKeyType::kP256:
return "ECDSA P-256";
case PublicKeyType::kP384:
return "ECDSA P-384";
case PublicKeyType::kEd25519:
return "Ed25519";
case PublicKeyType::kUnknown:
return "unknown";
}
return "";
}
std::optional<quic::QuicWallTime> ParseDerTime(unsigned tag,
absl::string_view payload) {
if (tag != CBS_ASN1_GENERALIZEDTIME && tag != CBS_ASN1_UTCTIME) {
QUIC_DLOG(WARNING) << "Invalid tag supplied for a DER timestamp";
return std::nullopt;
}
const size_t year_length = tag == CBS_ASN1_GENERALIZEDTIME ? 4 : 2;
uint64_t year, month, day, hour, minute, second;
quiche::QuicheDataReader reader(payload);
if (!reader.ReadDecimal64(year_length, &year) ||
!reader.ReadDecimal64(2, &month) || !reader.ReadDecimal64(2, &day) ||
!reader.ReadDecimal64(2, &hour) || !reader.ReadDecimal64(2, &minute) ||
!reader.ReadDecimal64(2, &second) ||
reader.ReadRemainingPayload() != "Z") {
QUIC_DLOG(WARNING) << "Failed to parse the DER timestamp";
return std::nullopt;
}
  if (tag == CBS_ASN1_UTCTIME) {
    QUICHE_DCHECK_LE(year, 100u);
    // Per RFC 5280, two-digit UTCTime years 50-99 mean 1950-1999 and
    // years 00-49 mean 2000-2049.
    year += (year >= 50) ? 1900 : 2000;
  }
const std::optional<int64_t> unix_time =
quiche::QuicheUtcDateTimeToUnixSeconds(year, month, day, hour, minute,
second);
if (!unix_time.has_value() || *unix_time < 0) {
return std::nullopt;
}
return QuicWallTime::FromUNIXSeconds(*unix_time);
}
PemReadResult ReadNextPemMessage(std::istream* input) {
constexpr absl::string_view kPemBegin = "-----BEGIN ";
constexpr absl::string_view kPemEnd = "-----END ";
constexpr absl::string_view kPemDashes = "-----";
std::string line_buffer, encoded_message_contents, expected_end;
bool pending_message = false;
PemReadResult result;
while (std::getline(*input, line_buffer)) {
absl::string_view line(line_buffer);
QuicheTextUtils::RemoveLeadingAndTrailingWhitespace(&line);
if (!pending_message && absl::StartsWith(line, kPemBegin) &&
absl::EndsWith(line, kPemDashes)) {
result.type = std::string(
line.substr(kPemBegin.size(),
line.size() - kPemDashes.size() - kPemBegin.size()));
expected_end = absl::StrCat(kPemEnd, result.type, kPemDashes);
pending_message = true;
continue;
}
if (pending_message && line == expected_end) {
std::optional<std::string> data =
QuicheTextUtils::Base64Decode(encoded_message_contents);
if (data.has_value()) {
result.status = PemReadResult::kOk;
result.contents = *data;
} else {
result.status = PemReadResult::kError;
}
return result;
}
if (pending_message) {
encoded_message_contents.append(std::string(line));
}
}
bool eof_reached = input->eof() && !pending_message;
return PemReadResult{
(eof_reached ? PemReadResult::kEof : PemReadResult::kError), "", ""};
}
std::unique_ptr<CertificateView> CertificateView::ParseSingleCertificate(
absl::string_view certificate) {
std::unique_ptr<CertificateView> result(new CertificateView());
CBS top = StringPieceToCbs(certificate);
CBS top_certificate, tbs_certificate, signature_algorithm, signature;
if (!CBS_get_asn1(&top, &top_certificate, CBS_ASN1_SEQUENCE) ||
CBS_len(&top) != 0) {
return nullptr;
}
  if (!CBS_get_asn1(&top_certificate, &tbs_certificate, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&top_certificate, &signature_algorithm,
CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&top_certificate, &signature, CBS_ASN1_BITSTRING) ||
CBS_len(&top_certificate) != 0) {
return nullptr;
}
int has_version, has_extensions;
CBS version, serial, signature_algorithm_inner, issuer, validity, subject,
spki, issuer_id, subject_id, extensions_outer;
  if (!CBS_get_optional_asn1(
&tbs_certificate, &version, &has_version,
CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 0) ||
!CBS_get_asn1(&tbs_certificate, &serial, CBS_ASN1_INTEGER) ||
!CBS_get_asn1(&tbs_certificate, &signature_algorithm_inner,
CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&tbs_certificate, &issuer, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&tbs_certificate, &validity, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&tbs_certificate, &subject, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1_element(&tbs_certificate, &spki, CBS_ASN1_SEQUENCE) ||
!CBS_get_optional_asn1(&tbs_certificate, &issuer_id, nullptr,
CBS_ASN1_CONTEXT_SPECIFIC | 1) ||
!CBS_get_optional_asn1(&tbs_certificate, &subject_id, nullptr,
CBS_ASN1_CONTEXT_SPECIFIC | 2) ||
!CBS_get_optional_asn1(
&tbs_certificate, &extensions_outer, &has_extensions,
CBS_ASN1_CONSTRUCTED | CBS_ASN1_CONTEXT_SPECIFIC | 3) ||
CBS_len(&tbs_certificate) != 0) {
return nullptr;
}
result->subject_der_ = CbsToStringPiece(subject);
unsigned not_before_tag, not_after_tag;
CBS not_before, not_after;
if (!CBS_get_any_asn1(&validity, ¬_before, ¬_before_tag) ||
!CBS_get_any_asn1(&validity, ¬_after, ¬_after_tag) ||
CBS_len(&validity) != 0) {
QUIC_DLOG(WARNING) << "Failed to extract the validity dates";
return nullptr;
}
std::optional<QuicWallTime> not_before_parsed =
ParseDerTime(not_before_tag, CbsToStringPiece(not_before));
std::optional<QuicWallTime> not_after_parsed =
ParseDerTime(not_after_tag, CbsToStringPiece(not_after));
if (!not_before_parsed.has_value() || !not_after_parsed.has_value()) {
QUIC_DLOG(WARNING) << "Failed to parse validity dates";
return nullptr;
}
result->validity_start_ = *not_before_parsed;
result->validity_end_ = *not_after_parsed;
result->public_key_.reset(EVP_parse_public_key(&spki));
if (result->public_key_ == nullptr) {
QUIC_DLOG(WARNING) << "Failed to parse the public key";
return nullptr;
}
if (!result->ValidatePublicKeyParameters()) {
QUIC_DLOG(WARNING) << "Public key has invalid parameters";
return nullptr;
}
if (!has_version ||
!CBS_mem_equal(&version, kX509Version, sizeof(kX509Version))) {
QUIC_DLOG(WARNING) << "Bad X.509 version";
return nullptr;
}
if (!has_extensions) {
return nullptr;
}
CBS extensions;
if (!CBS_get_asn1(&extensions_outer, &extensions, CBS_ASN1_SEQUENCE) ||
CBS_len(&extensions_outer) != 0) {
QUIC_DLOG(WARNING) << "Failed to extract the extension sequence";
return nullptr;
}
if (!result->ParseExtensions(extensions)) {
QUIC_DLOG(WARNING) << "Failed to parse extensions";
return nullptr;
}
return result;
}
bool CertificateView::ParseExtensions(CBS extensions) {
while (CBS_len(&extensions) != 0) {
CBS extension, oid, critical, payload;
    if (!CBS_get_asn1(&extensions, &extension, CBS_ASN1_SEQUENCE) ||
!CBS_get_asn1(&extension, &oid, CBS_ASN1_OBJECT) ||
!CBS_get_optional_asn1(&extension, &critical, nullptr,
CBS_ASN1_BOOLEAN) ||
!CBS_get_asn1(&extension, &payload, CBS_ASN1_OCTETSTRING) ||
CBS_len(&extension) != 0) {
QUIC_DLOG(WARNING) << "Bad extension entry";
return false;
}
if (CBS_mem_equal(&oid, kSubjectAltNameOid, sizeof(kSubjectAltNameOid))) {
CBS alt_names;
if (!CBS_get_asn1(&payload, &alt_names, CBS_ASN1_SEQUENCE) ||
CBS_len(&payload) != 0) {
QUIC_DLOG(WARNING) << "Failed to parse subjectAltName";
return false;
}
while (CBS_len(&alt_names) != 0) {
CBS alt_name_cbs;
unsigned int alt_name_tag;
if (!CBS_get_any_asn1(&alt_names, &alt_name_cbs, &alt_name_tag)) {
QUIC_DLOG(WARNING) << "Failed to parse subjectAltName";
return false;
}
absl::string_view alt_name = CbsToStringPiece(alt_name_cbs);
QuicIpAddress ip_address;
switch (alt_name_tag) {
case CBS_ASN1_CONTEXT_SPECIFIC | 2:
subject_alt_name_domains_.push_back(alt_name);
break;
case CBS_ASN1_CONTEXT_SPECIFIC | 7:
if (!ip_address.FromPackedString(alt_name.data(),
alt_name.size())) {
QUIC_DLOG(WARNING) << "Failed to parse subjectAltName IP address";
return false;
}
subject_alt_name_ips_.push_back(ip_address);
break;
default:
QUIC_DLOG(INFO) << "Unknown subjectAltName tag " << alt_name_tag;
continue;
}
}
}
}
return true;
}
std::vector<std::string> CertificateView::LoadPemFromStream(
std::istream* input) {
std::vector<std::string> result;
for (;;) {
PemReadResult read_result = ReadNextPemMessage(input);
if (read_result.status == PemReadResult::kEof) {
return result;
}
if (read_result.status != PemReadResult::kOk) {
return std::vector<std::string>();
}
if (read_result.type != "CERTIFICATE") {
continue;
}
result.emplace_back(std::move(read_result.contents));
}
}
PublicKeyType CertificateView::public_key_type() const {
return PublicKeyTypeFromKey(public_key_.get());
}
bool CertificateView::ValidatePublicKeyParameters() {
PublicKeyType key_type = PublicKeyTypeFromKey(public_key_.get());
switch (key_type) {
case PublicKeyType::kRsa:
return EVP_PKEY_bits(public_key_.get()) >= 2048;
case PublicKeyType::kP256:
case PublicKeyType::kP384:
case PublicKeyType::kEd25519:
return true;
default:
return false;
}
}
bool CertificateView::VerifySignature(absl::string_view data,
absl::string_view signature,
uint16_t signature_algorithm) const {
if (PublicKeyTypeFromSignatureAlgorithm(signature_algorithm) !=
PublicKeyTypeFromKey(public_key_.get())) {
QUIC_BUG(quic_bug_10640_1)
<< "Mismatch between the requested signature algorithm and the "
"type of the public key.";
return false;
}
bssl::ScopedEVP_MD_CTX md_ctx;
EVP_PKEY_CTX* pctx;
if (!EVP_DigestVerifyInit(
md_ctx.get(), &pctx,
SSL_get_signature_algorithm_digest(signature_algorithm), nullptr,
public_key_.get())) {
return false;
}
if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) {
if (!EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) ||
!EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1)) {
return false;
}
}
return EVP_DigestVerify(
md_ctx.get(), reinterpret_cast<const uint8_t*>(signature.data()),
signature.size(), reinterpret_cast<const uint8_t*>(data.data()),
data.size());
}
std::optional<std::string> CertificateView::GetHumanReadableSubject() const {
CBS input = StringPieceToCbs(subject_der_);
return DistinguishedNameToString(input);
}
std::unique_ptr<CertificatePrivateKey> CertificatePrivateKey::LoadFromDer(
absl::string_view private_key) {
std::unique_ptr<CertificatePrivateKey> result(new CertificatePrivateKey());
CBS private_key_cbs = StringPieceToCbs(private_key);
result->private_key_.reset(EVP_parse_private_key(&private_key_cbs));
if (result->private_key_ == nullptr || CBS_len(&private_key_cbs) != 0) {
return nullptr;
}
return result;
}
std::unique_ptr<CertificatePrivateKey> CertificatePrivateKey::LoadPemFromStream(
std::istream* input) {
skip:
PemReadResult result = ReadNextPemMessage(input);
if (result.status != PemReadResult::kOk) {
return nullptr;
}
if (result.type == "PRIVATE KEY") {
return LoadFromDer(result.contents);
}
if (result.type == "RSA PRIVATE KEY") {
CBS private_key_cbs = StringPieceToCbs(result.contents);
bssl::UniquePtr<RSA> rsa(RSA_parse_private_key(&private_key_cbs));
if (rsa == nullptr || CBS_len(&private_key_cbs) != 0) {
return nullptr;
}
std::unique_ptr<CertificatePrivateKey> key(new CertificatePrivateKey());
key->private_key_.reset(EVP_PKEY_new());
EVP_PKEY_assign_RSA(key->private_key_.get(), rsa.release());
return key;
}
if (result.type == "EC PARAMETERS") {
goto skip;
}
if (result.type == "EC PRIVATE KEY") {
CBS private_key_cbs = StringPieceToCbs(result.contents);
bssl::UniquePtr<EC_KEY> ec_key(
EC_KEY_parse_private_key(&private_key_cbs, nullptr));
if (ec_key == nullptr || CBS_len(&private_key_cbs) != 0) {
return nullptr;
}
std::unique_ptr<CertificatePrivateKey> key(new CertificatePrivateKey());
key->private_key_.reset(EVP_PKEY_new());
EVP_PKEY_assign_EC_KEY(key->private_key_.get(), ec_key.release());
return key;
}
return nullptr;
}
std::string CertificatePrivateKey::Sign(absl::string_view input,
uint16_t signature_algorithm) const {
if (!ValidForSignatureAlgorithm(signature_algorithm)) {
QUIC_BUG(quic_bug_10640_2)
<< "Mismatch between the requested signature algorithm and the "
"type of the private key.";
return "";
}
bssl::ScopedEVP_MD_CTX md_ctx;
EVP_PKEY_CTX* pctx;
if (!EVP_DigestSignInit(
md_ctx.get(), &pctx,
SSL_get_signature_algorithm_digest(signature_algorithm),
nullptr, private_key_.get())) {
return "";
}
if (SSL_is_signature_algorithm_rsa_pss(signature_algorithm)) {
if (!EVP_PKEY_CTX_set_rsa_padding(pctx, RSA_PKCS1_PSS_PADDING) ||
!EVP_PKEY_CTX_set_rsa_pss_saltlen(pctx, -1)) {
return "";
}
}
std::string output;
size_t output_size;
if (!EVP_DigestSign(md_ctx.get(), nullptr, &output_size,
reinterpret_cast<const uint8_t*>(input.data()),
input.size())) {
return "";
}
output.resize(output_size);
if (!EVP_DigestSign(
md_ctx.get(), reinterpret_cast<uint8_t*>(&output[0]), &output_size,
reinterpret_cast<const uint8_t*>(input.data()), input.size())) {
return "";
}
output.resize(output_size);
return output;
}
bool CertificatePrivateKey::MatchesPublicKey(
const CertificateView& view) const {
return EVP_PKEY_cmp(view.public_key(), private_key_.get()) == 1;
}
bool CertificatePrivateKey::ValidForSignatureAlgorithm(
uint16_t signature_algorithm) const {
return PublicKeyTypeFromSignatureAlgorithm(signature_algorithm) ==
PublicKeyTypeFromKey(private_key_.get());
}
} | #include "quiche/quic/core/crypto/certificate_view.h"
#include <limits>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "openssl/base.h"
#include "openssl/bytestring.h"
#include "openssl/evp.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/boring_utils.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/test_certificates.h"
#include "quiche/common/platform/api/quiche_time_utils.h"
namespace quic {
namespace test {
namespace {
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Optional;
TEST(CertificateViewTest, PemParser) {
std::stringstream stream(kTestCertificatePem);
PemReadResult result = ReadNextPemMessage(&stream);
EXPECT_EQ(result.status, PemReadResult::kOk);
EXPECT_EQ(result.type, "CERTIFICATE");
EXPECT_EQ(result.contents, kTestCertificate);
result = ReadNextPemMessage(&stream);
EXPECT_EQ(result.status, PemReadResult::kEof);
}
TEST(CertificateViewTest, Parse) {
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(kTestCertificate);
ASSERT_TRUE(view != nullptr);
EXPECT_THAT(view->subject_alt_name_domains(),
ElementsAre(absl::string_view("www.example.org"),
absl::string_view("mail.example.org"),
absl::string_view("mail.example.com")));
EXPECT_THAT(view->subject_alt_name_ips(),
ElementsAre(QuicIpAddress::Loopback4()));
EXPECT_EQ(EVP_PKEY_id(view->public_key()), EVP_PKEY_RSA);
const QuicWallTime validity_start = QuicWallTime::FromUNIXSeconds(
*quiche::QuicheUtcDateTimeToUnixSeconds(2020, 1, 30, 18, 13, 59));
EXPECT_EQ(view->validity_start(), validity_start);
const QuicWallTime validity_end = QuicWallTime::FromUNIXSeconds(
*quiche::QuicheUtcDateTimeToUnixSeconds(2020, 2, 2, 18, 13, 59));
EXPECT_EQ(view->validity_end(), validity_end);
EXPECT_EQ(view->public_key_type(), PublicKeyType::kRsa);
EXPECT_EQ(PublicKeyTypeToString(view->public_key_type()), "RSA");
EXPECT_EQ("C=US,ST=California,L=Mountain View,O=QUIC Server,CN=127.0.0.1",
view->GetHumanReadableSubject());
}
TEST(CertificateViewTest, ParseCertWithUnknownSanType) {
std::stringstream stream(kTestCertWithUnknownSanTypePem);
PemReadResult result = ReadNextPemMessage(&stream);
EXPECT_EQ(result.status, PemReadResult::kOk);
EXPECT_EQ(result.type, "CERTIFICATE");
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(result.contents);
EXPECT_TRUE(view != nullptr);
}
TEST(CertificateViewTest, PemSingleCertificate) {
std::stringstream pem_stream(kTestCertificatePem);
std::vector<std::string> chain =
CertificateView::LoadPemFromStream(&pem_stream);
EXPECT_THAT(chain, ElementsAre(kTestCertificate));
}
TEST(CertificateViewTest, PemMultipleCertificates) {
std::stringstream pem_stream(kTestCertificateChainPem);
std::vector<std::string> chain =
CertificateView::LoadPemFromStream(&pem_stream);
EXPECT_THAT(chain,
ElementsAre(kTestCertificate, HasSubstr("QUIC Server Root CA")));
}
TEST(CertificateViewTest, PemNoCertificates) {
std::stringstream pem_stream("one\ntwo\nthree\n");
std::vector<std::string> chain =
CertificateView::LoadPemFromStream(&pem_stream);
EXPECT_TRUE(chain.empty());
}
TEST(CertificateViewTest, SignAndVerify) {
std::unique_ptr<CertificatePrivateKey> key =
CertificatePrivateKey::LoadFromDer(kTestCertificatePrivateKey);
ASSERT_TRUE(key != nullptr);
std::string data = "A really important message";
std::string signature = key->Sign(data, SSL_SIGN_RSA_PSS_RSAE_SHA256);
ASSERT_FALSE(signature.empty());
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(kTestCertificate);
ASSERT_TRUE(view != nullptr);
EXPECT_TRUE(key->MatchesPublicKey(*view));
EXPECT_TRUE(
view->VerifySignature(data, signature, SSL_SIGN_RSA_PSS_RSAE_SHA256));
EXPECT_FALSE(view->VerifySignature("An unimportant message", signature,
SSL_SIGN_RSA_PSS_RSAE_SHA256));
EXPECT_FALSE(view->VerifySignature(data, "Not a signature",
SSL_SIGN_RSA_PSS_RSAE_SHA256));
}
TEST(CertificateViewTest, PrivateKeyPem) {
std::unique_ptr<CertificateView> view =
CertificateView::ParseSingleCertificate(kTestCertificate);
ASSERT_TRUE(view != nullptr);
std::stringstream pem_stream(kTestCertificatePrivateKeyPem);
std::unique_ptr<CertificatePrivateKey> pem_key =
CertificatePrivateKey::LoadPemFromStream(&pem_stream);
ASSERT_TRUE(pem_key != nullptr);
EXPECT_TRUE(pem_key->MatchesPublicKey(*view));
std::stringstream legacy_stream(kTestCertificatePrivateKeyLegacyPem);
std::unique_ptr<CertificatePrivateKey> legacy_key =
CertificatePrivateKey::LoadPemFromStream(&legacy_stream);
ASSERT_TRUE(legacy_key != nullptr);
EXPECT_TRUE(legacy_key->MatchesPublicKey(*view));
}
TEST(CertificateViewTest, PrivateKeyEcdsaPem) {
std::stringstream pem_stream(kTestEcPrivateKeyLegacyPem);
std::unique_ptr<CertificatePrivateKey> key =
CertificatePrivateKey::LoadPemFromStream(&pem_stream);
ASSERT_TRUE(key != nullptr);
EXPECT_TRUE(key->ValidForSignatureAlgorithm(SSL_SIGN_ECDSA_SECP256R1_SHA256));
}
TEST(CertificateViewTest, DerTime) {
EXPECT_THAT(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024Z"),
Optional(QuicWallTime::FromUNIXSeconds(24)));
EXPECT_THAT(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19710101000024Z"),
Optional(QuicWallTime::FromUNIXSeconds(365 * 86400 + 24)));
EXPECT_THAT(ParseDerTime(CBS_ASN1_UTCTIME, "700101000024Z"),
Optional(QuicWallTime::FromUNIXSeconds(24)));
EXPECT_TRUE(ParseDerTime(CBS_ASN1_UTCTIME, "200101000024Z").has_value());
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, ""), std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024.001Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024Q"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024-0500"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "700101000024ZZ"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024.00Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024.Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "197O0101000024Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101000024.0O1Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "-9700101000024Z"),
std::nullopt);
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "1970-101000024Z"),
std::nullopt);
EXPECT_TRUE(ParseDerTime(CBS_ASN1_UTCTIME, "490101000024Z").has_value());
EXPECT_FALSE(ParseDerTime(CBS_ASN1_UTCTIME, "500101000024Z").has_value());
EXPECT_THAT(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101230000Z"),
Optional(QuicWallTime::FromUNIXSeconds(23 * 3600)));
EXPECT_EQ(ParseDerTime(CBS_ASN1_GENERALIZEDTIME, "19700101240000Z"),
std::nullopt);
}
TEST(CertificateViewTest, NameAttribute) {
std::string unknown_oid;
ASSERT_TRUE(absl::HexStringToBytes("060b2a864886f712040186ee1b0c0454657374",
&unknown_oid));
EXPECT_EQ("1.2.840.113554.4.1.112411=Test",
X509NameAttributeToString(StringPieceToCbs(unknown_oid)));
std::string non_printable;
ASSERT_TRUE(
absl::HexStringToBytes("06035504030c0742656c6c3a2007", &non_printable));
EXPECT_EQ(R"(CN=Bell: \x07)",
X509NameAttributeToString(StringPieceToCbs(non_printable)));
std::string invalid_oid;
ASSERT_TRUE(absl::HexStringToBytes("060255800c0454657374", &invalid_oid));
EXPECT_EQ("(5580)=Test",
X509NameAttributeToString(StringPieceToCbs(invalid_oid)));
}
TEST(CertificateViewTest, SupportedSignatureAlgorithmsForQuicIsUpToDate) {
QuicSignatureAlgorithmVector supported =
SupportedSignatureAlgorithmsForQuic();
for (int i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
uint16_t sigalg = static_cast<uint16_t>(i);
PublicKeyType key_type = PublicKeyTypeFromSignatureAlgorithm(sigalg);
if (absl::c_find(supported, sigalg) == supported.end()) {
EXPECT_EQ(key_type, PublicKeyType::kUnknown);
} else {
EXPECT_NE(key_type, PublicKeyType::kUnknown);
}
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/certificate_view.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/certificate_view_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
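To tie the API above together, here is a minimal usage sketch of CertificateView and CertificatePrivateKey: load a PEM chain and key, confirm they match, then sign and verify. It is illustrative only, not part of the dataset; cert_pem, key_pem, and SignAndVerify are hypothetical names, and error handling is reduced to early returns.

// Illustrative only: hedged end-to-end use of the API above.
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/certificate_view.h"

bool SignAndVerify(const std::string& cert_pem, const std::string& key_pem,
                   absl::string_view data) {
  std::stringstream cert_stream(cert_pem);
  std::vector<std::string> chain =
      quic::CertificateView::LoadPemFromStream(&cert_stream);
  if (chain.empty()) return false;
  std::unique_ptr<quic::CertificateView> view =
      quic::CertificateView::ParseSingleCertificate(chain[0]);
  std::stringstream key_stream(key_pem);
  std::unique_ptr<quic::CertificatePrivateKey> key =
      quic::CertificatePrivateKey::LoadPemFromStream(&key_stream);
  if (view == nullptr || key == nullptr || !key->MatchesPublicKey(*view)) {
    return false;
  }
  // Assumes an RSA key, as in the unit test above; real code should pick the
  // algorithm matching view->public_key_type().
  std::string signature = key->Sign(data, SSL_SIGN_RSA_PSS_RSAE_SHA256);
  return !signature.empty() &&
         view->VerifySignature(data, signature, SSL_SIGN_RSA_PSS_RSAE_SHA256);
}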
862e5cf8-a705-43d3-a4f0-b68033bd09fe | cpp | tensorflow/tensorflow | tensor_matcher | tensorflow/core/framework/tensor_matcher.cc | tensorflow/lite/experimental/shlo/tensor_matcher_test.cc | #include "tensorflow/core/framework/tensor_matcher.h"
#include <stdint.h>
#include <complex>
#include <ostream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/bfloat16.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace test {
namespace {
using tensorflow::Tensor;
template <typename T>
::testing::Matcher<absl::Span<const T>> MakePointwiseMatcher(
absl::Span<const T> target) {
return ::testing::MatcherCast<absl::Span<const T>>(
::testing::Pointwise(::testing::Eq(), target));
}
template <>
::testing::Matcher<absl::Span<const float>> MakePointwiseMatcher(
absl::Span<const float> target) {
return ::testing::MatcherCast<absl::Span<const float>>(
::testing::Pointwise(::testing::FloatEq(), target));
}
template <>
::testing::Matcher<absl::Span<const double>> MakePointwiseMatcher(
absl::Span<const double> target) {
return ::testing::MatcherCast<absl::Span<const double>>(
::testing::Pointwise(::testing::DoubleEq(), target));
}
template <typename T>
bool MatchAndExplainPointwise(absl::Span<const T> value,
absl::Span<const T> target,
::testing::MatchResultListener* listener) {
return MakePointwiseMatcher<T>(target).MatchAndExplain(value, listener);
}
class TensorEqMatcherImpl : public ::testing::MatcherInterface<const Tensor&> {
public:
explicit TensorEqMatcherImpl(const Tensor& target) : target_(target) {}
void DescribeTo(::std::ostream* os) const override {
*os << "data type is " << tensorflow::DataTypeString(target_.dtype())
<< ", and shape is " << target_.shape();
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
*os << ", and tensor data "; \
absl::Span<const T> data(target_.unaligned_flat<T>()); \
MakePointwiseMatcher<T>(data).DescribeTo(os); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
}
}
}
void DescribeNegationTo(::std::ostream* os) const override {
*os << "data type is not " << tensorflow::DataTypeString(target_.dtype())
<< ", or shape is not " << target_.shape();
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
*os << ", or tensor data "; \
absl::Span<const T> data(target_.unaligned_flat<T>()); \
MakePointwiseMatcher<T>(data).DescribeNegationTo(os); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
}
}
}
bool MatchAndExplain(
const Tensor& value,
::testing::MatchResultListener* listener) const override {
const bool dtype_compare = value.dtype() == target_.dtype();
*listener << "whose data type " << tensorflow::DataTypeString(value.dtype())
<< (dtype_compare ? " matches " : " doesn't match ")
<< tensorflow::DataTypeString(target_.dtype());
const bool shape_compare = value.shape() == target_.shape();
*listener << ", whose shape " << value.shape()
<< (shape_compare ? " matches " : " doesn't match ")
<< target_.shape();
if (!dtype_compare || !shape_compare) {
return false;
}
bool result;
switch (target_.dtype()) {
#define CASE_TYPE(T) \
case tensorflow::DataTypeToEnum<T>::value: { \
result = MatchAndExplainPointwise<T>( \
value.unaligned_flat<T>(), target_.unaligned_flat<T>(), listener); \
break; \
}
TF_CALL_POD_STRING_TYPES(CASE_TYPE);
TF_CALL_QUANTIZED_TYPES(CASE_TYPE);
TF_CALL_int4(CASE_TYPE);
TF_CALL_uint4(CASE_TYPE);
#undef CASE_TYPE
default: {
DLOG(FATAL) << "TensorEq matcher unsupported dtype: "
<< tensorflow::DataTypeString(target_.dtype());
result = false;
}
}
return result;
}
private:
const Tensor target_;
};
}
TensorEq::operator ::testing::Matcher<const Tensor&>() const {
return ::testing::MakeMatcher(new TensorEqMatcherImpl(target_));
}
}
} | #include "tensorflow/lite/experimental/shlo/tensor_matcher.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/tensor_with_data.h"
namespace shlo_ref {
namespace {
using ::shlo_ref::testing::TensorEq;
using ::testing::Not;
TEST(TensorMatcherTest, Eq) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), TensorEq(rhs.tensor()));
}
TEST(TensorMatcherTest, NotEqQuantized) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8, DataType::kF32>(
Shape{{1, 3}}, {.5f, 1.0f, 1.5f}, 0.1, 0);
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqType) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI32>(Shape{{1, 3}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqShape) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{3, 1}}, {5, 7, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
TEST(TensorMatcherTest, NotEqData) {
auto lhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 7, 9});
auto rhs = TensorWithData::Create<DataType::kSI8>(Shape{{1, 3}}, {5, 11, 9});
EXPECT_THAT(lhs.tensor(), Not(TensorEq(rhs.tensor())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_matcher.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/tensor_matcher_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
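A minimal illustration of using the tensorflow::test::TensorEq matcher defined above in a test; the test name and tensor values are hypothetical, not taken from the dataset.

// Illustrative only: TensorEq matches on dtype, shape, and element data.
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"

TEST(TensorEqSketch, MatchesDtypeShapeAndData) {
  tensorflow::Tensor expected(tensorflow::DT_FLOAT,
                              tensorflow::TensorShape({2}));
  expected.flat<float>()(0) = 1.0f;
  expected.flat<float>()(1) = 2.0f;
  tensorflow::Tensor actual = expected;  // same dtype, shape, and data
  EXPECT_THAT(actual, tensorflow::test::TensorEq(expected));
}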
397218b9-d790-4fc3-b0cd-29f95b4e35f6 | cpp | tensorflow/tensorflow | custom_graph_optimizer_registry | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc | tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry_test.cc | #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <string>
#include <unordered_map>
#include "absl/base/call_once.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
namespace grappler {
namespace {
typedef std::unordered_map<string, CustomGraphOptimizerRegistry::Creator>
RegistrationMap;
RegistrationMap* registered_optimizers = nullptr;
RegistrationMap* GetRegistrationMap() {
if (registered_optimizers == nullptr)
registered_optimizers = new RegistrationMap;
return registered_optimizers;
}
typedef std::unordered_map<string, PluginGraphOptimizerRegistry::Creator>
PluginRegistrationMap;
PluginRegistrationMap* GetPluginRegistrationMap() {
static PluginRegistrationMap* registered_plugin_optimizers =
new PluginRegistrationMap;
return registered_plugin_optimizers;
}
typedef std::unordered_map<string, ConfigList> PluginConfigMap;
PluginConfigMap* GetPluginConfigMap() {
static PluginConfigMap* plugin_config_map = new PluginConfigMap;
return plugin_config_map;
}
const ConfigList& DefaultPluginConfigs() {
static ConfigList* default_plugin_configs = new ConfigList(
false,
{{"implementation_selector", RewriterConfig::ON},
{"function_optimization", RewriterConfig::ON},
{"common_subgraph_elimination", RewriterConfig::ON},
{"arithmetic_optimization", RewriterConfig::ON},
{"debug_stripper", RewriterConfig::ON},
{"constant_folding", RewriterConfig::ON},
{"shape_optimization", RewriterConfig::ON},
{"auto_mixed_precision", RewriterConfig::ON},
{"auto_mixed_precision_onednn_bfloat16", RewriterConfig::ON},
{"auto_mixed_precision_mkl", RewriterConfig::ON},
{"auto_mixed_precision_cpu", RewriterConfig::ON},
{"pin_to_host_optimization", RewriterConfig::ON},
{"layout_optimizer", RewriterConfig::ON},
{"remapping", RewriterConfig::ON},
{"loop_optimization", RewriterConfig::ON},
{"dependency_optimization", RewriterConfig::ON},
{"auto_parallel", RewriterConfig::ON},
{"memory_optimization", RewriterConfig::ON},
{"scoped_allocator_optimization", RewriterConfig::ON}});
return *default_plugin_configs;
}
}  // namespace
std::unique_ptr<CustomGraphOptimizer>
CustomGraphOptimizerRegistry::CreateByNameOrNull(const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it == GetRegistrationMap()->end()) return nullptr;
return std::unique_ptr<CustomGraphOptimizer>(it->second());
}
std::vector<string> CustomGraphOptimizerRegistry::GetRegisteredOptimizers() {
std::vector<string> optimizer_names;
optimizer_names.reserve(GetRegistrationMap()->size());
for (const auto& opt : *GetRegistrationMap())
optimizer_names.emplace_back(opt.first);
return optimizer_names;
}
void CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
const Creator& optimizer_creator, const string& name) {
const auto it = GetRegistrationMap()->find(name);
if (it != GetRegistrationMap()->end()) {
LOG(FATAL) << "CustomGraphOptimizer is registered twice: " << name;
}
GetRegistrationMap()->insert({name, optimizer_creator});
}
std::vector<std::unique_ptr<CustomGraphOptimizer>>
PluginGraphOptimizerRegistry::CreateOptimizers(
const std::set<string>& device_types) {
std::vector<std::unique_ptr<CustomGraphOptimizer>> optimizer_list;
for (auto it = GetPluginRegistrationMap()->begin();
it != GetPluginRegistrationMap()->end(); ++it) {
if (device_types.find(it->first) == device_types.end()) continue;
    // The once_flag is a function-local static shared across loop iterations
    // and calls, so this message is logged at most once per process.
    static absl::once_flag plugin_optimizer_flag;
    absl::call_once(plugin_optimizer_flag, [&]() {
      LOG(INFO) << "Plugin optimizer for device_type " << it->first
                << " is enabled.";
    });
optimizer_list.emplace_back(
std::unique_ptr<CustomGraphOptimizer>(it->second()));
}
return optimizer_list;
}
void PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
const Creator& optimizer_creator, const std::string& device_type,
ConfigList& configs) {
auto ret = GetPluginConfigMap()->insert({device_type, configs});
if (!ret.second) {
LOG(FATAL) << "PluginGraphOptimizer with device_type "
<< device_type << " is registered twice.";
}
GetPluginRegistrationMap()->insert({device_type, optimizer_creator});
}
void PluginGraphOptimizerRegistry::PrintPluginConfigsIfConflict(
const std::set<string>& device_types) {
bool init = false, conflict = false;
ConfigList plugin_configs;
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (!init) {
plugin_configs = cur_plugin_configs;
init = true;
} else {
if (!(plugin_configs == cur_plugin_configs)) {
conflict = true;
break;
}
}
}
if (!conflict) return;
LOG(WARNING) << "Plugins have conflicting configs. Potential performance "
"regression may happen.";
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
string logs = "";
strings::StrAppend(&logs, "disable_model_pruning\t\t",
cur_plugin_configs.disable_model_pruning, "\n");
for (auto const& pair : cur_plugin_configs.toggle_config) {
strings::StrAppend(&logs, pair.first, string(32 - pair.first.size(), ' '),
(pair.second != RewriterConfig::OFF), "\n");
}
LOG(WARNING) << "Plugin's configs for device_type " << device_type << ":\n"
<< logs;
}
}
ConfigList PluginGraphOptimizerRegistry::GetPluginConfigs(
bool use_plugin_optimizers, const std::set<string>& device_types) {
if (!use_plugin_optimizers) return DefaultPluginConfigs();
  // Merge per-device configs conservatively: model pruning ends up disabled
  // if any plugin disables it, and each toggle ends up OFF if any plugin
  // sets it OFF.
  ConfigList ret_plugin_configs = DefaultPluginConfigs();
for (const auto& device_type : device_types) {
const auto it = GetPluginConfigMap()->find(device_type);
if (it == GetPluginConfigMap()->end()) continue;
auto cur_plugin_configs = it->second;
if (cur_plugin_configs.disable_model_pruning == true)
ret_plugin_configs.disable_model_pruning = true;
for (auto& pair : cur_plugin_configs.toggle_config) {
if (cur_plugin_configs.toggle_config[pair.first] == RewriterConfig::OFF)
ret_plugin_configs.toggle_config[pair.first] = RewriterConfig::OFF;
}
}
return ret_plugin_configs;
}
bool PluginGraphOptimizerRegistry::IsConfigsConflict(
ConfigList& user_config, ConfigList& plugin_config) {
  // A plugin that keeps the defaults cannot conflict with the user's config.
  if (plugin_config == DefaultPluginConfigs()) return false;
if (user_config.disable_model_pruning != plugin_config.disable_model_pruning)
return true;
for (auto& pair : user_config.toggle_config) {
if ((user_config.toggle_config[pair.first] == RewriterConfig::ON) &&
(plugin_config.toggle_config[pair.first] == RewriterConfig::OFF))
return true;
}
return false;
}
}
} | #include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
static const char* kTestOptimizerName = "Test";
static const char* kTestPluginOptimizerName = "TestPlugin";
class TestGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
REGISTER_GRAPH_OPTIMIZER_AS(TestGraphOptimizer, "StaticRegister");
TEST(CustomGraphOptimizerRegistryTest, DynamicRegistration) {
std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
std::unique_ptr<const CustomGraphOptimizer> test_optimizer;
ASSERT_EQ(
0, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
EXPECT_EQ(nullptr, test_optimizer);
CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
[]() { return new TestGraphOptimizer; }, "DynamicRegister");
optimizers = CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
ASSERT_EQ(
1, std::count(optimizers.begin(), optimizers.end(), "DynamicRegister"));
test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("DynamicRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(CustomGraphOptimizerRegistryTest, StaticRegistration) {
const std::vector<string> optimizers =
CustomGraphOptimizerRegistry::GetRegisteredOptimizers();
EXPECT_EQ(1,
std::count(optimizers.begin(), optimizers.end(), "StaticRegister"));
std::unique_ptr<const CustomGraphOptimizer> test_optimizer =
CustomGraphOptimizerRegistry::CreateByNameOrNull("StaticRegister");
ASSERT_NE(nullptr, test_optimizer);
EXPECT_EQ(kTestOptimizerName, test_optimizer->name());
}
TEST(GraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestGraphOptimizer; };
EXPECT_DEATH(CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
creator, "StaticRegister"),
"twice");
}
class TestPluginGraphOptimizer : public CustomGraphOptimizer {
public:
Status Init(
const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
return absl::OkStatus();
}
string name() const override { return kTestPluginOptimizerName; }
bool UsesFunctionLibrary() const override { return false; }
Status Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) override {
return absl::OkStatus();
}
};
TEST(PluginGraphOptimizerRegistryTest, CrashesOnDuplicateRegistration) {
const auto creator = []() { return new TestPluginGraphOptimizer; };
ConfigList config_list;
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "GPU",
config_list);
PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(creator, "CPU",
config_list);
EXPECT_DEATH(PluginGraphOptimizerRegistry::RegisterPluginOptimizerOrDie(
creator, "GPU", config_list),
"twice");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
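For reference, the registration paths exercised by the tests above look like this from an optimizer author's side. MyOptimizer and the registered names are hypothetical; the body mirrors TestGraphOptimizer from the unit test and is not part of the dataset.

// Illustrative only: authoring and registering a custom Grappler optimizer.
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"

class MyOptimizer : public tensorflow::grappler::CustomGraphOptimizer {
 public:
  tensorflow::Status Init(
      const tensorflow::RewriterConfig_CustomGraphOptimizer* config) override {
    return absl::OkStatus();
  }
  std::string name() const override { return "my_optimizer"; }
  bool UsesFunctionLibrary() const override { return false; }
  tensorflow::Status Optimize(tensorflow::grappler::Cluster* cluster,
                              const tensorflow::grappler::GrapplerItem& item,
                              tensorflow::GraphDef* optimized_graph) override {
    *optimized_graph = item.graph;  // pass-through placeholder
    return absl::OkStatus();
  }
};

// Static path: registers at load time; duplicate names are fatal.
REGISTER_GRAPH_OPTIMIZER_AS(MyOptimizer, "my_optimizer");

// Dynamic path, e.g. from an initialization routine:
//   tensorflow::grappler::CustomGraphOptimizerRegistry::RegisterOptimizerOrDie(
//       []() { return new MyOptimizer; }, "my_optimizer_dynamic");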
64f4977d-b4ee-450d-b8ec-812ee549e6e9 | cpp | tensorflow/tensorflow | resolve_constant_concatenation | tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc | tensorflow/lite/toco/graph_transformations/tests/resolve_constant_concatenation_test.cc | #include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
#include "tensorflow/lite/toco/tooling_util.h"
namespace toco {
namespace {
template <ArrayDataType A, typename T>
void CopyTensorSegments(const std::vector<Array*>& input_arrays,
const std::vector<int>& array_copy_size,
const int num_elements_concatenated_array,
Array* concatenated_array) {
  // Nothing to do unless every input has a constant buffer to copy from.
  for (Array* input_array : input_arrays) {
if (!input_array->buffer) {
return;
}
}
auto& concatenated_array_buffer =
concatenated_array->GetMutableBuffer<A>().data;
concatenated_array_buffer.resize(num_elements_concatenated_array);
CHECK(!input_arrays.empty());
CHECK_NE(array_copy_size[0], 0);
const int total_copy_steps =
input_arrays[0]->GetBuffer<A>().data.size() / array_copy_size[0];
std::vector<const T*> src_ptr;
src_ptr.reserve(input_arrays.size());
for (Array* input_array : input_arrays) {
src_ptr.push_back(input_array->GetBuffer<A>().data.data());
}
T* dest_ptr = concatenated_array_buffer.data();
for (int s = 0; s < total_copy_steps; s++) {
for (size_t i = 0; i < input_arrays.size(); i++) {
std::copy(src_ptr[i], src_ptr[i] + array_copy_size[i], dest_ptr);
src_ptr[i] += array_copy_size[i];
dest_ptr += array_copy_size[i];
}
}
}
template <ArrayDataType A>
void ConcatenateTensorBuffers(const std::vector<Array*>& input_arrays,
int concatenation_axis,
Array* concatenated_array) {
int num_elements_concatenated_array = 1;
for (int i = 0; i < concatenated_array->shape().dimensions_count(); i++) {
num_elements_concatenated_array *= concatenated_array->shape().dims()[i];
}
  // For each input, the contiguous copy size is the product of its
  // dimensions from the concatenation axis onward.
  std::vector<int> array_copy_size(input_arrays.size());
int count = 0;
for (Array* input_array : input_arrays) {
const Shape array_shape = input_array->shape();
array_copy_size[count] = 1;
for (int i = concatenation_axis; i < array_shape.dimensions_count(); i++) {
array_copy_size[count] *= array_shape.dims()[i];
}
count++;
}
CopyTensorSegments<A, DataType<A>>(input_arrays, array_copy_size,
num_elements_concatenated_array,
concatenated_array);
}
void SetMinMaxForConcatenedArray(GraphTransformation* transformation,
const std::vector<Array*>& input_arrays,
Array* concatenated_array) {
CHECK(concatenated_array->data_type == ArrayDataType::kFloat);
if (concatenated_array->minmax) return;
double concat_min = std::numeric_limits<double>::infinity();
double concat_max = -std::numeric_limits<double>::infinity();
for (Array* input_array : input_arrays) {
if (!input_array->minmax) return;
const MinMax& input_minmax = input_array->GetMinMax();
concat_min = std::min(concat_min, input_minmax.min);
concat_max = std::max(concat_max, input_minmax.max);
}
MinMax& minmax = concatenated_array->GetOrCreateMinMax();
minmax.min = concat_min;
minmax.max = concat_max;
transformation->AddMessageF("Setting concatenated array min/max to %g,%g",
concat_min, concat_max);
}
}  // namespace
::tensorflow::Status ResolveConstantConcatenation::Run(Model* model,
std::size_t op_index,
bool* modified) {
*modified = false;
const auto concat_it = model->operators.begin() + op_index;
const auto* concat_base_op = concat_it->get();
if (concat_base_op->type != OperatorType::kConcatenation) {
return absl::OkStatus();
}
const auto* concat_op =
static_cast<const ConcatenationOperator*>(concat_base_op);
  // Fold only when every input is a constant, already-shaped, unquantized
  // array that no other op produces and that can be discarded afterwards.
  for (const std::string& input_name : concat_op->inputs) {
const Operator* input_op = GetOpWithOutput(*model, input_name);
if (input_op) return absl::OkStatus();
if (!IsConstantParameterArray(*model, input_name)) return absl::OkStatus();
if (!model->GetArray(input_name).has_shape()) return absl::OkStatus();
if (model->GetArray(input_name).quantization_params)
return absl::OkStatus();
if (!IsDiscardableArray(*model, input_name)) return absl::OkStatus();
}
const int concatenation_axis = concat_op->axis;
CHECK_EQ(concat_op->outputs.size(), 1);
std::string concatenated_array_name = concat_op->outputs[0];
Array& concatenated_array = model->GetOrCreateArray(concatenated_array_name);
std::vector<Array*> input_arrays;
input_arrays.reserve(concat_op->inputs.size());
for (const std::string& input_name : concat_op->inputs) {
input_arrays.push_back(&model->GetArray(input_name));
}
AddMessageF("Performing constant concat of %s into %s",
absl::StrJoin(concat_op->inputs, ", "), concatenated_array_name);
switch (concatenated_array.data_type) {
case ArrayDataType::kFloat:
ConcatenateTensorBuffers<ArrayDataType::kFloat>(
input_arrays, concatenation_axis, &concatenated_array);
SetMinMaxForConcatenedArray(this, input_arrays, &concatenated_array);
break;
case ArrayDataType::kUint8:
ConcatenateTensorBuffers<ArrayDataType::kUint8>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kInt32:
ConcatenateTensorBuffers<ArrayDataType::kInt32>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kInt64:
ConcatenateTensorBuffers<ArrayDataType::kInt64>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kString:
ConcatenateTensorBuffers<ArrayDataType::kString>(
input_arrays, concatenation_axis, &concatenated_array);
break;
case ArrayDataType::kComplex64:
ConcatenateTensorBuffers<ArrayDataType::kComplex64>(
input_arrays, concatenation_axis, &concatenated_array);
break;
default:
LOG(FATAL) << "ArrayDataType not supported";
}
DeleteOpAndArrays(model, concat_op);
*modified = true;
return absl::OkStatus();
}
} | #include <algorithm>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/toco/graph_transformations/graph_transformations.h"
#include "tensorflow/lite/toco/model.h"
namespace toco {
namespace {
std::vector<testing::Matcher<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error = 1e-5) {
std::vector<testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
matchers.emplace_back(testing::FloatNear(v, max_abs_error));
}
return matchers;
}
}  // namespace
class ResolveConstantConcatenationTest : public ::testing::Test {
protected:
ResolveConstantConcatenationTest() {}
void PrepareModel(Model* model, int axis) {
const std::string output_name("concat_op_output");
model->flags.add_output_arrays(output_name);
std::vector<std::string> concat_input_names = {"array0", "array1", "array2",
"array3"};
const int kDim = 3;
const int kElementPerDim = 2;
const int kBufSize = 8;
const int kNumArrays = 4;
static float in_buf[kNumArrays][kBufSize] = {
{0., 1., 2., 3., 4., 5., 6., 7.},
{10., 11., 12., 13., 14., 15., 16., 17.},
{20., 21., 22., 23., 24., 25., 26., 27.},
{30., 31., 32., 33., 34., 35., 36., 37.}};
int cnt = 0;
for (const std::string& concat_input_name : concat_input_names) {
Array& in_array = model->GetOrCreateArray(concat_input_name);
in_array.data_type = ArrayDataType::kFloat;
Shape* in_array_shape = in_array.mutable_shape();
std::vector<int>* in_array_shape_dim = in_array_shape->mutable_dims();
for (int i = 0; i < kDim; i++) {
in_array_shape_dim->push_back(kElementPerDim);
}
auto& in_array_buffer =
in_array.GetMutableBuffer<toco::ArrayDataType::kFloat>();
in_array_buffer.data.resize(kBufSize);
float* buf_ptr =
in_array.GetMutableBuffer<toco::ArrayDataType::kFloat>().data.data();
std::copy(in_buf[cnt], in_buf[cnt] + kBufSize, buf_ptr);
cnt++;
}
auto* concatenation_op = new ConcatenationOperator;
concatenation_op->axis = axis;
concatenation_op->inputs = concat_input_names;
concatenation_op->outputs = {output_name};
Array& out_array = model->GetOrCreateArray(concatenation_op->outputs[0]);
out_array.data_type = ArrayDataType::kFloat;
Shape* out_array_shape = out_array.mutable_shape();
std::vector<int>* out_array_shape_dim = out_array_shape->mutable_dims();
out_array_shape_dim->resize(kDim);
for (int i = 0; i < kDim; i++) {
if (i == axis) {
(*out_array_shape_dim)[i] = kNumArrays * kElementPerDim;
} else {
(*out_array_shape_dim)[i] = kElementPerDim;
}
}
model->operators.push_back(std::unique_ptr<Operator>(concatenation_op));
}
};
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis0) {
Model model;
const int axis = 0;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
const auto& concatenated_array = model.GetArray(model.flags.output_arrays(0));
EXPECT_THAT(concatenated_array.GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 2., 3., 4., 5., 6., 7., 10., 11., 12.,
13., 14., 15., 16., 17., 20., 21., 22., 23., 24., 25.,
26., 27., 30., 31., 32., 33., 34., 35., 36., 37.})));
}
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis1) {
Model model;
const int axis = 1;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
auto& concatenated_array = (*model.GetArrayMap().begin()).second;
EXPECT_THAT(concatenated_array->GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 2., 3., 10., 11., 12., 13., 20., 21., 22.,
23., 30., 31., 32., 33., 4., 5., 6., 7., 14., 15.,
16., 17., 24., 25., 26., 27., 34., 35., 36., 37.})));
}
TEST_F(ResolveConstantConcatenationTest, ConcatAtAxis2) {
Model model;
const int axis = 2;
PrepareModel(&model, axis);
GraphTransformationsSet graph_transformation_set;
graph_transformation_set.Add(new toco::ResolveConstantConcatenation);
EXPECT_THAT(model.GetArrayMap().size(), 5);
bool modified;
ASSERT_TRUE((*graph_transformation_set.begin())
->Run(&model, 0, &modified)
.ok());
EXPECT_THAT(model.GetArrayMap().size(), 1);
auto& concatenated_array = (*model.GetArrayMap().begin()).second;
EXPECT_THAT(concatenated_array->GetBuffer<toco::ArrayDataType::kFloat>().data,
ElementsAreArray(ArrayFloatNear(
{0., 1., 10., 11., 20., 21., 30., 31., 2., 3., 12.,
13., 22., 23., 32., 33., 4., 5., 14., 15., 24., 25.,
34., 35., 6., 7., 16., 17., 26., 27., 36., 37.})));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/resolve_constant_concatenation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/toco/graph_transformations/tests/resolve_constant_concatenation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
668bf81b-a8db-4ed7-ae62-1dee10fb1e5c | cpp | google/quiche | tun_device_packet_exchanger | quiche/quic/qbone/bonnet/tun_device_packet_exchanger.cc | quiche/quic/qbone/bonnet/tun_device_packet_exchanger_test.cc | #include "quiche/quic/qbone/bonnet/tun_device_packet_exchanger.h"
#include <netinet/icmp6.h>
#include <netinet/ip6.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "quiche/quic/qbone/platform/icmp_packet.h"
#include "quiche/quic/qbone/platform/netlink_interface.h"
#include "quiche/quic/qbone/qbone_constants.h"
namespace quic {
TunDevicePacketExchanger::TunDevicePacketExchanger(
size_t mtu, KernelInterface* kernel, NetlinkInterface* netlink,
QbonePacketExchanger::Visitor* visitor, size_t max_pending_packets,
bool is_tap, StatsInterface* stats, absl::string_view ifname)
: QbonePacketExchanger(visitor, max_pending_packets),
mtu_(mtu),
kernel_(kernel),
netlink_(netlink),
ifname_(ifname),
is_tap_(is_tap),
stats_(stats) {
if (is_tap_) {
mtu_ += ETH_HLEN;
}
}
bool TunDevicePacketExchanger::WritePacket(const char* packet, size_t size,
bool* blocked, std::string* error) {
*blocked = false;
if (fd_ < 0) {
*error = absl::StrCat("Invalid file descriptor of the TUN device: ", fd_);
stats_->OnWriteError(error);
return false;
}
auto buffer = std::make_unique<QuicData>(packet, size);
if (is_tap_) {
buffer = ApplyL2Headers(*buffer);
}
int result = kernel_->write(fd_, buffer->data(), buffer->length());
if (result == -1) {
if (errno == EWOULDBLOCK || errno == EAGAIN) {
*error =
absl::ErrnoToStatus(errno, "Write to the TUN device was blocked.")
.message();
*blocked = true;
stats_->OnWriteError(error);
}
return false;
}
stats_->OnPacketWritten(result);
return true;
}
std::unique_ptr<QuicData> TunDevicePacketExchanger::ReadPacket(
bool* blocked, std::string* error) {
*blocked = false;
if (fd_ < 0) {
*error = absl::StrCat("Invalid file descriptor of the TUN device: ", fd_);
stats_->OnReadError(error);
return nullptr;
}
auto read_buffer = std::make_unique<char[]>(mtu_);
int result = kernel_->read(fd_, read_buffer.get(), mtu_);
if (result <= 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
*error =
absl::ErrnoToStatus(errno, "Read from the TUN device was blocked.")
.message();
*blocked = true;
stats_->OnReadError(error);
}
return nullptr;
}
auto buffer = std::make_unique<QuicData>(read_buffer.release(), result, true);
if (is_tap_) {
buffer = ConsumeL2Headers(*buffer);
}
if (buffer) {
stats_->OnPacketRead(buffer->length());
}
return buffer;
}
void TunDevicePacketExchanger::set_file_descriptor(int fd) { fd_ = fd; }
const TunDevicePacketExchanger::StatsInterface*
TunDevicePacketExchanger::stats_interface() const {
return stats_;
}
std::unique_ptr<QuicData> TunDevicePacketExchanger::ApplyL2Headers(
const QuicData& l3_packet) {
if (is_tap_ && !mac_initialized_) {
NetlinkInterface::LinkInfo link_info{};
if (netlink_->GetLinkInfo(ifname_, &link_info)) {
memcpy(tap_mac_, link_info.hardware_address, ETH_ALEN);
mac_initialized_ = true;
} else {
QUIC_LOG_EVERY_N_SEC(ERROR, 30)
<< "Unable to get link info for: " << ifname_;
}
}
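  // Prepend a synthetic Ethernet header, using the TAP device's own MAC as
  // both source and destination and marking the payload as IPv6.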
const auto l2_packet_size = l3_packet.length() + ETH_HLEN;
auto l2_buffer = std::make_unique<char[]>(l2_packet_size);
auto* hdr = reinterpret_cast<ethhdr*>(l2_buffer.get());
memcpy(hdr->h_dest, tap_mac_, ETH_ALEN);
memcpy(hdr->h_source, tap_mac_, ETH_ALEN);
hdr->h_proto = absl::ghtons(ETH_P_IPV6);
memcpy(l2_buffer.get() + ETH_HLEN, l3_packet.data(), l3_packet.length());
return std::make_unique<QuicData>(l2_buffer.release(), l2_packet_size, true);
}
std::unique_ptr<QuicData> TunDevicePacketExchanger::ConsumeL2Headers(
const QuicData& l2_packet) {
if (l2_packet.length() < ETH_HLEN) {
return nullptr;
}
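  // Only IPv6 frames are forwarded up the stack; any other ethertype is
  // dropped here.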
auto* hdr = reinterpret_cast<const ethhdr*>(l2_packet.data());
if (hdr->h_proto != absl::ghtons(ETH_P_IPV6)) {
return nullptr;
}
constexpr auto kIp6PrefixLen = ETH_HLEN + sizeof(ip6_hdr);
constexpr auto kIcmp6PrefixLen = kIp6PrefixLen + sizeof(icmp6_hdr);
if (l2_packet.length() < kIp6PrefixLen) {
return nullptr;
}
auto* ip_hdr = reinterpret_cast<const ip6_hdr*>(l2_packet.data() + ETH_HLEN);
const bool is_icmp = ip_hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt == IPPROTO_ICMPV6;
bool is_neighbor_solicit = false;
if (is_icmp) {
if (l2_packet.length() < kIcmp6PrefixLen) {
return nullptr;
}
is_neighbor_solicit =
reinterpret_cast<const icmp6_hdr*>(l2_packet.data() + kIp6PrefixLen)
->icmp6_type == ND_NEIGHBOR_SOLICIT;
}
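  // Answer neighbor solicitations for the QBONE gateway address ourselves by
  // synthesizing a neighbor advertisement; the solicitation itself is
  // consumed and never delivered as an L3 packet.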
if (is_neighbor_solicit) {
auto* icmp6_payload = l2_packet.data() + kIcmp6PrefixLen;
QuicIpAddress target_address(
*reinterpret_cast<const in6_addr*>(icmp6_payload));
if (target_address != *QboneConstants::GatewayAddress()) {
return nullptr;
}
constexpr size_t kIcmpv6OptionSize = 8;
const int payload_size = sizeof(in6_addr) + kIcmpv6OptionSize;
auto payload = std::make_unique<char[]>(payload_size);
memcpy(payload.get(), icmp6_payload, sizeof(in6_addr));
int pos = sizeof(in6_addr);
payload[pos++] = ND_OPT_TARGET_LINKADDR;
payload[pos++] = 1;
memcpy(&payload[pos], tap_mac_, ETH_ALEN);
icmp6_hdr response_hdr{};
response_hdr.icmp6_type = ND_NEIGHBOR_ADVERT;
response_hdr.icmp6_dataun.icmp6_un_data8[0] = 64;
CreateIcmpPacket(ip_hdr->ip6_src, ip_hdr->ip6_src, response_hdr,
absl::string_view(payload.get(), payload_size),
[this](absl::string_view packet) {
bool blocked;
std::string error;
WritePacket(packet.data(), packet.size(), &blocked,
&error);
});
return nullptr;
}
const auto l3_packet_size = l2_packet.length() - ETH_HLEN;
auto shift_buffer = std::make_unique<char[]>(l3_packet_size);
memcpy(shift_buffer.get(), l2_packet.data() + ETH_HLEN, l3_packet_size);
return std::make_unique<QuicData>(shift_buffer.release(), l3_packet_size,
true);
}
} | #include "quiche/quic/qbone/bonnet/tun_device_packet_exchanger.h"
#include <string>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/bonnet/mock_packet_exchanger_stats_interface.h"
#include "quiche/quic/qbone/mock_qbone_client.h"
#include "quiche/quic/qbone/platform/mock_kernel.h"
namespace quic::test {
namespace {
const size_t kMtu = 1000;
const size_t kMaxPendingPackets = 5;
const int kFd = 15;
using ::testing::_;
using ::testing::Invoke;
using ::testing::StrEq;
using ::testing::StrictMock;
class MockVisitor : public QbonePacketExchanger::Visitor {
public:
MOCK_METHOD(void, OnReadError, (const std::string&), (override));
MOCK_METHOD(void, OnWriteError, (const std::string&), (override));
MOCK_METHOD(absl::Status, OnWrite, (absl::string_view), (override));
};
class TunDevicePacketExchangerTest : public QuicTest {
protected:
TunDevicePacketExchangerTest()
: exchanger_(kMtu, &mock_kernel_, nullptr, &mock_visitor_,
kMaxPendingPackets, false, &mock_stats_,
absl::string_view()) {
exchanger_.set_file_descriptor(kFd);
}
~TunDevicePacketExchangerTest() override = default;
MockKernel mock_kernel_;
StrictMock<MockVisitor> mock_visitor_;
StrictMock<MockQboneClient> mock_client_;
StrictMock<MockPacketExchangerStatsInterface> mock_stats_;
TunDevicePacketExchanger exchanger_;
};
TEST_F(TunDevicePacketExchangerTest, WritePacketReturnsFalseOnError) {
std::string packet = "fake packet";
EXPECT_CALL(mock_kernel_, write(kFd, _, packet.size()))
.WillOnce(Invoke([](int fd, const void* buf, size_t count) {
errno = ECOMM;
return -1;
}));
EXPECT_CALL(mock_visitor_, OnWriteError(_));
EXPECT_CALL(mock_visitor_, OnWrite(StrEq(packet))).Times(1);
exchanger_.WritePacketToNetwork(packet.data(), packet.size());
}
TEST_F(TunDevicePacketExchangerTest,
WritePacketReturnFalseAndBlockedOnBlockedTunnel) {
std::string packet = "fake packet";
EXPECT_CALL(mock_kernel_, write(kFd, _, packet.size()))
.WillOnce(Invoke([](int fd, const void* buf, size_t count) {
errno = EAGAIN;
return -1;
}));
EXPECT_CALL(mock_stats_, OnWriteError(_)).Times(1);
EXPECT_CALL(mock_visitor_, OnWrite(StrEq(packet))).Times(1);
exchanger_.WritePacketToNetwork(packet.data(), packet.size());
}
TEST_F(TunDevicePacketExchangerTest, WritePacketReturnsTrueOnSuccessfulWrite) {
std::string packet = "fake packet";
EXPECT_CALL(mock_kernel_, write(kFd, _, packet.size()))
.WillOnce(Invoke([packet](int fd, const void* buf, size_t count) {
EXPECT_THAT(reinterpret_cast<const char*>(buf), StrEq(packet));
return count;
}));
EXPECT_CALL(mock_stats_, OnPacketWritten(_)).Times(1);
EXPECT_CALL(mock_visitor_, OnWrite(StrEq(packet))).Times(1);
exchanger_.WritePacketToNetwork(packet.data(), packet.size());
}
TEST_F(TunDevicePacketExchangerTest, ReadPacketReturnsNullOnError) {
EXPECT_CALL(mock_kernel_, read(kFd, _, kMtu))
.WillOnce(Invoke([](int fd, void* buf, size_t count) {
errno = ECOMM;
return -1;
}));
EXPECT_CALL(mock_visitor_, OnReadError(_));
exchanger_.ReadAndDeliverPacket(&mock_client_);
}
TEST_F(TunDevicePacketExchangerTest, ReadPacketReturnsNullOnBlockedRead) {
EXPECT_CALL(mock_kernel_, read(kFd, _, kMtu))
.WillOnce(Invoke([](int fd, void* buf, size_t count) {
errno = EAGAIN;
return -1;
}));
EXPECT_CALL(mock_stats_, OnReadError(_)).Times(1);
EXPECT_FALSE(exchanger_.ReadAndDeliverPacket(&mock_client_));
}
TEST_F(TunDevicePacketExchangerTest,
ReadPacketReturnsThePacketOnSuccessfulRead) {
std::string packet = "fake_packet";
EXPECT_CALL(mock_kernel_, read(kFd, _, kMtu))
.WillOnce(Invoke([packet](int fd, void* buf, size_t count) {
memcpy(buf, packet.data(), packet.size());
return packet.size();
}));
EXPECT_CALL(mock_client_, ProcessPacketFromNetwork(StrEq(packet)));
EXPECT_CALL(mock_stats_, OnPacketRead(_)).Times(1);
EXPECT_TRUE(exchanger_.ReadAndDeliverPacket(&mock_client_));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device_packet_exchanger.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device_packet_exchanger_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
934c29ae-afea-4099-b759-9b5f96655cef | cpp | tensorflow/tensorflow | stat_summarizer | tensorflow/core/util/stat_summarizer.cc | tensorflow/core/util/stat_summarizer_test.cc | #include "tensorflow/core/util/stat_summarizer.h"
#include <iomanip>
#include <map>
#include <queue>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/framework/tensor_description.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using Detail = StatsCalculator::Detail;
StatSummarizer::StatSummarizer(const StatSummarizerOptions& options)
: stats_calculator_(new StatsCalculator(options)) {}
StatSummarizer::StatSummarizer(const tensorflow::GraphDef& tensorflow_graph)
: stats_calculator_(new StatsCalculator(StatSummarizerOptions())) {}
StatSummarizer::~StatSummarizer() = default;
void StatSummarizer::Validate(const std::vector<TensorDescription>* outputs,
const NodeExecStats& ns) const {
if (outputs->size() != ns.output_size()) {
LOG(WARNING) << "Number of outputs changed between runs for '"
<< ns.node_name() << "' - was " << outputs->size() << ", now "
<< ns.output_size();
} else {
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
const auto& stored = (*outputs)[slot];
const auto& current = output.tensor_description();
bool do_tensors_match =
(stored.dtype() == current.dtype()) &&
(stored.shape().dim_size() == current.shape().dim_size());
if (do_tensors_match) {
for (int i = 0; i < stored.shape().dim_size(); ++i) {
if (stored.shape().dim(i).size() != current.shape().dim(i).size()) {
do_tensors_match = false;
break;
}
}
}
if (!do_tensors_match) {
LOG(WARNING) << "Output tensor changed between runs for '"
<< ns.node_name();
}
}
}
}
void StatSummarizer::PrintStepStats() const {
string output = GetOutputString();
std::istringstream iss(output);
for (std::string line; std::getline(iss, line);) {
LOG(INFO) << line;
}
}
namespace {
std::string OpType(const DeviceStepStats& ds, const NodeExecStats& ns) {
if (absl::StrContains(ds.device(), "/stream") ||
absl::StrContains(ds.device(), "/memcpy")) {
return "<>";
}
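  // The timeline label is expected to look like "name = OpType(args)";
  // extract the token between " = " and the opening parenthesis.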
const std::string sep(" = ");
const std::string& label = ns.timeline_label();
std::string::size_type start = label.find(sep);
if (start == std::string::npos) return "<>";
start += sep.size();
std::string::size_type end = label.find('(', start);
if (end == std::string::npos) return "<>";
return label.substr(start, end - start);
}
}
void StatSummarizer::ProcessStepStats(const StepStats& step_stats) {
int64_t curr_total_us = 0;
int64_t mem_total = 0;
int node_num = 0;
for (const auto& ds : step_stats.dev_stats()) {
for (const auto& ns : ds.node_stats()) {
if (absl::StrContains(ds.device(), "/stream") &&
!absl::StrContains(ds.device(), "/stream:all")) {
continue;
}
if (absl::StrContains(ds.device(), "/host:CPU")) {
continue;
}
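      // GPU stream/memcpy nodes are named "node:annotation"; split them so
      // the display name is tagged [Kernel]/[MemCpy] and the op type carries
      // the gpu:-prefixed annotation.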
std::string name = ns.node_name();
std::string op_type = "<>";
if (absl::StrContains(ds.device(), "/stream")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2) {
name = parts[0] + " [Kernel]";
op_type = "gpu:" + parts[1];
}
} else if (absl::StrContains(ds.device(), "/memcpy")) {
auto parts = str_util::Split(ns.node_name(), ':');
if (parts.size() == 2 || parts.size() == 3) {
name = parts.front() + " [MemCpy]";
op_type = "gpu:" + parts.back();
}
} else {
op_type = OpType(ds, ns);
}
++node_num;
const int64_t curr_time = ns.all_end_rel_micros();
curr_total_us += curr_time;
auto output_result =
outputs_.emplace(name, std::vector<TensorDescription>());
std::vector<TensorDescription>* outputs = &(output_result.first->second);
int64_t rel_end_us = curr_time;
if (output_result.second) {
outputs->resize(ns.output_size());
for (const auto& output : ns.output()) {
const int32_t slot = output.slot();
if ((slot < 0) || (slot >= ns.output_size())) {
continue;
}
(*outputs)[slot] = output.tensor_description();
}
}
int64_t curr_node_mem = 0;
for (const auto& mem : ns.memory()) {
const int64_t mem_usage = mem.total_bytes();
curr_node_mem += mem_usage;
}
stats_calculator_->AddNodeStats(name, op_type, node_num, rel_end_us,
curr_node_mem);
mem_total += curr_node_mem;
Validate(outputs, ns);
}
}
stats_calculator_->UpdateRunTotalUs(curr_total_us);
stats_calculator_->UpdateMemoryUsed(mem_total);
}
void StatSummarizer::PrintOutputs() const {
std::priority_queue<
std::pair<int64_t, const std::pair<const std::string, Detail>*>>
timings;
for (const auto& entry : stats_calculator_->GetDetails()) {
timings.emplace(-entry.second.run_order, &entry);
}
LOG(INFO) << "============ Node output tensor sizes in run order ========";
while (!timings.empty()) {
auto entry = timings.top();
timings.pop();
std::stringstream stream;
const auto detail_outputs = outputs_.at(entry.second->first);
stream << entry.second->first << "\t" << detail_outputs.size();
for (const auto& tensor : detail_outputs) {
stream << "\t" << DataTypeString(tensor.dtype());
stream << "\t" << tensor.shape().dim_size();
for (const auto& d : tensor.shape().dim()) {
stream << "\t" << d.size();
}
}
LOG(INFO) << stream.str();
}
}
} | #include "tensorflow/core/util/stat_summarizer.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
TEST(StatSummarizerTest, ExtractsOpTypes) {
const std::string graph_def_str(R"EOF(
node {
name: "myconstant"
op: "Const"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
}
float_val: 1.0
}
}
}
}
versions {
producer: 21
}
)EOF");
GraphDef graph_def;
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(graph_def_str, &graph_def));
std::unique_ptr<Session> session(NewSession(SessionOptions()));
ASSERT_TRUE(session != nullptr);
TF_ASSERT_OK(session->Create(graph_def));
RunOptions run_options;
run_options.set_trace_level(RunOptions::FULL_TRACE);
RunMetadata run_metadata;
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(run_options, {}, {"myconstant:0"}, {}, &outputs,
&run_metadata));
StatSummarizer stats(graph_def);
stats.ProcessStepStats(run_metadata.step_stats());
const std::string output = stats.GetOutputString();
const std::string by_node_type = stats.GetStatsByNodeType();
ASSERT_TRUE(absl::StrContains(output, "Const")) << output;
ASSERT_TRUE(absl::StrContains(output, "myconstant")) << output;
ASSERT_TRUE(absl::StrContains(by_node_type, "Const")) << by_node_type;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/stat_summarizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/stat_summarizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03fd3fca-6046-4159-b19f-540a358fdd4f | cpp | abseil/abseil-cpp | stacktrace | absl/debugging/stacktrace.cc | absl/debugging/stacktrace_test.cc | #include "absl/debugging/stacktrace.h"
#include <atomic>
#include "absl/base/attributes.h"
#include "absl/base/port.h"
#include "absl/debugging/internal/stacktrace_config.h"
#if defined(ABSL_STACKTRACE_INL_HEADER)
#include ABSL_STACKTRACE_INL_HEADER
#else
# error Cannot calculate stack trace: will need to write for your environment
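// Unreachable once #error fires; presumably listed so build tooling still
// records the per-platform implementation dependencies.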
# include "absl/debugging/internal/stacktrace_aarch64-inl.inc"
# include "absl/debugging/internal/stacktrace_arm-inl.inc"
# include "absl/debugging/internal/stacktrace_emscripten-inl.inc"
# include "absl/debugging/internal/stacktrace_generic-inl.inc"
# include "absl/debugging/internal/stacktrace_powerpc-inl.inc"
# include "absl/debugging/internal/stacktrace_riscv-inl.inc"
# include "absl/debugging/internal/stacktrace_unimplemented-inl.inc"
# include "absl/debugging/internal/stacktrace_win32-inl.inc"
# include "absl/debugging/internal/stacktrace_x86-inl.inc"
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
typedef int (*Unwinder)(void**, int*, int, int, const void*, int*);
std::atomic<Unwinder> custom;
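// Dispatches to the unwinder installed via SetStackUnwinder(), if any.
// skip_count is bumped by one so this dispatch frame is not reported, and the
// tail call is blocked so the frame actually exists at unwind time.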
template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline int Unwind(void** result, int* sizes,
int max_depth, int skip_count,
const void* uc,
int* min_dropped_frames) {
Unwinder f = &UnwindImpl<IS_STACK_FRAMES, IS_WITH_CONTEXT>;
Unwinder g = custom.load(std::memory_order_acquire);
if (g != nullptr) f = g;
int size = (*f)(result, sizes, max_depth, skip_count + 1, uc,
min_dropped_frames);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return size;
}
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackFrames(
void** result, int* sizes, int max_depth, int skip_count) {
return Unwind<true, false>(result, sizes, max_depth, skip_count, nullptr,
nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
GetStackFramesWithContext(void** result, int* sizes, int max_depth,
int skip_count, const void* uc,
int* min_dropped_frames) {
return Unwind<true, true>(result, sizes, max_depth, skip_count, uc,
min_dropped_frames);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int GetStackTrace(
void** result, int max_depth, int skip_count) {
return Unwind<false, false>(result, nullptr, max_depth, skip_count, nullptr,
nullptr);
}
ABSL_ATTRIBUTE_NOINLINE ABSL_ATTRIBUTE_NO_TAIL_CALL int
GetStackTraceWithContext(void** result, int max_depth, int skip_count,
const void* uc, int* min_dropped_frames) {
return Unwind<false, true>(result, nullptr, max_depth, skip_count, uc,
min_dropped_frames);
}
void SetStackUnwinder(Unwinder w) {
custom.store(w, std::memory_order_release);
}
int DefaultStackUnwinder(void** pcs, int* sizes, int depth, int skip,
const void* uc, int* min_dropped_frames) {
skip++;
Unwinder f = nullptr;
if (sizes == nullptr) {
if (uc == nullptr) {
f = &UnwindImpl<false, false>;
} else {
f = &UnwindImpl<false, true>;
}
} else {
if (uc == nullptr) {
f = &UnwindImpl<true, false>;
} else {
f = &UnwindImpl<true, true>;
}
}
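  // The volatile store after the call appears intended to keep this frame
  // live (blocking tail-call optimization), mirroring
  // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() above.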
volatile int x = 0;
int n = (*f)(pcs, sizes, depth, skip, uc, min_dropped_frames);
x = 1; (void) x;
return n;
}
ABSL_NAMESPACE_END
} | #include "absl/debugging/stacktrace.h"
#include "gtest/gtest.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
namespace {
#if defined(__linux__) && (defined(__x86_64__) || defined(__aarch64__))
ABSL_ATTRIBUTE_NOINLINE void Unwind(void* p) {
ABSL_ATTRIBUTE_UNUSED static void* volatile sink = p;
constexpr int kSize = 16;
void* stack[kSize];
int frames[kSize];
absl::GetStackTrace(stack, kSize, 0);
absl::GetStackFrames(stack, frames, kSize, 0);
}
ABSL_ATTRIBUTE_NOINLINE void HugeFrame() {
char buffer[1 << 20];
Unwind(buffer);
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
TEST(StackTrace, HugeFrame) {
HugeFrame();
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/stacktrace.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/stacktrace_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ed9a3fba-817e-4f60-bc20-9c5d849e2c26 | cpp | tensorflow/tensorflow | ifrt_serving_executable | tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc | tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/extract_callback.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include "tensorflow/compiler/mlir/tfrt/utils/export.h"
#include "tensorflow/compiler/tf2xla/host_compute_metadata.pb.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/pjrt/host_callback.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/hlo/hlo_program.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/python/pjrt_ifrt/pjrt_host_callback.h"
#include "xla/python/pjrt_ifrt/xla_compiler.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_loaded_variable_utils.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_tensor_utils.h"
#include "tensorflow/core/tfrt/ifrt/sharding_utils.h"
#include "tensorflow/core/tfrt/ifrt/tf_host_callback.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/platform/tstring.h"
#include "tfrt/host_context/concurrent_work_queue.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
bool IsSingleDevice(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return compile_metadata.num_replicas() == 1 &&
compile_metadata.num_cores_per_replica() == 1;
}
absl::StatusOr<std::vector<DtypeAndShape>> BuildDtypeAndShape(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const IfrtRestoreTensorRegistry& ifrt_restore_tensor_registry) {
std::vector<DtypeAndShape> dtypes_and_shapes;
dtypes_and_shapes.reserve(inputs.size());
int variable_index = 0;
for (int i = 0; i < inputs.size(); i++) {
if (variable_index < variable_arg_indices.size() &&
i == variable_arg_indices[variable_index]) {
TF_ASSIGN_OR_RETURN(auto dtype_and_shape,
ifrt_restore_tensor_registry.GetDtypeAndShape(
inputs[i].scalar<tsl::tstring>()()));
dtypes_and_shapes.push_back(std::move(dtype_and_shape));
variable_index++;
} else {
dtypes_and_shapes.push_back(DtypeAndShape{.dtype = inputs[i].dtype(),
.shape = inputs[i].shape()});
}
}
return dtypes_and_shapes;
}
absl::StatusOr<xla::DeviceAssignment> GetRuntimeXlaDeviceAssignment(
const tsl::RCReference<xla::ifrt::DeviceList>& device_list,
int num_replicas, int num_cores_per_replica) {
const int num_devices = num_replicas * num_cores_per_replica;
const absl::Span<xla::ifrt::Device* const> devices = device_list->devices();
if (devices.size() != num_devices) {
return absl::InternalError(
absl::StrCat("Device assignment has ", devices.size(),
" devices, but expected ", num_devices));
}
xla::DeviceAssignment da(num_replicas, num_cores_per_replica);
int device_index = 0;
for (int replica_idx = 0; replica_idx < num_replicas; replica_idx++) {
for (int core_idx = 0; core_idx < num_cores_per_replica;
core_idx++, device_index++) {
da(replica_idx, core_idx) = devices[device_index]->Id().value();
VLOG(3) << "Added IFRT device id: " << da(replica_idx, core_idx);
}
}
return da;
}
static constexpr absl::string_view kDeviceAssignmentAttr = "device_assignment";
static constexpr absl::string_view kEntryFuncName = "main";
absl::StatusOr<std::vector<xla::ifrt::Device*>> GetAssignedDevices(
mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client,
int num_replicas, int num_cores_per_replica) {
auto op = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!op) {
return absl::InternalError("Could not find entry function in MLIR Module.");
}
auto device_assignment_attr =
op->getAttrOfType<mlir::ArrayAttr>(kDeviceAssignmentAttr);
std::optional<std::vector<int>> device_assignment_attr_val;
if (device_assignment_attr && !device_assignment_attr.getValue().empty()) {
std::vector<int> coords;
coords.reserve(num_replicas * num_cores_per_replica);
for (auto coord_attr : device_assignment_attr.getValue()) {
auto coord_attr_val = mlir::dyn_cast<mlir::IntegerAttr>(coord_attr);
if (!coord_attr_val) {
return absl::InternalError(
llvm::formatv("Device assignment attribute is not an integer: {0}",
device_assignment_attr)
.str());
}
coords.push_back(coord_attr_val.getInt());
}
device_assignment_attr_val = std::move(coords);
}
return GetAssignedIfrtDevices(ifrt_client, num_replicas,
num_cores_per_replica,
device_assignment_attr_val);
}
}
absl::StatusOr<std::unique_ptr<IfrtServingExecutable>>
IfrtServingExecutable::Create(
int64_t program_id, absl::string_view model_name,
absl::string_view signature_name, mlir::OwningOpRef<mlir::ModuleOp> module,
std::shared_ptr<xla::ifrt::Client> client,
tsl::thread::ThreadPool* thread_pool,
IfrtLoadedVariableRegistry* ifrt_loaded_variable_registry,
const IfrtRestoreTensorRegistry* ifrt_restore,
tfrt::ConcurrentWorkQueue* checkpoint_loader_queue,
tensorflow::DeviceMgr* device_mgr,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn,
IfrtServingCoreSelector* ifrt_serving_core_selector,
tsl::protobuf::Message* compilation_environement_proto) {
TF_ASSIGN_OR_RETURN(
tensorflow::tpu::TPUCompileMetadataProto original_compile_metadata,
GetCompileMetadata(*module, *client));
TF_ASSIGN_OR_RETURN(
std::vector<xla::ifrt::Device*> assigned_devices,
GetAssignedDevices(*module, *client,
original_compile_metadata.num_replicas(),
original_compile_metadata.num_cores_per_replica()));
auto executable = absl::WrapUnique(new IfrtServingExecutable(
program_id, model_name, signature_name, std::move(module),
std::move(client), thread_pool, ifrt_loaded_variable_registry,
ifrt_restore, checkpoint_loader_queue, device_mgr,
std::move(shape_representation_fn), ifrt_serving_core_selector,
std::move(original_compile_metadata),
xla::ifrt::BasicDeviceList::Create(xla::ifrt::BasicDeviceList::Devices(
assigned_devices.begin(), assigned_devices.end())),
compilation_environement_proto));
return executable;
}
absl::StatusOr<tsl::RCReference<xla::ifrt::Array>>
IfrtServingExecutable::ConvertTensorToArray(
const tensorflow::Tensor& tensor,
const tsl::RCReference<xla::ifrt::DeviceList>& device_list,
const xla::OpSharding& sharding) {
xla::ifrt::Shape input_shape = ToIfrtShape(tensor.shape());
VLOG(2) << "Converting tensor of shape " << input_shape;
TF_ASSIGN_OR_RETURN(auto hlo_sharding, xla::HloSharding::FromProto(sharding));
return MakeArrayFromTensor(*ifrt_client_, tensor, device_list,
std::move(hlo_sharding), thread_pool_);
}
absl::StatusOr<std::vector<tensorflow::FunctionDef>> BuildFunctionDef(
mlir::ModuleOp module) {
std::vector<tensorflow::FunctionDef> function_defs;
TF_RETURN_IF_ERROR(ExportFunctionDefs(
module,
[&](tensorflow::FunctionDef function_def) {
function_defs.push_back(std::move(function_def));
return absl::OkStatus();
},
false));
return function_defs;
}
struct HostCallbackBuilderInfo {
tensorflow::tf2xla::HostTransferMetadata device_to_host;
tensorflow::tf2xla::HostTransferMetadata host_to_device;
};
absl::StatusOr<absl::flat_hash_map<std::string, HostCallbackBuilderInfo>>
GroupHostCallbackByKey(const Tf2HloResult& tf2hlo_result) {
absl::flat_hash_map<std::string, HostCallbackBuilderInfo> host_callbacks;
for (const auto& device_to_host :
tf2hlo_result.host_compute_metadata.device_to_host()) {
auto& host_callback = host_callbacks[device_to_host.key()];
host_callback.device_to_host = device_to_host;
}
for (const auto& host_to_device :
tf2hlo_result.host_compute_metadata.host_to_device()) {
auto& host_callback = host_callbacks[host_to_device.key()];
host_callback.host_to_device = host_to_device;
}
return host_callbacks;
}
absl::StatusOr<xla::HostCallback> BuildHostCallback(
absl::string_view key, const HostCallbackBuilderInfo& builder_info,
mlir::ModuleOp module, tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
VLOG(2) << "BuildHostCallback for key: " << key;
DCHECK(device_mgr);
xla::HostCallback host_callback;
std::vector<DtypeAndShape> operand_type_and_shapes;
std::vector<DtypeAndShape> result_type_and_shapes;
auto to_xla_shape = [](tensorflow::DataType data_type,
const tensorflow::TensorShapeProto& shape)
-> absl::StatusOr<xla::Shape> {
xla::Shape xla_shape;
TF_ASSIGN_OR_RETURN(tensorflow::TensorShape tensor_shape,
tensorflow::TensorShape::BuildTensorShape(shape));
if (absl::Status status = tensorflow::TensorShapeToXLAShape(
data_type, tensor_shape, &xla_shape);
status.ok()) {
return xla_shape;
} else {
return status;
}
};
operand_type_and_shapes.reserve(builder_info.device_to_host.metadata_size());
result_type_and_shapes.reserve(builder_info.host_to_device.metadata_size());
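  // Device-to-host transfers become the host callback's operands and
  // host-to-device transfers become its results; the two sides are matched
  // up by XLA channel id.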
for (const auto& metadata : builder_info.device_to_host.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.operands.push_back(
{.channel_id = channel_id, .shape = shape});
operand_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
for (const auto& metadata : builder_info.host_to_device.metadata()) {
TF_ASSIGN_OR_RETURN(xla::Shape shape,
to_xla_shape(metadata.type(), metadata.shape()));
uint16_t channel_id = static_cast<uint16_t>(metadata.channel_id());
VLOG(2) << "Channel id: " << channel_id;
host_callback.results.push_back(
{.channel_id = channel_id, .shape = std::move(shape)});
result_type_and_shapes.push_back(
DtypeAndShape{.dtype = metadata.type(), .shape = metadata.shape()});
}
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> callback_module,
ExtractCallbackModule(module, key));
TF_ASSIGN_OR_RETURN(std::vector<tensorflow::FunctionDef> function_defs,
BuildFunctionDef(*callback_module));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<TfHostCallback> tf_host_callback,
TfHostCallback::Create(function_defs, key, operand_type_and_shapes,
result_type_and_shapes, device_mgr));
host_callback.callback = [tf_host_callback = tf_host_callback.get()](
void** output, void** input) {
return tf_host_callback->Call(input, output);
};
tf_host_callbacks.push_back(std::move(tf_host_callback));
return host_callback;
}
absl::StatusOr<std::vector<xla::HostCallback>> BuildHostCallbacks(
const Tf2HloResult& tf2hlo_result, mlir::ModuleOp module,
tensorflow::DeviceMgr* device_mgr,
std::vector<std::unique_ptr<TfHostCallback>>& tf_host_callbacks) {
TF_ASSIGN_OR_RETURN(auto host_callback_maps,
GroupHostCallbackByKey(tf2hlo_result));
std::vector<xla::HostCallback> host_callbacks;
host_callbacks.reserve(host_callback_maps.size());
for (const auto& [entry_function, builder_info] : host_callback_maps) {
TF_ASSIGN_OR_RETURN(auto host_callback,
BuildHostCallback(entry_function, builder_info, module,
device_mgr, tf_host_callbacks));
host_callbacks.push_back(std::move(host_callback));
}
return host_callbacks;
}
absl::StatusOr<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::CreateExecutableSynchronously(
mlir::OwningOpRef<mlir::ModuleOp> module_copy,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
TF_ASSIGN_OR_RETURN(
Tf2HloResult tf2hlo_result,
CompileTfToHlo(*module_copy, dtypes_and_shapes, signature_name(),
*ifrt_client_, compile_metadata,
shape_representation_fn_));
const int num_replicas = tf2hlo_result.compile_metadata.num_replicas();
const int num_partitions =
tf2hlo_result.compile_metadata.num_cores_per_replica();
VLOG(2) << " Number of replcas is " << num_replicas
<< " and num_partitions is " << num_partitions;
if (num_replicas > 1) {
return absl::UnimplementedError(
absl::StrCat("Only support single replica, but replica number is ",
num_replicas, " and num_partitions is ", num_partitions));
}
xla::CompileOptions xla_compile_options;
if (compilation_environment_proto_) {
tsl::protobuf::Message* comp_env_copy =
compilation_environment_proto_->New();
comp_env_copy->CopyFrom(*compilation_environment_proto_);
TF_RETURN_IF_ERROR(
xla_compile_options.executable_build_options.mutable_comp_envs()
->AddEnv(absl::WrapUnique<tsl::protobuf::Message>(comp_env_copy)));
}
xla_compile_options.executable_build_options.set_num_replicas(num_replicas);
xla_compile_options.executable_build_options.set_num_partitions(
num_partitions);
xla_compile_options.executable_build_options.set_use_spmd_partitioning(
original_compile_metadata_.use_spmd_for_xla_partitioning());
xla_compile_options.parameter_is_tupled_arguments = false;
if (UsePortableExecution(compile_metadata)) {
xla_compile_options.compile_portable_executable = true;
} else {
TF_ASSIGN_OR_RETURN(
xla::DeviceAssignment da,
GetRuntimeXlaDeviceAssignment(assigned_device_list_, num_replicas,
num_partitions));
VLOG(2) << "Device assignment :" << da.ToString();
xla_compile_options.executable_build_options.set_device_assignment(da);
}
std::vector<std::unique_ptr<TfHostCallback>> tf_host_callbacks;
TF_ASSIGN_OR_RETURN(auto host_callbacks,
BuildHostCallbacks(tf2hlo_result, *module_copy,
device_mgr_, tf_host_callbacks));
std::vector<tsl::RCReference<xla::ifrt::LoadedHostCallback>>
loaded_host_callbacks;
loaded_host_callbacks.reserve(host_callbacks.size());
for (const auto& host_callback : host_callbacks) {
loaded_host_callbacks.push_back(
tsl::MakeRef<xla::ifrt::PjRtHostSendAndRecvLoadedHostCallback>(
ifrt_client_.get(),
std::make_unique<xla::HostCallback>(host_callback)));
}
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::ifrt::LoadedExecutable> ifrt_executable,
ifrt_client_->GetDefaultCompiler()->Compile(
std::make_unique<xla::ifrt::HloProgram>(
tf2hlo_result.mlir_hlo_module.get()),
std::make_unique<xla::ifrt::XlaCompileOptions>(
xla_compile_options, loaded_host_callbacks)));
SharedCachedExecutableBundle executable_bundle =
std::make_shared<CachedExecutableBundle>();
executable_bundle->ifrt_executable = std::move(ifrt_executable);
executable_bundle->compile_metadata =
std::move(tf2hlo_result.compile_metadata);
executable_bundle->host_callbacks = std::move(tf_host_callbacks);
return executable_bundle;
}
xla::ifrt::Future<IfrtServingExecutable::SharedCachedExecutableBundle>
IfrtServingExecutable::LookUpOrCreateExecutable(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
absl::Span<const DtypeAndShape> dtypes_and_shapes) {
std::vector<tensorflow::TensorShape> input_shapes;
for (const auto& dtype_and_shape : dtypes_and_shapes) {
input_shapes.push_back(dtype_and_shape.shape);
}
Key key = {.input_shapes = std::move(input_shapes)};
xla::ifrt::Promise<SharedCachedExecutableBundle> promise;
xla::ifrt::Future<SharedCachedExecutableBundle> future;
mlir::OwningOpRef<mlir::ModuleOp> module_copy;
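  // Publish a promise-backed future into the cache while holding the lock so
  // concurrent callers with the same input shapes await a single
  // compilation; the module is cloned under the lock and compiled outside it.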
{
absl::MutexLock lock(&mutex_);
const auto it = executable_bundles_.find(key);
if (it != executable_bundles_.end()) {
return it->second;
}
if (is_frozen_) {
xla::ifrt::Future<SharedCachedExecutableBundle> frozen_future(
absl::FailedPreconditionError(
"Cannot compile for new input shapes after the executable is "
"already frozen."));
return frozen_future;
}
promise = xla::ifrt::Future<SharedCachedExecutableBundle>::CreatePromise();
future = xla::ifrt::Future<SharedCachedExecutableBundle>(promise);
executable_bundles_.emplace(key, future);
module_copy = mlir::OwningOpRef<mlir::ModuleOp>(module_->clone());
}
LOG(INFO) << "Cache missed. Building executable";
absl::StatusOr<SharedCachedExecutableBundle> executable_bundle =
CreateExecutableSynchronously(std::move(module_copy), compile_metadata,
dtypes_and_shapes);
promise.Set(std::move(executable_bundle));
return future;
}
void IfrtServingExecutable::Freeze() {
LOG(INFO) << "Freezing executable. Program id: " << program_id_;
absl::MutexLock lock(&mutex_);
is_frozen_ = true;
module_ = nullptr;
}
bool IfrtServingExecutable::UsePortableExecution(
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata) {
return IsSingleDevice(compile_metadata) && ifrt_serving_core_selector_;
}
absl::StatusOr<std::vector<tensorflow::Tensor>> IfrtServingExecutable::Execute(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices) {
for (int i = 1; i < variable_arg_indices.size(); i++) {
if (variable_arg_indices[i] <= variable_arg_indices[i - 1]) {
return absl::FailedPreconditionError(absl::StrCat(
"Expected variable_arg_indices in ascending order. But subsequence "
"starting at ",
i - 1, ": (", variable_arg_indices[i - 1], ", ",
variable_arg_indices[i], ")", " is not in ascending order"));
}
}
  if (!variable_arg_indices.empty() &&
      inputs.size() <= variable_arg_indices.back()) {
    return absl::FailedPreconditionError(absl::StrCat(
        "Variable arg index ", variable_arg_indices.back(),
        " is out of range: there are only ", inputs.size(), " inputs."));
  }
for (const int i : variable_arg_indices) {
if (inputs[i].dtype() != tensorflow::DT_STRING ||
!tensorflow::TensorShapeUtils::IsScalar(inputs[i].shape())) {
return absl::FailedPreconditionError(
absl::StrCat("Expected a scalar tensor as loaded variable array key, "
"but got type ",
inputs[i].dtype(), " and shape ",
inputs[i].shape().DebugString(), " at index ", i));
}
}
TF_ASSIGN_OR_RETURN(std::vector<DtypeAndShape> dtypes_and_shapes,
BuildDtypeAndShape(inputs, variable_arg_indices,
ifrt_restore_tensor_registry_));
tensorflow::tpu::TPUCompileMetadataProto compile_metadata =
original_compile_metadata_;
TF_RETURN_IF_ERROR(
UpdateCompileMetadata(compile_metadata, dtypes_and_shapes));
tsl::DeviceReservation device_reservation(kNoCoreSelectedIndex, nullptr);
tsl::RCReference<xla::ifrt::DeviceList> device_list;
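  // Single-device programs take the portable-execution path: a core is
  // reserved at run time instead of baking a device assignment into the
  // compiled executable.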
if (UsePortableExecution(compile_metadata)) {
device_reservation =
ifrt_serving_core_selector_->ReserveDevice(program_id_);
compile_metadata.clear_device_assignment();
TF_ASSIGN_OR_RETURN(xla::ifrt::Device * device,
ifrt_client_->LookupDevice(xla::ifrt::DeviceId(
device_reservation.device_index())));
device_list = xla::ifrt::BasicDeviceList::Create(
xla::ifrt::BasicDeviceList::Devices({device}));
} else {
device_list = assigned_device_list_;
}
TF_ASSIGN_OR_RETURN(SharedCachedExecutableBundle executable_bundle,
LookUpOrCreateExecutable(
compile_metadata, absl::MakeSpan(dtypes_and_shapes))
.Await());
if (executable_bundle->compile_metadata.args().size() !=
dtypes_and_shapes.size()) {
return absl::InternalError(absl::StrCat(
"Expected ", executable_bundle->compile_metadata.args().size(),
" but got ", dtypes_and_shapes.size(), " arguments"));
}
TF_RETURN_IF_ERROR(AsyncLoadIfrtArray(inputs, variable_arg_indices,
*executable_bundle, device_list));
VLOG(2) << "Completed AsyncLoadIfrtArray";
std::vector<tsl::RCReference<xla::ifrt::Array>> args;
args.reserve(inputs.size());
int variable_index = 0;
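  // Interleave loaded-variable arrays (looked up by their string-key
  // tensors) with on-the-fly conversions of the remaining dense inputs.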
for (int i = 0; i < inputs.size(); i++) {
if (variable_index < variable_arg_indices.size() &&
i == variable_arg_indices[variable_index]) {
std::vector<int> device_ids;
device_ids.reserve(device_list->size());
for (xla::ifrt::Device* device : device_list->devices()) {
device_ids.push_back(device->Id().value());
}
TF_ASSIGN_OR_RETURN(
xla::HloSharding hlo_sharding,
xla::HloSharding::FromProto(
executable_bundle->compile_metadata.args()[i].sharding()));
IfrtLoadedVariableRegistry::Key key{
.device_ids = std::move(device_ids),
.input_name = inputs[i].scalar<tsl::tstring>()(),
.hlo_sharding = std::move(hlo_sharding),
};
TF_ASSIGN_OR_RETURN(
auto loaded_variable,
ifrt_loaded_variable_registry_.GetLoadedVariable(key));
TF_ASSIGN_OR_RETURN(tsl::RCReference<xla::ifrt::Array> single_array,
loaded_variable.array.Await());
args.push_back(std::move(single_array));
variable_index++;
} else {
TF_ASSIGN_OR_RETURN(
auto single_array,
ConvertTensorToArray(
inputs[i], device_list,
executable_bundle->compile_metadata.args()[i].sharding()));
args.push_back(single_array);
}
}
DCHECK_EQ(args.size(), dtypes_and_shapes.size());
VLOG(2) << "Start Execution";
std::optional<tsl::RCReference<xla::ifrt::DeviceList>> execution_device_list;
if (UsePortableExecution(compile_metadata)) {
execution_device_list = device_list;
}
TF_ASSIGN_OR_RETURN(
auto execution_result,
executable_bundle->ifrt_executable->Execute(
absl::MakeSpan(args), {.fill_status = true},
std::move(execution_device_list)));
auto status = execution_result.status.Await();
TF_RETURN_IF_ERROR(status);
if (executable_bundle->compile_metadata.retvals().size() !=
execution_result.outputs.size()) {
    return absl::InternalError(absl::StrCat(
        "Expected ", executable_bundle->compile_metadata.retvals().size(),
        " but got ", execution_result.outputs.size(), " outputs"));
}
std::vector<xla::ifrt::Future<tensorflow::Tensor>> output_futures;
output_futures.reserve(execution_result.outputs.size());
for (int i = 0; i < execution_result.outputs.size(); ++i) {
tensorflow::TensorShape tensor_shape;
const tsl::RCReference<xla::ifrt::Array>& array_for_copy =
execution_result.outputs[i];
const tpu::TPUCompileMetadataProto::Retval& metadata_retval =
executable_bundle->compile_metadata.retvals()[i];
VLOG(2) << "Output sharding: " << array_for_copy->sharding().DebugString();
TF_ASSIGN_OR_RETURN(auto hlo_sharding, xla::HloSharding::FromProto(
metadata_retval.sharding()));
output_futures.push_back(MakeTensorFromArray(*ifrt_client_, *array_for_copy,
hlo_sharding, device_list,
thread_pool_));
}
std::vector<tensorflow::Tensor> outputs;
outputs.reserve(output_futures.size());
for (auto& output_future : output_futures) {
TF_ASSIGN_OR_RETURN(auto tensor, output_future.Await());
outputs.push_back(std::move(tensor));
}
return outputs;
}
absl::Status IfrtServingExecutable::AsyncLoadIfrtArray(
absl::Span<const tensorflow::Tensor> inputs,
absl::Span<const int> variable_arg_indices,
const CachedExecutableBundle& executable_bundle,
const tsl::RCReference<xla::ifrt::DeviceList>& devices) {
for (const int i : variable_arg_indices) {
if (inputs[i].dtype() != tensorflow::DT_STRING ||
!tensorflow::TensorShapeUtils::IsScalar(inputs[i].shape())) {
return absl::FailedPreconditionError(
absl::StrCat("Expected a scalar tensor as loaded variable array key, "
"but got type ",
inputs[i].dtype(), " and shape ",
inputs[i].shape().DebugString(), " at index ", i));
}
std::string runtime_name = inputs[i].scalar<tsl::tstring>()();
TF_ASSIGN_OR_RETURN(
xla::HloSharding hlo_sharding,
xla::HloSharding::FromProto(
executable_bundle.compile_metadata.args()[i].sharding()));
VariableDeviceShardingConfig sharding_config{
.hlo_sharding = std::move(hlo_sharding),
};
for (xla::ifrt::Device* device : devices->devices()) {
sharding_config.device_ids.push_back(device->Id().value());
}
TF_RETURN_IF_ERROR(
ifrt_serving::AsyncLoadRestoredTensorAsIfrtLoadedVariable(
runtime_name, ifrt_client_, thread_pool_,
ifrt_restore_tensor_registry_, ifrt_loaded_variable_registry_,
checkpoint_loader_queue_, sharding_config));
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/future.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/serving_device_selector.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_matcher.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/tstring.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using tensorflow::ifrt_serving::test_utils::GetMlirModulePath;
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::TensorEq;
using ::testing::ElementsAre;
using ::testing::Return;
using ::tsl::testing::StatusIs;
struct VariableInputTestParam {
std::vector<tensorflow::Tensor> in_tensors;
std::vector<bool>
is_variable;
std::vector<tensorflow::Tensor> expected_out_tensors;
};
using VariableInputTest = ::testing::TestWithParam<VariableInputTestParam>;
class IfrtServingExecutableTest : public ::testing::Test {
protected:
explicit IfrtServingExecutableTest() {
helper_ = std::make_unique<test_utils::IfrtServingExecutableTestHelper>(
&selector_);
}
tsl::test_util::MockServingDeviceSelector selector_;
std::unique_ptr<test_utils::IfrtServingExecutableTestHelper> helper_;
};
TEST_F(IfrtServingExecutableTest, Basic) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillOnce(Return(tsl::DeviceReservation(0, nullptr)));
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
const auto expected_out =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, MultipleShapes) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(6)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
const auto expected_out2 =
AsTensor<int32_t>({30}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs1, outputs2;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
TF_ASSERT_OK_AND_ASSIGN(outputs2,
executable->Execute(absl::MakeSpan(inputs2), {}));
}
ASSERT_EQ(executable->num_executables(), 2);
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
EXPECT_THAT(outputs2, ElementsAre(TensorEq(expected_out2)));
}
TEST_F(IfrtServingExecutableTest, ReturnFailOnUncompiledShapeAfterFrozen) {
int64_t program_id = 123456;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(3)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable =
helper_->MakeExecutable(program_id, GetMlirModulePath("executable.mlir"));
auto x1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y1 = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
const auto expected_out1 =
AsTensor<int32_t>({14}, tensorflow::TensorShape({1, 1}));
std::vector<tensorflow::Tensor> inputs1{x1, y1};
std::vector<tensorflow::Tensor> outputs1;
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs1), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
executable->Freeze();
outputs1.clear();
TF_ASSERT_OK_AND_ASSIGN(outputs1,
executable->Execute(absl::MakeSpan(inputs1), {}));
EXPECT_THAT(outputs1, ElementsAre(TensorEq(expected_out1)));
auto x2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({1, 4}));
auto y2 = AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({4, 1}));
std::vector<tensorflow::Tensor> inputs2{x2, y2};
std::vector<tensorflow::Tensor> outputs2;
auto status = executable->Execute(absl::MakeSpan(inputs2), {});
EXPECT_THAT(status, StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_F(IfrtServingExecutableTest, Spmd) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result, ElementsAre(TensorEq(expected_out)));
}
TEST_F(IfrtServingExecutableTest, SpmdTwoReturns) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id))).Times(0);
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("spmd_executable_two_returns.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3, 4, 5, 6, 7, 8},
tensorflow::TensorShape({4, 2}));
auto y = AsTensor<int32_t>({11, 12, 13, 14, 15, 16, 17, 18},
tensorflow::TensorShape({4, 2}));
auto z = AsTensor<int32_t>({21, 22, 23, 24, 25, 26, 27, 28},
tensorflow::TensorShape({4, 2}));
const auto expected_out0 = AsTensor<int32_t>({33, 36, 39, 42, 45, 48, 51, 54},
tensorflow::TensorShape({4, 2}));
const auto expected_out1 = AsTensor<int32_t>({20, 20, 20, 20, 20, 20, 20, 20},
tensorflow::TensorShape({4, 2}));
std::vector<tensorflow::Tensor> inputs{x, y, z};
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
EXPECT_THAT(result,
ElementsAre(TensorEq(expected_out0), TensorEq(expected_out1)));
}
TEST_F(IfrtServingExecutableTest, NoReturn) {
int64_t program_id = 111111;
EXPECT_CALL(selector_, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper_->MakeExecutable(
program_id, GetMlirModulePath("executable_no_return.mlir"));
auto x = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({1, 3}));
auto y = AsTensor<int32_t>({1, 2, 3}, tensorflow::TensorShape({3, 1}));
std::vector<tensorflow::Tensor> inputs{x, y};
for (int i = 0; i < helper_->num_cores(); i++) {
TF_ASSERT_OK(executable->Execute(absl::MakeSpan(inputs), {}).status());
}
TF_ASSERT_OK_AND_ASSIGN(auto result,
executable->Execute(absl::MakeSpan(inputs), {}));
ASSERT_EQ(result.size(), 0);
}
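// Parameterized test: inputs flagged in `is_variable` are registered with the
// restore-tensor registry and passed as string key tensors (resolved through
// `loaded_variable_indices`), while the rest are passed by value. The results
// must match the same program run with plain tensors.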
TEST_P(VariableInputTest, InterleaveVariable) {
tsl::test_util::MockServingDeviceSelector device_selector;
test_utils::IfrtServingExecutableTestHelper helper(&device_selector);
int64_t program_id = 111111;
EXPECT_CALL(device_selector, ReserveDevice(absl::StrCat(program_id)))
.Times(1)
.WillRepeatedly(
[](::testing::Unused) { return tsl::DeviceReservation(0, nullptr); });
auto executable = helper.MakeExecutable(
program_id, GetMlirModulePath("executable_long_inputs.mlir"));
IfrtRestoreTensorRegistry* ifrt_restore_tensor_registry =
helper.ifrt_restore_tensor_registry();
std::vector<tensorflow::Tensor> inputs;
std::vector<int> loaded_variable_indices;
for (int i = 0; i < GetParam().in_tensors.size(); i++) {
if (GetParam().is_variable[i]) {
auto input_tensor_promise =
xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto input_tensor_future =
xla::ifrt::Future<tensorflow::Tensor>(input_tensor_promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restore_tensor_info = {
.dtype_and_shape{.dtype = GetParam().in_tensors[i].dtype(),
.shape = GetParam().in_tensors[i].shape()},
.tensor_future = input_tensor_future};
std::string variable_name = absl::StrCat("variable_", i);
ASSERT_OK(ifrt_restore_tensor_registry->TryRegister(variable_name,
restore_tensor_info));
loaded_variable_indices.push_back(i);
input_tensor_promise.Set(GetParam().in_tensors[i]);
tensorflow::Tensor key_tensor(tensorflow::DT_STRING, {});
key_tensor.scalar<tsl::tstring>()() = variable_name;
inputs.push_back(key_tensor);
} else {
inputs.push_back(GetParam().in_tensors[i]);
}
}
ASSERT_EQ(inputs.size(), GetParam().is_variable.size());
for (int i = 0; i < helper.num_cores(); i++) {
TF_ASSERT_OK(executable
->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices))
.status());
}
TF_ASSERT_OK_AND_ASSIGN(
auto result,
executable->Execute(absl::MakeSpan(inputs),
absl::MakeSpan(loaded_variable_indices)));
EXPECT_THAT(result,
ElementsAre(TensorEq(GetParam().expected_out_tensors[0]),
TensorEq(GetParam().expected_out_tensors[1]),
TensorEq(GetParam().expected_out_tensors[2])));
}
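// Every case below uses the same five inputs and expected outputs; only the
// `is_variable` mask changes, covering all-variable, all-value, and several
// interleaved combinations.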
INSTANTIATE_TEST_SUITE_P(
VariableInputTests, VariableInputTest,
::testing::ValuesIn<VariableInputTestParam>(
{
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, true, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, false, false, true, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, true, false, false, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {true, false, false, true, false},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
{
.in_tensors =
{
AsTensor<int32_t>({2, 2}, TensorShape({1, 2})),
AsTensor<int32_t>({3, 3}, TensorShape({2, 1})),
AsTensor<int32_t>({4, 4}, TensorShape({1, 2})),
AsTensor<int32_t>({5, 5}, TensorShape({2, 1})),
AsTensor<int32_t>({10, 10}, TensorShape({1, 2})),
},
.is_variable = {false, true, true, false, true},
.expected_out_tensors =
{
AsTensor<int32_t>({12}, TensorShape({1, 1})),
AsTensor<int32_t>({40}, TensorShape({1, 1})),
AsTensor<int32_t>({100}, TensorShape({1, 1})),
},
},
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_serving_executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b22a5836-c249-40c3-848e-f8cb6f761433 | cpp | tensorflow/tensorflow | random | tensorflow/compiler/tf2xla/lib/random.cc | third_party/xla/third_party/tsl/tsl/platform/random_test.cc | #include "tensorflow/compiler/tf2xla/lib/random.h"
#include <cmath>
#include <limits>
#include "xla/client/lib/constants.h"
#include "xla/client/lib/math.h"
#include "xla/client/xla_builder.h"
#include "xla/xla_data.pb.h"
namespace tensorflow {
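// Transforms uniform [0, 1) samples into samples from a standard normal
// distribution truncated to [-2, 2].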
xla::XlaOp TruncatedNormal(xla::XlaOp uniform) {
const double kA = -2.0;
const double kB = 2.0;
const double kMu = 0.0;
const double kSigma = 1.0;
return ParameterizedTruncatedNormal(
uniform, xla::ScalarLike(uniform, kMu), xla::ScalarLike(uniform, kSigma),
xla::ScalarLike(uniform, kA), xla::ScalarLike(uniform, kB));
}
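// Inverse-CDF sampling for a truncated normal: `uniform` is mapped into the
// CDF interval [Phi(alpha), Phi(beta)] and inverted with erf^-1. The clamp to
// [-1 + eps, 1 - eps] keeps ErfInv away from its singularities at +/-1, and
// the final clamp guarantees the result stays inside [a, b].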
xla::XlaOp ParameterizedTruncatedNormal(xla::XlaOp uniform, xla::XlaOp mu,
xla::XlaOp sigma, xla::XlaOp a,
xla::XlaOp b) {
xla::XlaOp one = xla::ScalarLike(uniform, 1.0);
xla::XlaOp two = xla::ScalarLike(uniform, 2.0);
xla::XlaOp sqrt_2 = xla::ScalarLike(uniform, std::sqrt(2.0));
auto normal_cdf = [&](xla::XlaOp x) {
return (one + xla::Erf(x / sqrt_2)) / two;
};
xla::XlaOp alpha = (a - mu) / sigma;
xla::XlaOp beta = (b - mu) / sigma;
xla::XlaOp alpha_normal_cdf = normal_cdf(alpha);
xla::XlaOp beta_normal_cdf = normal_cdf(beta);
xla::XlaOp p =
alpha_normal_cdf + (beta_normal_cdf - alpha_normal_cdf) * uniform;
xla::XlaOp v = two * p - one;
xla::PrimitiveType primitive_type =
uniform.builder()->GetShape(uniform).value().element_type();
xla::XlaOp epsilon = xla::Epsilon(uniform.builder(), primitive_type);
v = xla::Clamp(-one + epsilon, v, one - epsilon);
xla::XlaOp x = mu + sigma * sqrt_2 * xla::ErfInv(v);
x = xla::Clamp(a, x, b);
return x;
}
} | #include "tsl/platform/random.h"
#include <set>
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
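// One million draws from New64() must all be distinct; a collision would
// point at a broken or badly seeded generator.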
TEST(New64Test, SanityCheck) {
std::set<uint64> values;
for (int i = 0; i < 1000000; i++) {
uint64 x = New64();
EXPECT_TRUE(values.insert(x).second) << "duplicate " << x;
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/lib/random.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/random_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cbb3b921-f947-46ca-b80d-a3cd1c02ea80 | cpp | tensorflow/tensorflow | dynamic_slice_fusion_rewriter | third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.cc | third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter_test.cc | #include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_constants.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/while_loop_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tools/hlo_extractor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using DefUseDataflowPath = absl::InlinedVector<HloInstruction*, 2>;
using DefUseDataflowPaths = absl::InlinedVector<DefUseDataflowPath, 4>;
using UseDefDataflowPath = absl::InlinedVector<HloInstruction*, 4>;
using UseDefDataflowPaths = absl::InlinedVector<HloInstruction*, 8>;
using DataflowPathView = absl::Span<HloInstruction* const>;
using DataflowPathsView = absl::Span<DataflowPathView>;
using InstructionSet = absl::flat_hash_set<HloInstruction*>;
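// Maps an offset-producing instruction inside a while body to the value it
// takes on each loop iteration, as computed by the HLO evaluator.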
using OffsetValueMap =
absl::flat_hash_map<HloInstruction*, std::vector<Literal>>;
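// Returns true for instructions that merely forward data; only these may
// appear between a slice and the hero op on a matched dataflow path.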
bool IsNoOp(const HloInstruction* hlo) {
return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple,
HloOpcode::kGetTupleElement>(hlo);
}
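// Returns true if `hlo` is a custom call with either a registered legacy
// call target or a typed-FFI handler for `platform_name`. Custom calls whose
// tuple result contains a token are rejected.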
bool IsCustomCall(const HloInstruction* hlo, absl::string_view platform_name) {
auto* custom_call = DynCast<HloCustomCallInstruction>(hlo);
if (custom_call == nullptr) return false;
if (custom_call->shape().IsTuple() &&
absl::c_any_of(
custom_call->shape().tuple_shapes(),
[&](const Shape& sub_shape) { return sub_shape.IsToken(); }))
return false;
const std::string call_target_name = custom_call->custom_call_target();
bool is_ffi_custom_call =
custom_call->api_version() == CustomCallApiVersion::API_VERSION_TYPED_FFI;
void* call_target = CustomCallTargetRegistry::Global()->Lookup(
call_target_name, std::string(platform_name));
absl::StatusOr<ffi::HandlerRegistration> handler_registration =
ffi::FindHandler(call_target_name, platform_name);
bool found_custom_call = !is_ffi_custom_call && call_target != nullptr;
bool found_ffi_handler = is_ffi_custom_call && handler_registration.ok();
return found_custom_call || found_ffi_handler;
}
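// Returns true if the (dynamic-)slice or dynamic-update-slice is contiguous
// and its byte offsets respect kXlaAllocatedBufferAlignBytes, so the sliced
// region can be addressed directly by the fusion.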
bool IsAlignedSlice(const HloInstruction* slice) {
DCHECK(slice->opcode() == HloOpcode::kSlice ||
slice->opcode() == HloOpcode::kDynamicSlice ||
slice->opcode() == HloOpcode::kDynamicUpdateSlice)
<< "Unknown slice operation: " << slice->ToString();
if (!IsContiguousSlice(*slice)) return false;
auto [full_shape, slice_shape] = [&] {
if (auto* dus = DynCast<HloDynamicUpdateSliceInstruction>(slice)) {
return std::make_pair(dus->shape(), dus->update()->shape());
}
return std::make_pair(slice->operand(0)->shape(), slice->shape());
}();
auto strides = ShapeUtil::ByteStrides(slice_shape);
if (!strides.has_value()) return false;
for (auto dim : slice_shape.layout().minor_to_major()) {
if ((strides.value()[dim] % kXlaAllocatedBufferAlignBytes) == 0) {
return true;
}
if (slice_shape.dimensions(dim) < full_shape.dimensions(dim)) {
return (slice->opcode() == HloOpcode::kSlice &&
(((*strides)[dim] * slice->slice_starts(dim)) %
kXlaAllocatedBufferAlignBytes ==
0));
}
}
return true;
}
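// Returns the trip count of `whileop`, preferring the known_trip_count from
// the backend config and falling back to while-loop analysis.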
std::optional<int64_t> GetWhileLoopTripCount(HloInstruction* whileop) {
CHECK(whileop->opcode() == HloOpcode::kWhile);
auto backend_config = whileop->backend_config<WhileLoopBackendConfig>();
if (!backend_config.ok() || !backend_config.value().has_known_trip_count()) {
VLOG(4) << "Backend config not ok. Computing while loop trip count for "
<< whileop->name();
return ComputeWhileLoopTripCount(whileop);
}
int trip_count = backend_config.value().known_trip_count().n();
VLOG(4) << "Found trip count in backend config for " << whileop->name()
<< ": " << trip_count;
return trip_count;
}
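// Evaluates the offset instruction `idx` once per iteration of the enclosing
// while loop. Two small modules are extracted -- one computing the offset,
// one computing the next induction-variable value -- and run through the HLO
// evaluator trip_count times. Returns std::nullopt if `idx` is not in a
// while body, the trip count is unknown, or the offset depends on anything
// other than the loop induction variable.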
std::optional<std::vector<Literal>> GetValues(const HloInstruction* idx) {
VLOG(3) << "Getting values for " << idx->name();
const HloComputation* computation = idx->parent();
if (!computation->IsWhileBodyComputation()) {
VLOG(3) << "While calculating offset values for " << idx->name()
<< ", the parent computation(" << computation->name()
<< ") is not a while computation";
return std::nullopt;
}
HloInstruction* whileop = computation->WhileCallInstruction();
std::optional<int64_t> trip_count = GetWhileLoopTripCount(whileop);
if (trip_count == std::nullopt) {
VLOG(3) << "Unable to get trip count for " << whileop->name();
return std::nullopt;
}
auto root_tuple = computation->root_instruction();
if (root_tuple->opcode() != HloOpcode::kTuple) {
VLOG(3) << "Root operation " << root_tuple->name() << " of computation "
<< computation->name()
<< " expected to be a tuple because it is a while body. Found: "
<< root_tuple->opcode();
return std::nullopt;
}
std::optional<int64_t> loop_indvar_tuple_idx =
GetLoopInductionVarTupleIdx(whileop);
if (loop_indvar_tuple_idx == std::nullopt) {
VLOG(3) << "Unable to find tuple index for loop induction variable";
return std::nullopt;
}
auto update_operation =
computation->root_instruction()->operand(*loop_indvar_tuple_idx);
HloInstruction* loop_indvar = nullptr;
for (auto instr : computation->instructions()) {
if (instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0) == computation->parameter_instruction(0) &&
instr->tuple_index() == *loop_indvar_tuple_idx) {
loop_indvar = instr;
}
}
if (loop_indvar == nullptr) {
VLOG(3) << "Unable to find get-tuple-element("
<< computation->parameter_instruction(0)->name()
<< "), index=" << *loop_indvar_tuple_idx << " in "
<< computation->name();
return std::nullopt;
}
auto IsValidModule =
[loop_indvar](std::unique_ptr<HloModule>& module) -> bool {
if (module == nullptr || module->entry_computation()->num_parameters() != 1)
return false;
const HloInstruction* p0 =
module->entry_computation()->parameter_instruction(0);
if (p0->shape() != loop_indvar->shape()) {
VLOG(4) << "Extracted module must depend only on the loop induction "
"variable.";
return false;
};
return llvm::all_of(module->entry_computation()->instructions(),
[](const HloInstruction* instr) {
return instr->opcode() != HloOpcode::kPartitionId &&
instr->opcode() != HloOpcode::kReplicaId;
});
};
auto params = computation->parameter_instructions();
if (params.size() != 1 || !params[0]->shape().IsTuple()) {
VLOG(3) << "While loop parameter is expected to be a tuple.";
return std::nullopt;
}
std::unique_ptr<HloModule> offset_module = ExtractModule(
idx, -1,
[loop_indvar, params](const HloInstruction* inst) -> bool {
return inst != loop_indvar && llvm::find(params, inst) == params.end();
},
[](const HloInstruction* inst) -> ReplaceType {
return ReplaceType::kReplaceParam;
});
std::unique_ptr<HloModule> update_module = ExtractModule(
update_operation, -1,
[loop_indvar, params](const HloInstruction* inst) -> bool {
return inst != loop_indvar && llvm::find(params, inst) == params.end();
},
[](const HloInstruction* inst) -> ReplaceType {
return ReplaceType::kReplaceParam;
});
if (!IsValidModule(offset_module) || !IsValidModule(update_module)) {
return std::nullopt;
}
VLOG(3) << "Successfully generated offset and update modules";
std::vector<Literal> offset_values;
absl::Status status = [&]() -> absl::Status {
HloEvaluator evaluator;
const Literal& init =
whileop->operand(0)->operand(*loop_indvar_tuple_idx)->literal();
std::unique_ptr<Literal> updated_value = nullptr;
for (int64_t i = 0; i < *trip_count; i++) {
if (i == 0) {
evaluator.ResetVisitStates();
TF_ASSIGN_OR_RETURN(offset_values.emplace_back(),
evaluator.Evaluate(*offset_module, {&init}));
CHECK(offset_values.back().shape() == idx->shape());
evaluator.ResetVisitStates();
TF_ASSIGN_OR_RETURN(Literal next_update_value,
evaluator.Evaluate(*update_module, {&init}));
updated_value = next_update_value.CloneToUnique();
} else {
evaluator.ResetVisitStates();
TF_ASSIGN_OR_RETURN(
offset_values.emplace_back(),
evaluator.Evaluate(*offset_module, {updated_value.get()}));
CHECK(offset_values.back().shape() == idx->shape());
evaluator.ResetVisitStates();
TF_ASSIGN_OR_RETURN(
Literal next_update_value,
evaluator.Evaluate(*update_module, {updated_value.get()}));
updated_value = next_update_value.CloneToUnique();
}
}
VLOG(3) << "Offset values for " << idx->name() << ": "
<< absl::StrJoin(offset_values, ",",
[](std::string* out, const Literal& l) {
out->append(l.ToString());
});
return absl::OkStatus();
}();
if (status.ok()) return offset_values;
return std::nullopt;
}
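// Appends a zero-initialized iteration counter to the while loop's carried
// tuple and increments it by one in the body. Returns the get-tuple-element
// exposing the counter inside the body. Note that the original while op is
// replaced, so callers must not keep a pointer to it.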
absl::StatusOr<HloInstruction*> AddLoopIterationParam(HloInstruction* whileop) {
CHECK(whileop->opcode() == HloOpcode::kWhile);
HloComputation* while_body = whileop->while_body();
HloComputation* while_cond = whileop->while_condition();
const HloInstruction* while_init = whileop->operand(0);
CHECK(while_init->opcode() == HloOpcode::kTuple);
std::vector<HloInstruction*> new_init_operands(while_init->operands().begin(),
while_init->operands().end());
PrimitiveType indvar_type =
whileop->while_init()
->operand(*GetLoopInductionVarTupleIdx(whileop))
->shape()
.element_type();
new_init_operands.push_back(whileop->parent()->AddInstruction(
    HloInstruction::CreateConstant(LiteralUtil::CreateR0(indvar_type, 0)),
    "zero"));
HloInstruction* new_while_init = whileop->parent()->AddInstruction(
HloInstruction::CreateTuple(new_init_operands));
HloInstruction* new_whileop = whileop->parent()->AddInstruction(
whileop->CloneWithNewOperands(new_while_init->shape(), {new_while_init}));
if (whileop->IsRoot()) {
absl::InlinedVector<HloInstruction*, 4> tuple_entries;
tuple_entries.reserve(while_init->shape().tuple_shapes_size());
for (auto i = 0; i < while_init->shape().tuple_shapes_size(); i++) {
tuple_entries.push_back(whileop->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(new_whileop, i)));
}
HloInstruction* new_whileop_result = whileop->parent()->AddInstruction(
HloInstruction::CreateTuple(tuple_entries));
TF_RETURN_IF_ERROR(
whileop->parent()->ReplaceInstruction(whileop, new_whileop_result));
} else {
TF_RETURN_IF_ERROR(whileop->parent()->ReplaceInstructionWithDifferentShape(
whileop, new_whileop));
}
while_cond->ReplaceParameter(0, HloInstruction::CreateParameter(
0, new_while_init->shape(), "new_param"));
HloInstruction* new_body_param = while_body->ReplaceParameter(
0,
HloInstruction::CreateParameter(0, new_while_init->shape(), "new_param"));
HloInstruction* gte = while_body->AddInstruction(
HloInstruction::CreateGetTupleElement(
new_body_param, new_while_init->shape().tuple_shapes_size() - 1),
"loop_iteration_count");
HloInstruction* c1 = while_body->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(indvar_type, 1)),
"one");
HloInstruction* add = while_body->AddInstruction(
HloInstruction::CreateBinary(gte->shape(), HloOpcode::kAdd, gte, c1),
"updated_loop_iteration_count");
absl::InlinedVector<HloInstruction*, 2> old_return_tuple_operands =
while_body->root_instruction()->operands();
std::vector<HloInstruction*> new_return_tuple_operands(
old_return_tuple_operands.begin(), old_return_tuple_operands.end());
new_return_tuple_operands.push_back(add);
HloInstruction* new_return_tuple = while_body->AddInstruction(
HloInstruction::CreateTuple(new_return_tuple_operands));
while_body->set_root_instruction(new_return_tuple, true);
return gte;
}
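// Packs the per-iteration offset literals into a single rank-1 constant
// instruction (one element per iteration). Returns nullptr for non-integral
// element types.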
std::unique_ptr<HloInstruction> GetAsConstantInstruction(
const std::vector<Literal>& offset_values) {
if (offset_values.empty()) return nullptr;
std::unique_ptr<HloInstruction> value =
primitive_util::PrimitiveTypeSwitch<std::unique_ptr<HloInstruction>>(
[&offset_values](
auto primitive_type_constant) -> std::unique_ptr<HloInstruction> {
if constexpr (primitive_util::IsIntegralType(
primitive_type_constant)) {
using NativeT = typename primitive_util::PrimitiveTypeToNative<
primitive_type_constant>::type;
Array<NativeT> constant_literals(
    {static_cast<int64_t>(offset_values.size())});
std::vector<NativeT> values_as_ty;
values_as_ty.reserve(offset_values.size());
for (auto& i : offset_values) {
  values_as_ty.push_back(
      static_cast<NativeT>(i.data<NativeT>()[0]));
}
constant_literals.SetValues(values_as_ty);
return HloInstruction::CreateConstant(
    LiteralUtil::CreateFromArray(constant_literals));
}
return nullptr;
},
offset_values[0].shape().element_type());
return value;
}
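// If `matched_instr` is a dynamic-slice or dynamic-update-slice, evaluates
// each non-constant index operand across all loop iterations and records the
// results in `value_map`. Returns false if any index cannot be reduced to
// integral per-iteration values, in which case the match is abandoned.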
bool PopulateOffsetValueMap(const HloInstruction* matched_instr,
OffsetValueMap& value_map) {
OffsetValueMap local_value_map;
if (auto dyn_idx_op = DynCast<HloDynamicIndexInstruction>(matched_instr);
dyn_idx_op) {
for (auto indexop : dyn_idx_op->index_operands()) {
if (indexop->IsConstant()) continue;
if (local_value_map.contains(indexop) || value_map.contains(indexop))
continue;
std::optional<std::vector<Literal>> values = GetValues(indexop);
if (values == std::nullopt) return false;
if (values->empty() || !primitive_util::IsIntegralType(
values->at(0).shape().element_type())) {
return false;
}
std::transform(values->begin(), values->end(),
std::back_inserter(local_value_map[indexop]),
[](Literal& l) { return std::move(l); });
}
}
for (auto& [op, values] : local_value_map) {
std::transform(values.begin(), values.end(),
std::back_inserter(value_map[op]),
[](Literal& l) { return std::move(l); });
}
VLOG(2) << "Received " << local_value_map.size() << " new offsets.";
return true;
}
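// Rewrites each fusion so loop-dependent offsets are no longer computed at
// run time: an iteration counter is threaded through the while loop, handed
// to the fusion as an extra operand, and used to dynamic-slice a constant
// array of precomputed offsets that replaces the original offset parameter.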
absl::Status ReplaceOffsetCalculationWithArrayAccess(
PtrVec<HloInstruction*> fusions, OffsetValueMap& value_map) {
absl::flat_hash_map<HloComputation*, HloInstruction*> loop_iteration_param;
for (auto& [instr, _] : value_map) {
VLOG(2) << "Handling " << instr->name();
if (!instr->parent()->IsWhileBodyComputation()) {
VLOG(2) << "It is not a while body computation";
return absl::InternalError(
absl::StrFormat("%s is expected to be a while computation.",
instr->parent()->name()));
}
if (loop_iteration_param.find(instr->parent()) !=
loop_iteration_param.end()) {
VLOG(2) << "This was already handled";
continue;
}
VLOG(2) << "Adding loop iteration param for " << instr->parent()->name();
TF_ASSIGN_OR_RETURN(
loop_iteration_param[instr->parent()],
AddLoopIterationParam(instr->parent()->WhileCallInstruction()));
}
for (auto fusion_instr : fusions) {
for (auto maybe_offset : fusion_instr->operands()) {
if (value_map.find(maybe_offset) == value_map.end()) continue;
HloInstruction* loop_counter =
loop_iteration_param[fusion_instr->parent()];
HloComputation* fusion = fusion_instr->fused_instructions_computation();
loop_iteration_param[fusion] =
fusion_instr->AddFusionOperand(loop_counter);
break;
}
}
for (auto fusion_instr : fusions) {
absl::flat_hash_map<HloInstruction*, HloInstruction*> param_replacement_map;
absl::InlinedVector<HloInstruction*, 4> parameters;
HloComputation* fusion_comp =
fusion_instr->fused_instructions_computation();
for (auto [idx, maybe_offset] : llvm::enumerate(fusion_instr->operands())) {
HloInstruction* offset_param =
fusion_instr->fused_instructions_computation()->parameter_instruction(
idx);
if (value_map.find(maybe_offset) == value_map.end() ||
param_replacement_map.contains(offset_param))
continue;
std::vector<Literal>& values = value_map.at(maybe_offset);
std::unique_ptr<HloInstruction> values_as_const_instruction =
GetAsConstantInstruction(values);
if (values_as_const_instruction == nullptr) {
return absl::InternalError(
"Unable to convert offsets into constant array.");
}
HloInstruction* array = fusion_comp->AddInstruction(
std::move(values_as_const_instruction), "offset_values");
HloInstruction* ds =
fusion_comp->AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(offset_param->shape().element_type(), {1}),
array, {loop_iteration_param[fusion_comp]}, {1}));
HloInstruction* offset = fusion_comp->AddInstruction(
HloInstruction::CreateReshape(offset_param->shape(), ds), "offset");
param_replacement_map[offset_param] = offset;
parameters.push_back(offset_param);
}
for (auto param = parameters.rbegin(); param != parameters.rend();
param++) {
auto offset = param_replacement_map[*param];
TF_RETURN_IF_ERROR(fusion_comp->ReplaceInstruction(*param, offset));
}
}
return absl::OkStatus();
}
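// Walks the use-def chain up from each operand of `instr` (skipping operands
// aliased to outputs), looking for an aligned (dynamic-)slice reachable only
// through no-op instructions. Returns the matched instructions in def-to-use
// order, with `instr` itself appended last.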
UseDefDataflowPaths GetSlicedOperandPaths(const HloInstruction* instr,
OffsetValueMap& value_map) {
UseDefDataflowPaths sliced_operand_paths;
InstructionSet processed_instrs;
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
aliasing_pairs;
if (instr->opcode() == HloOpcode::kCustomCall) {
aliasing_pairs =
Cast<HloCustomCallInstruction>(instr)->output_to_operand_aliasing();
}
absl::flat_hash_set<int64_t> aliased_operands;
for (const auto& pair : aliasing_pairs) {
aliased_operands.insert(pair.second.first);
}
for (const auto* operand : instr->operands()) {
if (aliased_operands.contains(instr->operand_index(operand))) continue;
UseDefDataflowPath maybe_sliced_operand_path;
bool slice_found = false;
auto maybe_slice_instr =
HloBfsFindIf({operand}, [&](const HloInstruction* cur) {
if (processed_instrs.contains(cur)) return true;
maybe_sliced_operand_path.push_back(const_cast<HloInstruction*>(cur));
if (IsOpcodeAnyOf<HloOpcode::kDynamicSlice, HloOpcode::kSlice>(cur)) {
if (IsAlignedSlice(cur)) {
slice_found = true;
return slice_found;
}
}
return !IsNoOp(cur);
});
if (maybe_slice_instr == std::nullopt) continue;
bool valid_slice_status =
PopulateOffsetValueMap(*maybe_slice_instr, value_map);
if ((valid_slice_status && slice_found) ||
processed_instrs.contains(maybe_slice_instr.value())) {
sliced_operand_paths.insert(sliced_operand_paths.end(),
maybe_sliced_operand_path.rbegin(),
maybe_sliced_operand_path.rend());
processed_instrs.insert(maybe_sliced_operand_path.begin(),
maybe_sliced_operand_path.end());
}
}
sliced_operand_paths.push_back(const_cast<HloInstruction*>(instr));
return sliced_operand_paths;
}
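// Walks the def-use chain down from `instr` -- or from each get-tuple-element
// user when it returns a tuple -- looking for an aligned dynamic-update-slice
// reachable only through single-user no-op instructions. Returns one path per
// matched DUS.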
DefUseDataflowPaths GetSlicedUserPaths(const HloInstruction* instr,
OffsetValueMap& value_map) {
DefUseDataflowPaths sliced_user_paths;
InstructionSet processed_instrs;
auto traverse_hlo_and_collect = [&](HloInstruction* start) {
DefUseDataflowPath maybe_sliced_user_path;
bool dus_found = false;
auto maybe_dus_instr = HloBfsFindIf(
{start},
[&](const HloInstruction* cur) {
if (processed_instrs.contains(cur)) return true;
maybe_sliced_user_path.push_back(const_cast<HloInstruction*>(cur));
if (const auto slice_instr =
DynCast<HloDynamicUpdateSliceInstruction>(cur)) {
if (IsAlignedSlice(slice_instr)) {
dus_found = true;
return true;
}
}
return cur->user_count() > 1 || !IsNoOp(cur);
},
false);
if (maybe_dus_instr == std::nullopt) return;
bool valid_slice_status =
PopulateOffsetValueMap(*maybe_dus_instr, value_map);
if ((valid_slice_status && dus_found) ||
processed_instrs.contains(maybe_dus_instr.value())) {
processed_instrs.insert(maybe_sliced_user_path.begin(),
maybe_sliced_user_path.end());
sliced_user_paths.push_back(std::move(maybe_sliced_user_path));
}
};
if (instr->shape().IsTuple()) {
for (auto* user : instr->users()) {
if (DynCast<HloGetTupleElementInstruction>(user)) {
traverse_hlo_and_collect(user);
}
}
} else {
if (instr->user_count() == 1) {
traverse_hlo_and_collect(instr->users().front());
}
}
return sliced_user_paths;
}
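// Returns the operands of the matched instructions that are defined outside
// the match; these become the parameters of the fusion computation.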
absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures(
DataflowPathView matches) {
absl::InlinedVector<HloInstruction*, 4> captures;
InstructionSet matched_instrs(matches.begin(), matches.end());
for (HloInstruction* instr : matches) {
for (HloInstruction* operand : instr->operands()) {
if (!matched_instrs.contains(operand) &&
absl::c_find(captures, operand) == captures.end()) {
captures.emplace_back(operand);
}
}
}
return captures;
}
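// Builds the fusion body's root tuple, forwarding each element of the hero's
// tuple-shaped result and substituting the final DUS of a sliced user path
// where one exists. Recurses into nested tuple shapes.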
absl::Status CreateRootTuple(
HloInstruction* hero, HloComputation::Builder& builder,
DataflowPathsView sliced_user_paths,
absl::flat_hash_map<const HloInstruction*, HloInstruction*>&
instr_mapping) {
unsigned tuple_size = hero->shape().tuple_shapes_size();
std::vector<HloInstruction*> sliced_elems(tuple_size, nullptr);
for (auto& sliced_user_path : sliced_user_paths) {
auto gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front());
sliced_elems[gte->tuple_index()] = sliced_user_path.back();
}
std::vector<HloInstruction*> elements;
for (size_t i = 0; i < tuple_size; ++i) {
if (sliced_elems[i] != nullptr) {
elements.push_back(instr_mapping[sliced_elems[i]]);
continue;
}
auto* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(instr_mapping[hero], i));
if (hero->shape().tuple_shapes(i).IsTuple()) {
instr_mapping[gte] = gte;
TF_RETURN_IF_ERROR(CreateRootTuple(gte, builder, {}, instr_mapping));
elements.push_back(builder.last_added_instruction());
} else {
elements.push_back(gte);
}
}
if (elements.size() > 1)
builder.AddInstruction(HloInstruction::CreateTuple(elements));
return absl::OkStatus();
}
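// Clones the captured values, the sliced-operand path, and the sliced-user
// paths into a fresh "dynamic-slice-fusion" computation and registers it
// with the module.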
absl::StatusOr<HloComputation*> CreateFusionBody(
HloModule* module, DataflowPathView sliced_operand_paths,
DataflowPathsView sliced_user_paths, DataflowPathView captures) {
HloComputation::Builder builder("dynamic-slice-fusion");
absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping;
auto mapped_operands = [&](HloInstruction* instr) {
absl::InlinedVector<HloInstruction*, 4> operands;
for (HloInstruction* operand : instr->operands()) {
operands.push_back(instr_mapping.at(operand));
}
return operands;
};
for (const HloInstruction* capture : captures) {
int64_t index = instr_mapping.size();
instr_mapping[capture] =
builder.AddInstruction(HloInstruction::CreateParameter(
index, capture->shape(), absl::StrCat("p", index)));
}
HloInstruction* hero = nullptr;
for (HloInstruction* instr : sliced_operand_paths) {
instr_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr)));
hero = instr;
}
for (auto& sliced_user_path : sliced_user_paths) {
for (HloInstruction* instr : sliced_user_path) {
instr_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr)));
}
}
if (hero->shape().IsTuple() && hero->shape().tuple_shapes_size() > 0) {
TF_RETURN_IF_ERROR(
CreateRootTuple(hero, builder, sliced_user_paths, instr_mapping));
}
return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false);
}
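// Wraps `body` in a kCustom fusion whose backend config selects the
// "dynamic_address_computation" custom fusion, or "address_computation" when
// no dynamic slices participate.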
absl::StatusOr<HloInstruction*> CreateFusionInstruction(
HloModule* module, HloInstruction* orig, DataflowPathView captures,
HloComputation* body, bool dynamic) {
HloComputation* parent = orig->parent();
HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion(
body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom,
captures, body));
module->SetAndUniquifyInstrName(fusion, "address_computation");
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind("__custom_fusion");
CustomFusionConfig config;
config.set_name(dynamic ? "dynamic_address_computation"
: "address_computation");
*backend_config.mutable_custom_fusion_config() = config;
TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config)));
return fusion;
}
}
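// Pass entry point. For every cuBLAS matmul, array-shaped reduce-scatter, or
// registered custom call, collects sliced operand and sliced user paths and
// outlines them into a dynamic-slice fusion. As an illustration (shapes
// abbreviated, cf. the SimpleGemm case in the accompanying unit tests), a
// pattern such as
//
//   %s0 = slice(%p0), slice={[1:2], [0:8], [0:8]}
//   %b0 = bitcast(%s0)
//   ROOT %cc = custom-call(%b0, ...), custom_call_target="__cublas$gemm"
//
// becomes a fusion with kind=kCustom and
// custom_fusion_config={"name":"address_computation"} that contains the
// slice, bitcast, and custom call. Loop-dependent offsets are then replaced
// by constant-array lookups via ReplaceOffsetCalculationWithArrayAccess.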
absl::StatusOr<bool> DynamicSliceFusionRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
absl::flat_hash_map<HloInstruction*,
std::pair<UseDefDataflowPaths, DefUseDataflowPaths>>
matches_kv;
std::vector<HloInstruction*> matches;
OffsetValueMap value_map;
for (HloComputation* computation : module->computations()) {
if (computation->IsFusionComputation()) continue;
for (HloInstruction* instr : computation->instructions()) {
if ((instr->opcode() == HloOpcode::kReduceScatter &&
instr->shape().IsArray()) ||
IsLegacyCublasMatmul(*instr) || IsCustomCall(instr, platform_name_)) {
UseDefDataflowPaths sliced_operand_paths =
GetSlicedOperandPaths(instr, value_map);
VLOG(1) << "For operation: " << instr->name() << ", operands: "
<< absl::StrJoin(
sliced_operand_paths, ",",
[](std::string* out, const HloInstruction* inst) {
out->append(inst->name());
});
bool has_sliced_operand_paths = sliced_operand_paths.size() > 1;
DefUseDataflowPaths sliced_user_paths =
GetSlicedUserPaths(instr, value_map);
VLOG(1) << "For operation: " << instr->name() << ", users: "
<< absl::StrJoin(
sliced_user_paths, ",",
[](std::string* out, const DefUseDataflowPath& path) {
out->append(
"{" +
absl::StrJoin(path, ",",
[](std::string* out,
const HloInstruction* inst) {
out->append(inst->name());
}) +
"}");
});
bool has_sliced_user_paths = absl::c_any_of(
sliced_user_paths,
[&](auto& sliced_user_path) { return !sliced_user_path.empty(); });
if (absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) {
return DynCast<HloDynamicUpdateSliceInstruction>(
sliced_user_path.back()) == nullptr;
})) {
return absl::InternalError(
"Expect sliced user path to end with a DUS.");
}
if (has_sliced_operand_paths || has_sliced_user_paths) {
matches_kv[instr] = std::make_pair(std::move(sliced_operand_paths),
std::move(sliced_user_paths));
matches.push_back(instr);
}
}
}
}
if (matches.empty()) return false;
PtrVec<HloInstruction*> fusions;
for (HloInstruction* hero : matches) {
auto& paths = matches_kv[hero];
auto& [sliced_operand_paths, sliced_user_paths] = paths;
std::vector<HloInstruction*> matched_instrs;
absl::c_copy(sliced_operand_paths, std::back_inserter(matched_instrs));
std::vector<DataflowPathView> sliced_user_paths_view;
for (auto& sliced_user_path : sliced_user_paths) {
absl::c_copy(sliced_user_path, std::back_inserter(matched_instrs));
DataflowPathView sliced_user_path_view{&sliced_user_path.front(),
sliced_user_path.size()};
sliced_user_paths_view.push_back(std::move(sliced_user_path_view));
}
auto captures = GetPatternCaptures(matched_instrs);
TF_ASSIGN_OR_RETURN(
HloComputation * fusion_body,
CreateFusionBody(module, sliced_operand_paths,
DataflowPathsView(sliced_user_paths_view), captures));
bool has_dynamic_slices = absl::c_any_of(matched_instrs, [&](auto* instr) {
return DynCast<HloDynamicIndexInstruction>(instr) != nullptr;
});
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion,
CreateFusionInstruction(module, hero, captures, fusion_body,
has_dynamic_slices));
fusions.push_back(fusion);
HloComputation* parent = hero->parent();
if (fusion->shape().IsTuple()) {
TF_RETURN_IF_ERROR(parent->ReplaceInstructionWithDifferentShape(
const_cast<HloInstruction*>(hero), fusion));
for (auto& sliced_user_path : sliced_user_paths) {
auto old_gte =
Cast<HloGetTupleElementInstruction>(sliced_user_path.front());
HloInstruction* gte =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
fusion, old_gte->tuple_index()));
TF_RETURN_IF_ERROR(
parent->ReplaceInstruction(sliced_user_path.back(), gte));
}
} else {
auto* instr_to_be_replaced = const_cast<HloInstruction*>(hero);
if (sliced_user_paths.empty()) {
if (hero->shape().IsTuple()) {
if (hero->user_count() != 1 ||
!DynCast<HloGetTupleElementInstruction>(hero->users().front())) {
return absl::InternalError(
"Expect a single get-tuple-element user of the original "
"tuple-shaped hero op when address computation fusion does "
"not return a tuple");
}
instr_to_be_replaced = hero->users().front();
}
} else {
instr_to_be_replaced = sliced_user_paths.front().back();
}
TF_RETURN_IF_ERROR(
parent->ReplaceInstruction(instr_to_be_replaced, fusion));
if (hero->parent()) {
TF_RETURN_IF_ERROR(hero->parent()->RemoveInstruction(hero));
}
}
}
TF_RETURN_IF_ERROR(
ReplaceOffsetCalculationWithArrayAccess(fusions, value_map));
return true;
}
}
} | #include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h"
#include <cstddef>
#include <optional>
#include "absl/status/status.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/stream.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
class DynamicSliceFusionRewriterTest : public HloTestBase {};
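// Baseline: a static slice feeding a cuBLAS gemm is outlined into an
// "address_computation" custom fusion containing the slice, bitcast, and
// custom call.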
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemm) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWithWorkspace) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWorkspaceIgnored) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotRoot) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1)
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandHasMultipleUsers) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[4,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[2:3], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %bitcast.41)
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[2:3], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[P0]], [[P1]])
; CHECK-DAG: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK-DAG: backend_config={
; CHECK-DAG: "kind":"__custom_fusion",
; CHECK-DAG: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK-DAG: }
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[B0]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsHaveMultipleUsers) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.0 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmSlicingNotParameter) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[4,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.12 = f16[2,8,8]{2,1,0} slice(%p0), slice={[0:2], [0:8], [0:8]}
%slice.13 = f16[1,8,8]{2,1,0} slice(%slice.12), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1)
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[2,8,8]{2,1,0} slice([[P0]]), slice={[0:2], [0:8], [0:8]}
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[S0]], [[P1]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
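// Negative case: a non-contiguous slice cannot be turned into an address
// computation, so passing std::nullopt asserts that the rewriter leaves the
// module unchanged.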
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotContiguousSlice) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,4,6]{2,1,0} slice(%p0), slice={[1:2], [0:4], [0:6]}
%bitcast.41 = f16[4,6]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,6,4]{2,1,0} slice(%p1), slice={[1:2], [0:6], [0:4]}
%bitcast.42 = f16[6,4]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[4,4]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNonNoOpInSliceChain) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%slice.14 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%add.0 = f16[1,8,8]{2,1,0} add(%slice.13, %slice.14)
%bitcast.41 = f16[8,8]{1,0} bitcast(%add.0)
%slice.15 = f16[1,8,8]{2,1,0} slice(%p1), slice={[0:1], [0:8], [0:8]}
%slice.16 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%add.1 = f16[1,8,8]{2,1,0} add(%slice.15, %slice.16)
%bitcast.42 = f16[8,8]{1,0} bitcast(%add.1)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmDuplicateOperand) {
const char* hlo = R"(
HloModule test
ENTRY %main {
%p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0)
%get-tuple-element.240 = f32[100,100]{1,0} get-tuple-element(%p0), index=0
%get-tuple-element.241 = f32[100,100]{1,0} get-tuple-element(%p0), index=1
%concatenate.10 = f32[200,100]{1,0} concatenate(%get-tuple-element.240, %get-tuple-element.241), dimensions={0}
%custom-call.16 = (f32[200,100]{1,0}, s8[120000]{0}) custom-call(%concatenate.10, %get-tuple-element.240),
custom_call_target="__cublas$gemm",
backend_config={
"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]},
"epilogue":"DEFAULT",
"lhs_stride":"20000",
"rhs_stride":"10000",
"grad_x":false,
"grad_y":false
}
}
%get-tuple-element.97 = f32[200,100]{1,0} get-tuple-element(%custom-call.16), index=0
%slice.26 = f32[100,100]{1,0} slice(%get-tuple-element.97), slice={[0:100], [0:100]}
ROOT %custom-call.17 = (f32[100,100]{1,0}, s8[80000]{0}) custom-call(%slice.26, %slice.26),
custom_call_target="__cublas$gemm",
backend_config={
"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]},
"epilogue":"DEFAULT",
"lhs_stride":"10000",
"rhs_stride":"10000",
"grad_x":false,
"grad_y":false
}
}
})";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0)
; CHECK: [[S0:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[0:100], [0:100]}
; CHECK-NOT: slice
; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) custom-call([[S0]], [[S0]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%p1 = f16[2,8,8]{2,1,0} parameter(0)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder2) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[0:1], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandAliasingOutput) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0)
%get-tuple-element.287 = f32[100,100]{1,0} get-tuple-element(%p0), index=0
%get-tuple-element.288 = f32[100,100]{1,0} get-tuple-element(%p0), index=1
%concatenate.12 = f32[200,100]{1,0} concatenate(%get-tuple-element.287, %get-tuple-element.288), dimensions={0}
%slice.30 = f32[100,100]{1,0} slice(%concatenate.12), slice={[16:116], [0:100]}
%slice.34 = f32[100,100]{1,0} slice(%concatenate.12), slice={[99:199], [0:100]}
ROOT %cublas-gemm.15 = (f32[100,100]{1,0}, s8[120000]{0}) custom-call(%get-tuple-element.287, %slice.30, %slice.34),
custom_call_target="__cublas$gemm",
output_to_operand_aliasing={{0}: (2, {})},
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":1,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]},
"epilogue":"DEFAULT",
"lhs_stride":"10000",
"rhs_stride":"10000",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P2:%[^ ]+]] = f32[100,100]{1,0} parameter(2)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[100,100]{1,0} parameter(1)
; CHECK-DAG: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0)
; CHECK-DAG: [[S1:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[16:116], [0:100]}
; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[120000]{0}) custom-call([[P1]], [[S1]], [[P2]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[P:%[^ ]+]] = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0)
; CHECK: [[GTE0:%[^ ]+]] = f32[100,100]{1,0} get-tuple-element([[P]]), index=0
; CHECK: [[GTE1:%[^ ]+]] = f32[100,100]{1,0} get-tuple-element([[P]]), index=1
; CHECK: [[CONCAT:%[^ ]+]] = f32[200,100]{1,0} concatenate([[GTE0]], [[GTE1]]), dimensions={0}
; CHECK: [[S:%[^ ]+]] = f32[100,100]{1,0} slice([[CONCAT]]), slice={[99:199], [0:100]}
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[120000]{0}) fusion([[CONCAT]], [[GTE0]], [[S]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsFromSameSlice) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%bitcast.42 = f16[8,8]{0,1} bitcast(%slice.13)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{0,1} bitcast([[S0]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
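// The helpers below register a typed-FFI custom-call target
// ("__xla_test$$memcpy") that performs a device-to-device copy. The
// SimpleCustomCall test uses it as the hero of a dynamic-slice fusion.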
static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src,
ffi::AnyBuffer dst) {
se::DeviceMemoryBase dst_mem = dst.device_memory();
se::DeviceMemoryBase src_mem = src.device_memory();
return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size());
}
XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
                           .Arg<ffi::AnyBuffer>()  // src
                           .Arg<ffi::AnyBuffer>()  // dst
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", "gpu",
kMemcpy);
TEST_F(DynamicSliceFusionRewriterTest, SimpleCustomCall) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$memcpy",
{Slice(Broadcast(ConstantR0WithType(&b, F32, 42.0), {256}), {0},
{128}, {1})},
ShapeUtil::MakeShape(F32, {128}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
xla::HloModuleConfig hlo_config(
xla::ProgramShape(computation.proto().host_program_shape()),
      /*ignore_layouts=*/false);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
hlo_config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto(
computation.proto(), hlo_config));
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[256]{0} parameter(0)
; CHECK: [[S0:%[^ ]+]] = f32[128]{0} slice([[P0]]), slice={[0:128]}
; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[S0]]),
; CHECK: custom_call_target="__xla_test$$memcpy",
; CHECK: api_version=API_VERSION_TYPED_FFI
; CHECK: }
; CHECK: ENTRY %{{.*}} {
; CHECK: [[C0:%[^ ]+]] = f32[] constant(42)
; CHECK: [[BC:%[^ ]+]] = f32[256]{0} broadcast([[C0]])
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion([[BC]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"),
expected);
}
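// A no-op legacy (untyped API) custom-call target, used by the *Legacy
// tests below to exercise the rewriter on non-FFI custom calls.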
void Callback_Void(se::gpu::GpuStreamHandle stream, void** buffers,
                   const char* /*opaque*/, size_t /*opaque_len*/) {}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Void, "gpu");
TEST_F(DynamicSliceFusionRewriterTest, SimpleCustomCallLegacy) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_Void",
{Slice(Broadcast(ConstantR0WithType(&b, F32, 42.0), {256}), {0},
{128}, {1})},
ShapeUtil::MakeShape(F32, {128}), "");
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
xla::HloModuleConfig hlo_config(
xla::ProgramShape(computation.proto().host_program_shape()),
      /*ignore_layouts=*/false);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
hlo_config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto(
computation.proto(), hlo_config));
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[256]{0} parameter(0)
; CHECK: [[S0:%[^ ]+]] = f32[128]{0} slice([[P0]]), slice={[0:128]}
; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[S0]]),
; CHECK: custom_call_target="Callback_Void"
; CHECK: }
; CHECK: ENTRY %{{.*}} {
; CHECK: [[C0:%[^ ]+]] = f32[] constant(42)
; CHECK: [[BC:%[^ ]+]] = f32[256]{0} broadcast([[C0]])
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion([[BC]])
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, TupleSliceCustomCallLegacy) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_Void",
{
Tuple(&b,
{
Slice(Broadcast(ConstantR0WithType(&b, F32, 5), {8, 8}),
{0, 0}, {4, 8}, {1, 1}),
Broadcast(ConstantR0WithType(&b, F32, 2), {256}),
}),
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 3), {1024}),
Broadcast(ConstantR0WithType(&b, F32, 4), {8}),
}),
},
ShapeUtil::MakeShape(F32, {128}), "");
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
xla::HloModuleConfig hlo_config(
xla::ProgramShape(computation.proto().host_program_shape()),
      /*ignore_layouts=*/false);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
hlo_config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto(
computation.proto(), hlo_config));
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0)
; CHECK-DAG: [[S0:%[^ ]+]] = f32[4,8]{1,0} slice([[P0]]), slice={[0:4], [0:8]}
; CHECK-DAG: [[P1:%[^ ]+]] = f32[256]{0} parameter(1)
; CHECK-DAG: [[T0:%[^ ]+]] = (f32[4,8]{1,0}, f32[256]{0}) tuple([[S0]], [[P1]])
; CHECK-DAG: [[P2:%[^ ]+]] = (f32[1024]{0}, f32[8]{0}) parameter(2)
; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[T0]], [[P2]]),
; CHECK: custom_call_target="Callback_Void"
; CHECK: }
; CHECK: ENTRY %{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion(
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, TupledOutputCustomCallLegacy) {
XlaBuilder b(TestName());
auto custom_call = CustomCall(
&b, "Callback_Void",
{
Tuple(&b,
{
Slice(Broadcast(ConstantR0WithType(&b, F32, 5), {8, 8}),
{0, 0}, {4, 8}, {1, 1}),
Broadcast(ConstantR0WithType(&b, F32, 2), {256}),
}),
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 3), {1024}),
Broadcast(ConstantR0WithType(&b, F32, 4), {8}),
}),
},
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {8}),
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {128}),
ShapeUtil::MakeShape(F32, {256}),
}),
ShapeUtil::MakeShape(F32, {1024}),
ShapeUtil::MakeShape(F32, {4, 8}),
}),
"");
Tuple(&b, {GetTupleElement(GetTupleElement(custom_call, 1), 0),
GetTupleElement(custom_call, 2)});
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
xla::HloModuleConfig hlo_config(
xla::ProgramShape(computation.proto().host_program_shape()),
      /*ignore_layouts=*/false);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
hlo_config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto(
computation.proto(), hlo_config));
const char* expected = R"(
; CHECK: %dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P2:%[^ ]+]] = (f32[1024]{0}, f32[8]{0}) parameter(2)
; CHECK-DAG: [[P1:%[^ ]+]] = f32[256]{0} parameter(1)
; CHECK-DAG: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0)
; CHECK-DAG: [[S0:%[^ ]+]] = f32[4,8]{1,0} slice([[P0]]), slice={[0:4], [0:8]}
; CHECK-DAG: [[T0:%[^ ]+]] = (f32[4,8]{1,0}, f32[256]{0}) tuple([[S0]], [[P1]])
; CHECK: [[CC:%[^ ]+]] = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) custom-call([[T0]], [[P2]]),
; CHECK: custom_call_target="Callback_Void"
; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[8]{0} get-tuple-element([[CC]]), index=0
; CHECK-DAG: [[GTE1:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) get-tuple-element([[CC]]), index=1
; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[128]{0} get-tuple-element([[GTE1]]), index=0
; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[256]{0} get-tuple-element([[GTE1]]), index=1
; CHECK-DAG: [[T1:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) tuple([[GTE2]], [[GTE3]])
; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1024]{0} get-tuple-element([[CC]]), index=2
; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[4,8]{1,0} get-tuple-element([[CC]]), index=3
; CHECK: ROOT {{.*}} = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) tuple([[GTE0]], [[T1]], [[GTE4]], [[GTE5]])
; CHECK: }
; CHECK: ENTRY %{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0}
; CHECK: }
; CHECK-DAG: [[GTE6:%[^ ]+]] = f32[1024]{0} get-tuple-element([[FUSION]]), index=2
; CHECK-DAG: [[GTE7:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) get-tuple-element([[FUSION]]), index=1
; CHECK-DAG: [[GTE8:%[^ ]+]] = f32[128]{0} get-tuple-element([[GTE7]]), index=0
; CHECK: ROOT {{.*}} = (f32[128]{0}, f32[1024]{0}) tuple([[GTE8]], [[GTE6]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, UnalignedSlice) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_Void",
{Slice(Broadcast(ConstantR0WithType(&b, S32, 42), {17}), {1}, {17}, {1})},
ShapeUtil::MakeShape(S32, {16}), "");
TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build());
xla::HloModuleConfig hlo_config(
xla::ProgramShape(computation.proto().host_program_shape()),
      /*ignore_layouts=*/false);
DebugOptions debug_options = GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false);
hlo_config.set_debug_options(debug_options);
TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto(
computation.proto(), hlo_config));
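  // The slice starts mid-buffer at an offset that is not suitably aligned,
  // so the rewriter is expected to leave the module unchanged (std::nullopt).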
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemm) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
ROOT custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmWithWorkspace) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
ROOT custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmWorkspaceIgnored) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(custom-call.1), index=0
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmNotRoot) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT res = f16[8,8]{1,0} add(custom-call.1, custom-call.1)
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemm) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[1,8,8]{2,1,0} parameter(0)
p1 = f16[1,8,8]{2,1,0} parameter(1)
p2 = f16[4,8,8]{2,1,0} parameter(2)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
bitcast.41 = f16[8,8]{1,0} bitcast(p0)
bitcast.42 = f16[8,8]{1,0} bitcast(p1)
custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1)
ROOT dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32)
}
)";
const char* expected = R"(
; CHECK-DAG: [[P0:%[^ ]+]] = f16[8,8]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[8,8]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(2)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(3)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(4)
; CHECK-DAG: [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[P0]], [[P1]]),
; CHECK-DAG: custom_call_target="__cublas$gemm"
; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[CC]])
; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[4,8,8]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmNotRoot) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
p2 = f16[4,8,8]{2,1,0} parameter(2)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1)
dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32)
ROOT res = f16[4,8,8]{2,1,0} log(dus)
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(4)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK-DAG: [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK-DAG: custom_call_target="__cublas$gemm"
; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[CC]])
; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = f16[4,8,8]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} log([[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmWithWorkspace) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[2,8,8]{2,1,0} parameter(0)
p1 = f16[2,8,8]{2,1,0} parameter(1)
p2 = f16[4,8,8]{2,1,0} parameter(2)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.41 = f16[8,8]{1,0} bitcast(slice.13)
slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8}
bitcast.42 = f16[8,8]{1,0} bitcast(slice.14)
custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(custom-call.1), index=0
bitcast.43 = f16[1,8,8]{2,1,0} bitcast(get-tuple-element.0)
dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32)
get-tuple-element.1 = s8[256]{0} get-tuple-element(custom-call.1), index=1
ROOT tuple = (f16[4,8,8]{2,1,0}, s8[256]{0}) tuple(dus, get-tuple-element.1)
}
)";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3)
; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(4)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[DOT]])
; CHECK: [[DUS:%[^ ]+]] = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]])
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0})
; CHECK: tuple([[DUS]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: [[DUS_MAIN:%[^ ]+]] = f16[4,8,8]{2,1,0} get-tuple-element([[FUSION]]), index=0
; CHECK: [[WORKSPACE_MAIN:%[^ ]+]] = s8[256]{0} get-tuple-element([[FUSION]]), index=1
; CHECK: ROOT {{.*}} = (f16[4,8,8]{2,1,0}, s8[256]{0})
; CHECK: tuple([[DUS_MAIN]], [[WORKSPACE_MAIN]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmWorkspaceIgnored) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[8,8]{1,0} parameter(0)
%p1 = f16[8,8]{1,0} parameter(1)
%p2 = f16[4,8,8]{2,1,0} parameter(2)
%c1_s32 = s32[] constant(1)
%c0_s32 = s32[] constant(0)
%custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%p0, %p1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
%get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0
%bitcast.43 = f16[1,8,8]{2,1,0} bitcast(%get-tuple-element.0)
ROOT %dus = f16[4,8,8]{2,1,0} dynamic-update-slice(%p2, %bitcast.43, %c1_s32, %c0_s32, %c0_s32)
})";
const char* expected = R"(
; CHECK: dynamic-slice-fusion{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[8,8]{1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[8,8]{1,0} parameter(1)
; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(2)
; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(3)
; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(4)
; CHECK-DAG: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[P0]], [[P1]]),
; CHECK-DAG: custom_call_target="__cublas$gemm"
; CHECK-DAG: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[DOT]])
; CHECK-DAG: [[DUS:%[^ ]+]] = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]])
; CHECK-DAG: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0})
; CHECK: tuple([[DUS]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%dynamic-slice-fusion,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0}
; CHECK: }
; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[4,8,8]{2,1,0} get-tuple-element([[FUSION]]), index=0
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSConstantOffset) {
const char* hlo = R"(
HloModule test, replica_count=2
add {
param_0 = f16[] parameter(0)
param_1 = f16[] parameter(1)
ROOT add.1 = f16[] add(param_0, param_1)
}
ENTRY main.9 {
param_0 = f16[128,128]{1,0} parameter(0)
param_1 = f16[128,128]{1,0} parameter(1)
constant_20 = u32[] constant(20)
constant_0 = u32[] constant(0)
reduce-scatter = f16[64,128]{1,0} reduce-scatter(param_0), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
ROOT loop_dynamic_update_slice_fusion = f16[128,128]{1,0} dynamic-update-slice(param_1, reduce-scatter, constant_20, constant_0)
}
)";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSParameterOffset) {
const char* hlo = R"(
HloModule test, replica_count=2
add.clone {
x.1 = f16[] parameter(0)
y.1 = f16[] parameter(1)
ROOT add.462 = f16[] add(x.1, y.1)
}
ENTRY %main.9 {
param_0 = f16[128,128]{1,0} parameter(0)
param_1 = f16[128,128]{1,0} parameter(1)
param_2 = u32[] parameter(2)
constant_0 = u32[] constant(0)
reduce-scatter = f16[64,128]{1,0} reduce-scatter(param_0), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add.clone
ROOT dynamic-update-slice = f16[128,128]{1,0} dynamic-update-slice(param_1, reduce-scatter, param_2, constant_0)
})";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSLoopIterationOffset) {
const char* hlo = R"(
HloModule jit_scan, replica_count=2
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.6 = f32[] add(param_0, param_1)
}
Body {
arg_tuple.1 = (s32[], f32[128,128]{1,0}, f32[128,128,128]{2,1,0}, f32[128,128]{1,0}) parameter(0)
get-tuple-element.5 = s32[] get-tuple-element(arg_tuple.1), index=0
constant.1 = s32[] constant(1)
add.7 = s32[] add(get-tuple-element.5, constant.1)
get-tuple-element.6 = f32[128,128]{1,0} get-tuple-element(arg_tuple.1), index=3
get-tuple-element.7 = f32[128,128,128]{2,1,0} get-tuple-element(arg_tuple.1), index=2
reduce-scatter.0 = f32[64,128]{1,0} reduce-scatter(get-tuple-element.6), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
bitcast.63 = f32[1,64,128]{2,1,0} bitcast(reduce-scatter.0)
constant.2 = s32[] constant(0)
compare.4 = pred[] compare(get-tuple-element.5, constant.2), direction=LT
constant.3 = s32[] constant(128)
add.8 = s32[] add(get-tuple-element.5, constant.3)
select.2 = s32[] select(compare.4, add.8, get-tuple-element.5)
dynamic-update-slice.2 = f32[128,128,128]{2,1,0} dynamic-update-slice(get-tuple-element.7, bitcast.63, select.2, constant.2, constant.2)
ROOT tuple.1 = tuple(add.7, get-tuple-element.6, dynamic-update-slice.2, get-tuple-element.6)
}
Cond {
arg_tuple.0 = (s32[], f32[128,128]{1,0}, f32[128,128,128]{2,1,0}, f32[128,128]{1,0}) parameter(0)
get-tuple-element.4 = s32[] get-tuple-element(arg_tuple.0), index=0
constant.0 = s32[] constant(128)
ROOT compare.5 = pred[] compare(get-tuple-element.4, constant.0), direction=LT
}
ENTRY main.55 {
Arg_2.3 = f32[128,128,128]{2,1,0} parameter(2)
constant.4 = s32[] constant(0)
Arg_1.2 = f32[128,128]{1,0} parameter(1)
constant.5 = f32[] constant(0)
broadcast.1 = f32[128,128,128]{2,1,0} broadcast(constant.5), dimensions={}
Arg_0.1 = f32[128,128]{1,0} parameter(0)
tuple = tuple(constant.4, Arg_1.2, broadcast.1, Arg_0.1)
while = while(tuple), condition=Cond, body=Body, backend_config={"known_trip_count":{"n":"128"}}
get-tuple-element.50 = f32[128,128]{1,0} get-tuple-element(while), index=1
get-tuple-element.51 = f32[128,128,128]{2,1,0} get-tuple-element(while), index=2
ROOT tuple.54 = (f32[128,128]{1,0}, f32[128,128,128]{2,1,0}) tuple(get-tuple-element.50, get-tuple-element.51)
})";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmLoopIteration) {
const char* hlo = R"(
HloModule test
%Body {
param = (f16[1,8,8]{2,1,0}, f16[1,8,8]{2,1,0}, f16[4,8,8]{2,1,0}, u32[]) parameter(0)
p0 = get-tuple-element(param), index=0
p1 = get-tuple-element(param), index=1
p2 = get-tuple-element(param), index=2
loop_iter = get-tuple-element(param), index=3
bitcast.41 = f16[8,8]{1,0} bitcast(p0)
bitcast.42 = f16[8,8]{1,0} bitcast(p1)
custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm"
bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1)
c0 = u32[] constant(0)
c_trip_count = u32[] constant(8)
compare = pred[] compare(loop_iter, c0), direction=LT
add = u32[] add(loop_iter, c_trip_count)
offset = u32[] select(compare, add, loop_iter)
dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, offset, c0, c0)
c1 = u32[] constant(1)
add2 = u32[] add(loop_iter, c1)
ROOT tuple = tuple(p0, p1, dus, u32[] add2)
}
%Cond {
%param.1 = (f16[1,8,8]{2,1,0}, f16[1,8,8]{2,1,0}, f16[4,8,8]{2,1,0}, u32[]) parameter(0)
%i.1 = u32[] get-tuple-element(%param.1), index=3
%trip_count = u32[] constant(8)
ROOT %done = pred[] compare(u32[] %i.1, u32[] %trip_count), direction=LT
}
ENTRY %test {
%p0.1 = f16[1,8,8]{2,1,0} parameter(0)
%p1.1 = f16[1,8,8]{2,1,0} parameter(1)
%p2.1 = f16[4,8,8]{2,1,0} parameter(2)
%c0.1 = u32[] constant(0)
%initial_tuple = tuple(%p0.1, %p1.1, %p2.1, u32[] %c0.1)
ROOT %while = while(%initial_tuple), condition=%Cond, body=%Body, backend_config={"known_trip_count":{"n":"8"}}
})";
const char* expected = R"(
}
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmParameterOffset) {
const char* hlo = R"(
HloModule test
ENTRY main.9 {
p0 = f16[1,8,8]{2,1,0} parameter(0)
p1 = f16[1,8,8]{2,1,0} parameter(1)
p2 = f16[4,8,8]{2,1,0} parameter(2)
p3 = s32[] parameter(3)
c1_s32 = s32[] constant(1)
c0_s32 = s32[] constant(0)
bitcast.41 = f16[8,8]{1,0} bitcast(p0)
bitcast.42 = f16[8,8]{1,0} bitcast(p1)
custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1)
ROOT dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, p3, c0_s32, c0_s32)
})";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSOffsetAsFunctionOfLoopIteration) {
const char* hlo = R"(
HloModule test_module, replica_count=2
add {
a = s64[] parameter(0)
b = s64[] parameter(1)
ROOT add = s64[] add(a, b)
}
Body {
param = (s64[], s64[16, 32], s64[8, 32]) parameter(0)
i = s64[] get-tuple-element(param), index=0
dest = s64[16,32] get-tuple-element(param), index=1
src = s64[8,32] get-tuple-element(param), index=2
eight = s64[] constant(8)
zero = s64[] constant(0)
thirty_two = s64[] constant(32)
add = s64[] add(eight, i)
add.2 = s64[] subtract(add, thirty_two)
compare = pred[] compare(add, thirty_two), direction=LT
offset = s64[] select(compare, add, add.2)
rs = s64[4,32] reduce-scatter(src), channel_id=1, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
fusion = s64[16,32] dynamic-update-slice(dest, rs, offset, zero)
one = s64[] constant(1)
i_plus_one = s64[] add(i, one)
ROOT tuple = tuple(i_plus_one, fusion, src)
}
Cond {
param = (s64[], s64[16,32], s64[8,32]) parameter(0)
loop_iter = s64[] get-tuple-element(param), index=0
c16 = s64[] constant(16)
ROOT compare = pred[] compare(loop_iter, c16), direction=LT
}
ENTRY main {
zero = s64[] constant(0)
dest = s64[16,32] parameter(0)
src = s64[8,32] parameter(1)
tuple = tuple(zero, dest, src)
ROOT while = while(tuple), body=Body, condition=Cond
}
)";
const char* expected = R"(
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(hlo));
TF_ASSERT_OK_AND_ASSIGN(
auto changed,
RunHloPass(DynamicSliceFusionRewriter("gpu"), module.get()));
EXPECT_TRUE(changed);
std::vector<const HloComputation*> fusions;
for (auto computation : module->computations()) {
if (computation->IsFusionComputation()) {
fusions.push_back(computation);
}
}
ASSERT_EQ(fusions.size(), 1);
const HloComputation* dynamic_slice_fusion = fusions[0];
TF_ASSERT_OK_AND_ASSIGN(
auto filecheck_match,
RunFileCheck(dynamic_slice_fusion->ToString(
HloPrintOptions{}.set_print_large_constants(true)),
expected));
EXPECT_TRUE(filecheck_match);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmLaxScan) {
const char* hlo = R"(
HloModule lax_scan
Body {
arg_tuple.15 = (s32[], f32[8,8]{1,0}, f32[8,8,8]{2,1,0}, f32[8,8,8]{2,1,0}, f32[8,8]{1,0}) parameter(0)
get-tuple-element.16 = s32[] get-tuple-element(arg_tuple.15), index=0
constant.21 = s32[] constant(1)
add.2 = s32[] add(get-tuple-element.16, constant.21)
get-tuple-element.30 = get-tuple-element(arg_tuple.15), index=4
get-tuple-element.18 = get-tuple-element(arg_tuple.15), index=2
get-tuple-element.19 = get-tuple-element(arg_tuple.15), index=3
constant.23 = s32[] constant(0)
compare.2 = pred[] compare(get-tuple-element.16, constant.23), direction=LT
constant.22 = s32[] constant(8)
add.3 = s32[] add(get-tuple-element.16, constant.22)
select.1 = s32[] select(compare.2, add.3, get-tuple-element.16)
dynamic-slice.1 = f32[1,8,8]{2,1,0} dynamic-slice(get-tuple-element.19, select.1, constant.23, constant.23), dynamic_slice_sizes={1,8,8}
bitcast.72 = f32[8,8]{1,0} bitcast(dynamic-slice.1)
get-tuple-element.17 = f32[8,8]{1,0} get-tuple-element(arg_tuple.15), index=1
custom-call.1 = (f32[8,8]{1,0}, s8[131072]{0}) custom-call(bitcast.72, get-tuple-element.17), custom_call_target="__cublas$gemm"
get-tuple-element = f32[8,8]{1,0} get-tuple-element(custom-call.1), index=0
bitcast.77 = f32[1,8,8]{2,1,0} bitcast(get-tuple-element)
dynamic-update-slice.1 = f32[8,8,8]{2,1,0} dynamic-update-slice(get-tuple-element.18, bitcast.77, select.1, constant.23, constant.23)
ROOT tuple.38 = tuple(add.2, get-tuple-element.30, dynamic-update-slice.1, get-tuple-element.19, get-tuple-element.30)
}
Cond {
arg_tuple.40 = (s32[], f32[8,8]{1,0}, f32[8,8,8]{2,1,0}, f32[8,8,8]{2,1,0}, f32[8,8]{1,0}) parameter(0)
get-tuple-element.41 = s32[] get-tuple-element(arg_tuple.40), index=0
constant.46 = s32[] constant(8)
ROOT compare.3 = pred[] compare(get-tuple-element.41, constant.46), direction=LT
}
ENTRY main {
constant.4 = s32[] constant(0)
Arg_1.2 = f32[8,8]{1,0} parameter(1)
constant.5 = f32[] constant(0)
broadcast.1 = f32[8,8,8]{2,1,0} broadcast(constant.5), dimensions={}
Arg_2.3 = f32[8,8,8]{2,1,0} parameter(2)
Arg_0.1 = f32[8,8]{1,0} parameter(0)
tuple.7 = tuple(constant.4, Arg_1.2, broadcast.1, Arg_2.3, Arg_0.1)
while.48 = while(tuple.7), condition=Cond, body=Body, backend_config={"known_trip_count":{"n":"8"}}
get-tuple-element.50 = get-tuple-element(while.48), index=1
get-tuple-element.51 = get-tuple-element(while.48), index=2
ROOT tuple.54 = tuple(get-tuple-element.50, get-tuple-element.51)
}
)";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, DUSReduceScatterTupleNoTransform) {
const char* hlo = R"(
HloModule test, replica_count=2
add {
param_0 = f16[] parameter(0)
param_1 = f16[] parameter(1)
ROOT add.1 = f16[] add(param_0, param_1)
}
ENTRY main.9 {
param_0 = f16[128,128]{1,0} parameter(0)
param_1 = f16[128,128]{1,0} parameter(1)
param_2 = f16[128,128]{1,0} parameter(2)
constant_20 = u32[] constant(20)
constant_0 = u32[] constant(0)
reduce-scatter = (f16[64,128]{1,0}, f16[64,128]{1,0}) reduce-scatter(param_0, param_2), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
rs1 = get-tuple-element(reduce-scatter), index=0
ROOT loop_dynamic_update_slice_fusion = f16[128,128]{1,0} dynamic-update-slice(param_1, rs1, constant_20, constant_0)
})";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterSlice) {
const char* hlo = R"(
HloModule jit_slice, replica_count=2
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = add(a,b)
}
ENTRY %main.9 {
p0 = s32[2,8,32]{2,1,0} parameter(0)
slice = s32[1,8,32]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:32]}
bc = s32[8,32]{1,0} bitcast(%slice)
ROOT rs = s32[4,32] reduce-scatter(bc), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
})";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDynamicSlice) {
const char* hlo = R"(
HloModule jit_slice, replica_count=2
add {
a = s32[] parameter(0)
b = s32[] parameter(1)
ROOT add = add(a,b)
}
ENTRY %main.9 {
p0 = s32[2,8,32]{2,1,0} parameter(0)
c0 = s32[] constant(0)
c1 = s32[] constant(1)
slice = s32[1,8,32]{2,1,0} dynamic-slice(p0, c1, c0, c0), dynamic_slice_sizes={1,8,32}
bc = s32[8,32]{1,0} bitcast(%slice)
ROOT rs = s32[4,32] reduce-scatter(bc), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add
})";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, ReplicaIdAndPartitionIdAsOffset) {
const char* hlo = R"(
HloModule test_module, replica_count=2, num_partitions=2
ENTRY main {
p0 = s32[32,32] parameter(0)
p1 = s32[32,32] parameter(1)
p2 = s32[64,32] parameter(2)
c10 = u32[] constant(10)
c0 = u32[] constant(0)
call1 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm"
dus1 = s32[64,32] dynamic-update-slice(p2, call1, c10, c0)
replica = u32[] replica-id()
call2 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm"
dus2 = s32[64,32] dynamic-update-slice(p2, call2, replica, c0)
partition = u32[] partition-id()
call3 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm"
dus3 = s32[64,32] dynamic-update-slice(p2, call3, partition, c0)
ROOT tuple = tuple(dus1, dus2, dus3)
}
)";
const char* expected = R"(
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected);
}
TEST_F(DynamicSliceFusionRewriterTest, ParameterOffsetThroughWhileLoop) {
const char* hlo = R"(
HloModule test
Body {
p = (s32[], s32[32,32], s32[32,32], s32[64,32], s32[]) parameter(0)
i = get-tuple-element(p), index=0
p0 = get-tuple-element(p), index=1
p1 = get-tuple-element(p), index=2
p2 = s32[64,32] get-tuple-element(p), index=3
offset = s32[] get-tuple-element(p), index=4
c0 = s32[] constant(0)
call = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm"
dus = s32[64,32] dynamic-update-slice(p2, call, offset, c0)
c1 = s32[] constant(1)
i_plus_one = add(i, c1)
ROOT tuple = tuple(i_plus_one, p1, p0, dus, offset)
}
Cond {
p = (s32[], s32[32,32], s32[32,32], s32[64,32], s32[]) parameter(0)
i = get-tuple-element(p), index=0
c4 = s32[] constant(4)
ROOT compare = compare(i, c4), direction=LT
}
ENTRY main {
offset = s32[] parameter(0)
p0 = s32[32,32] parameter(1)
p1 = s32[32,32] parameter(2)
p2 = s32[64,32] parameter(3)
c0 = s32[] constant(0)
tuple = tuple(c0, p0, p1, p2, offset)
ROOT while = while(tuple), body=Body, condition=Cond
}
)";
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
std::nullopt);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa2137ae-9b6f-4c3e-b411-fc2113fca95c | cpp | tensorflow/tensorflow | save_dataset_op | tensorflow/core/kernels/data/experimental/save_dataset_op.cc | tensorflow/core/kernels/data/experimental/save_dataset_op_test.cc | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/captured_function.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/hash_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/root_dataset.h"
#include "tensorflow/core/data/snapshot_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr const char* const SaveDatasetOp::kCompression;
constexpr const char* const SaveDatasetOp::kPath;
constexpr const char* const SaveDatasetOp::kShardFunc;
constexpr const char* const SaveDatasetOp::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetOp::kUseShardFunc;
constexpr const int SaveDatasetOp::kFileFormatVersion;
constexpr const char* const SaveDatasetV2Op::kInputDataset;
constexpr const char* const SaveDatasetV2Op::kPath;
constexpr const char* const SaveDatasetV2Op::kCompression;
constexpr const char* const SaveDatasetV2Op::kDatasetType;
constexpr const char* const SaveDatasetV2Op::kOutputTypes;
constexpr const char* const SaveDatasetV2Op::kOutputShapes;
constexpr const char* const SaveDatasetV2Op::kShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncOtherArgs;
constexpr const char* const SaveDatasetV2Op::kUseShardFunc;
constexpr const char* const SaveDatasetV2Op::kShardFuncTarguments;
constexpr const int SaveDatasetV2Op::kFileFormatVersion;
SaveDatasetOp::SaveDatasetOp(OpKernelConstruction* ctx)
: HybridAsyncOpKernel(ctx, "tf_data_save_dataset") {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
}
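// Synchronously writes the whole input dataset: creates a fresh run
// directory, writes an unfinalized metadata file, streams every element into
// shard files, then rewrites the metadata as finalized.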
Status SaveDatasetOp::DoCompute(OpKernelContext* ctx) {
metrics::RecordTFDataFetchOp("SaveDatasetOp");
DatasetBase* dataset;
TF_RETURN_IF_ERROR(GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
TF_RETURN_IF_ERROR(ParseScalarArgument(ctx, kPath, &path));
auto run_id = random::New64();
auto run_dir = snapshot_util::RunDirectory(path, run_id);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir));
TF_RETURN_IF_ERROR(
WriteMetadataFile(ctx->env(), path, run_id, dataset->output_dtypes(),
                        /*num_elements=*/0, /*finalized=*/false));
std::unique_ptr<CapturedFunction> captured_func;
TF_RETURN_IF_ERROR(CapturedFunction::Create(
ctx, func_metadata_, kShardFuncOtherArgs, &captured_func));
uint64 num_elements = 0;
TF_RETURN_IF_ERROR(WriteData(ctx, dataset, std::move(captured_func), run_dir,
&num_elements));
TF_RETURN_IF_ERROR(WriteMetadataFile(ctx->env(), path, run_id,
dataset->output_dtypes(), num_elements,
                                       /*finalized=*/true));
return absl::OkStatus();
}
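// Iterates a finalized copy of `dataset`, routing each element to the shard
// chosen by GetShardIndex. One AsyncWriter is created lazily per shard;
// writer errors are accumulated under `mu` and returned after all writers
// have been signalled EOF.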
Status SaveDatasetOp::WriteData(OpKernelContext* ctx, DatasetBase* dataset,
std::unique_ptr<CapturedFunction> captured_func,
const std::string& run_dir,
uint64* num_elements) {
IteratorContext::Params params(ctx);
auto function_handle_cache =
std::make_unique<FunctionHandleCache>(params.flr);
params.function_handle_cache = function_handle_cache.get();
ResourceMgr resource_mgr;
params.resource_mgr = &resource_mgr;
CancellationManager cancellation_manager(ctx->cancellation_manager());
params.cancellation_manager = &cancellation_manager;
IteratorContext iter_ctx(std::move(params));
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func;
TF_RETURN_IF_ERROR(
captured_func->Instantiate(&iter_ctx, &instantiated_captured_func));
DatasetBase* finalized_dataset;
TF_RETURN_IF_ERROR(FinalizeDataset(ctx, dataset, &finalized_dataset));
std::unique_ptr<IteratorBase> iterator;
TF_RETURN_IF_ERROR(finalized_dataset->MakeIterator(
      &iter_ctx, /*parent=*/nullptr, /*output_prefix=*/"Save", &iterator));
mutex mu;
Status status;
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers;
while (true) {
if (ctx->cancellation_manager()->IsCancelled()) {
return errors::Cancelled("Operation was cancelled");
}
std::vector<Tensor> element;
bool end_of_input;
TF_RETURN_IF_ERROR(iterator->GetNext(&iter_ctx, &element, &end_of_input));
if (end_of_input) {
break;
}
(*num_elements)++;
int64_t shard_index = -1;
TF_RETURN_IF_ERROR(GetShardIndex(
&iter_ctx, instantiated_captured_func.get(), element, &shard_index));
if (writers.count(shard_index) == 0) {
const auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir, shard_index);
auto writer_thread = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
          /*checkpoint_id=*/0, compression_, kFileFormatVersion,
finalized_dataset->output_dtypes(), [&mu, &status](Status s) {
mutex_lock l(mu);
status.Update(s);
});
writers.insert({shard_index, std::move(writer_thread)});
}
writers[shard_index]->Write(element);
}
for (auto& writer : writers) {
writer.second->SignalEOF();
}
writers.clear();
return status;
}
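// Picks the shard for one element: without a custom shard function the index
// is advanced modulo the CPU budget, otherwise `shard_func` is run and must
// return a scalar int64.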
Status SaveDatasetOp::GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
int64_t* shard_index) {
if (!use_shard_func_) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
      ctx, element, &output_tensors, /*node=*/nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument("`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
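// Writes the snapshot metadata record (run id, format version, dtypes,
// element count, finalized flag) for the save rooted at `path`.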
Status SaveDatasetOp::WriteMetadataFile(Env* env, const std::string& path,
uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
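// SaveDatasetV2 implements saving as a pass-through dataset: elements are
// written to shard files as a side effect of iteration, so the save can sit
// inside a normal input pipeline and be checkpointed with it.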
class SaveDatasetV2Op::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input, const tstring& path,
const std::string& compression,
std::unique_ptr<CapturedFunction> shard_func, bool use_shard_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
path_(path),
compression_(compression),
shard_func_(std::move(shard_func)),
use_shard_func_(use_shard_func) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* path_node = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(path_, &path_node));
std::vector<Node*> shard_func_other_args;
DataTypeVector shard_func_other_args_types;
TF_RETURN_IF_ERROR(shard_func_->AddToGraph(ctx, b, &shard_func_other_args,
&shard_func_other_args_types));
AttrValue compression_attr;
b->BuildAttrValue(compression_, &compression_attr);
AttrValue shard_func_attr;
b->BuildAttrValue(shard_func_->func(), &shard_func_attr);
AttrValue use_shard_func_attr;
b->BuildAttrValue(use_shard_func_, &use_shard_func_attr);
AttrValue shard_func_arguments_types_attr;
b->BuildAttrValue(shard_func_other_args_types,
&shard_func_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this,
{std::make_pair(0, input_graph_node), std::make_pair(1, path_node)},
{std::make_pair(2, shard_func_other_args)},
{std::make_pair(kCompression, compression_attr),
std::make_pair(kShardFunc, shard_func_attr),
std::make_pair(kUseShardFunc, use_shard_func_attr),
std::make_pair(kShardFuncTarguments, shard_func_arguments_types_attr)},
output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
static constexpr const char* const kIteratorName = "Writer";
static constexpr const char* const kRunId = "run_id";
static constexpr const char* const kCurrentCheckpointId =
"current_checkpoint_id";
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
writers_closed_(false),
run_id_(0),
current_checkpoint_id_(0) {}
~Iterator() override {
mutex_lock l(mu_);
SignalEOF(true);
}
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(
dataset()->shard_func_->Instantiate(ctx, &instantiated_shard_func_));
if (!ctx->is_restoring()) {
run_id_ = random::New64();
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
          /*num_elements=*/0, /*finalized=*/false));
}
return dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_);
}
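    // Forwards elements from the input unchanged while mirroring each one to
    // the shard writer selected by the shard function; at end of input the
    // writers are flushed and the metadata file is finalized.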
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = false;
snapshot_util::AsyncWriter* current_writer;
{
std::vector<Tensor> output_tensors;
mutex_lock l(mu_);
{
mutex_lock wsl(writer_status_mu_);
if (!writer_status_.ok() || writers_closed_) {
*end_of_sequence = true;
return writer_status_;
}
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
if (*end_of_sequence) {
SignalEOF(true);
{
mutex_lock wsl(writer_status_mu_);
TF_RETURN_IF_ERROR(writer_status_);
}
return WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
            dataset()->Cardinality(), /*finalized=*/true);
}
(num_elements_)++;
int64_t shard_index = 0;
TF_RETURN_IF_ERROR(
GetShardIndex(ctx, instantiated_shard_func_.get(), *out_tensors,
dataset()->use_shard_func_, &shard_index));
if (writers_.count(shard_index) == 0) {
auto snapshot_shard_directory =
snapshot_util::ShardDirectory(run_dir_, shard_index);
auto writer = std::make_unique<snapshot_util::AsyncWriter>(
ctx->env(), shard_index, snapshot_shard_directory,
current_checkpoint_id_, dataset()->compression_,
kFileFormatVersion, dataset()->output_dtypes(), [this](Status s) {
if (!s.ok()) {
mutex_lock l(writer_status_mu_);
writer_status_ = s;
}
});
writers_.insert({shard_index, std::move(writer)});
}
current_writer = writers_[shard_index].get();
}
current_writer->Write(*out_tensors);
return absl::OkStatus();
}
protected:
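    // Checkpointing records the run and checkpoint ids, closes the current
    // shard writers without marking the iterator finished, and bumps the
    // checkpoint id so writes after a restore go to fresh files.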
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(full_name(kRunId),
static_cast<int64_t>(run_id_)));
TF_RETURN_IF_ERROR(
writer->WriteScalar(full_name(kCurrentCheckpointId),
static_cast<int64_t>(current_checkpoint_id_)));
SignalEOF(false);
writers_.clear();
current_checkpoint_id_++;
return SaveInput(ctx, writer, input_impl_);
}
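    // Restores the run and checkpoint ids, recreates the run directory and an
    // unfinalized metadata file when restoring, then restores the input
    // iterator.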
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t run_id_signed;
int64_t current_checkpoint_id;
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kRunId), &run_id_signed));
TF_RETURN_IF_ERROR(reader->ReadScalar(full_name(kCurrentCheckpointId),
                                          &current_checkpoint_id));
run_id_ = static_cast<uint64>(run_id_signed);
run_dir_ = snapshot_util::RunDirectory(
io::JoinPath(dataset()->writer_prefix_, dataset()->path_), run_id_);
current_checkpoint_id_ = static_cast<uint64>(current_checkpoint_id);
if (ctx->is_restoring()) {
TF_RETURN_IF_ERROR(ctx->env()->RecursivelyCreateDir(run_dir_));
TF_RETURN_IF_ERROR(WriteMetadataFile(
ctx->env(), dataset()->path_, run_id_, dataset()->output_dtypes(),
          /*num_elements=*/0, /*finalized=*/false));
}
return RestoreInput(ctx, reader, input_impl_);
}
private:
Status GetShardIndex(IteratorContext* ctx,
InstantiatedCapturedFunction* function,
const std::vector<Tensor>& element,
bool use_shard_func, int64_t* shard_index)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!use_shard_func) {
*shard_index = (*shard_index + 1) % GetCpuBudget();
return absl::OkStatus();
}
std::vector<Tensor> output_tensors;
TF_RETURN_IF_ERROR(function->RunWithBorrowedArgs(
ctx, element, &output_tensors, nullptr));
if (output_tensors.size() != 1 || output_tensors[0].dtype() != DT_INT64 ||
output_tensors[0].NumElements() != 1) {
return errors::InvalidArgument(
"`shard_func` must return a scalar int64.");
}
*shard_index = output_tensors[0].flat<int64_t>()(0);
return absl::OkStatus();
}
Status WriteMetadataFile(Env* env, const std::string& path, uint64 run_id,
const DataTypeVector& output_dtypes,
uint64 num_elements, bool finalized)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
SnapshotMetadataRecord metadata;
metadata.set_creation_timestamp(EnvTime::NowMicros());
metadata.set_run_id(
strings::Printf("%llu", static_cast<unsigned long long>(run_id)));
metadata.set_version(kFileFormatVersion);
for (const auto& output_dtype : output_dtypes) {
metadata.add_dtype(output_dtype);
}
metadata.set_finalized(finalized);
metadata.set_num_elements(num_elements);
return snapshot_util::WriteMetadataFile(env, path, &metadata);
}
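    // Flushes and destroys all shard writers; when `mark_closed` is true the
    // iterator refuses any further writes.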
void SignalEOF(bool mark_closed) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!writers_closed_) {
for (auto& writer : writers_) {
writer.second->SignalEOF();
}
writers_.clear();
writers_closed_ = mark_closed;
}
}
mutex mu_;
mutex writer_status_mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t num_elements_;
absl::flat_hash_map<int64_t, std::unique_ptr<snapshot_util::AsyncWriter>>
writers_ TF_GUARDED_BY(mu_);
Status writer_status_ TF_GUARDED_BY(writer_status_mu_);
bool writers_closed_ TF_GUARDED_BY(mu_);
uint64 run_id_ TF_GUARDED_BY(mu_);
tstring run_dir_ TF_GUARDED_BY(mu_);
uint64 current_checkpoint_id_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_shard_func_
TF_GUARDED_BY(mu_);
};
const DatasetBase* input_;
const tstring path_;
const std::string compression_;
const std::unique_ptr<CapturedFunction> shard_func_;
const bool use_shard_func_;
const DataTypeVector output_types_;
const std::vector<PartialTensorShape> output_shapes_;
const std::shared_ptr<FunctionMetadata> func_metadata_;
const std::string writer_prefix_;
};
SaveDatasetV2Op::SaveDatasetV2Op(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, ctx->GetAttr(kCompression, &compression_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
OP_REQUIRES_OK(ctx, ctx->GetAttr(kUseShardFunc, &use_shard_func_));
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kShardFunc, {},
&func_metadata_));
}
void SaveDatasetV2Op::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
DatasetBase* dataset;
OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(0), &dataset));
tstring path;
OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kPath, &path));
std::unique_ptr<CapturedFunction> shard_func;
OP_REQUIRES_OK(
ctx, CapturedFunction::Create(ctx, func_metadata_, kShardFuncOtherArgs,
&shard_func));
*output = new Dataset(ctx, dataset, path, compression_, std::move(shard_func),
use_shard_func_);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("SaveDataset").Device(DEVICE_CPU), SaveDatasetOp);
REGISTER_KERNEL_BUILDER(Name("SaveDatasetV2").Device(DEVICE_CPU),
SaveDatasetV2Op);
}
}
}
} | #include "tensorflow/core/kernels/data/experimental/save_dataset_op.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace data {
namespace experimental {
constexpr char kSaveDatasetV2NodeName[] = "save_dataset_v2";
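// Bundles an input dataset together with the save path, compression, shard
// function and attribute plumbing needed to build a SaveDatasetV2 node under
// test.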
class SaveDatasetV2Params : public DatasetParams {
public:
template <typename T>
SaveDatasetV2Params(T input_dataset_params, const tstring& path,
const std::string& compression,
FunctionDefHelper::AttrValueWrapper shard_func,
std::vector<FunctionDef> func_lib, bool use_shard_func,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name, DataTypeVector type_arguments)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
path_(path),
compression_(compression),
shard_func_(shard_func),
func_lib_(std::move(func_lib)),
use_shard_func_(use_shard_func),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
std::vector<Tensor> input_tensors;
input_tensors.emplace_back(CreateTensor<tstring>(TensorShape({}), {path_}));
return input_tensors;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(SaveDatasetV2Op::kInputDataset);
input_names->emplace_back(SaveDatasetV2Op::kPath);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back(SaveDatasetV2Op::kCompression, compression_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFunc, shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kUseShardFunc, use_shard_func_);
attr_vector->emplace_back(SaveDatasetV2Op::kShardFuncTarguments,
type_arguments_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputTypes, output_dtypes_);
attr_vector->emplace_back(SaveDatasetV2Op::kOutputShapes, output_shapes_);
return absl::OkStatus();
}
string path() const { return path_; }
string dataset_type() const override { return SaveDatasetV2Op::kDatasetType; }
string op_name() const override { return "SaveDatasetV2"; }
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
private:
std::string path_;
std::string compression_;
FunctionDefHelper::AttrValueWrapper shard_func_;
std::vector<FunctionDef> func_lib_;
bool use_shard_func_;
DataTypeVector type_arguments_;
};
class SaveDatasetV2OpTest : public DatasetOpsTestBase {
public:
Status Initialize(const DatasetParams& dataset_params) {
TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
auto params = static_cast<const SaveDatasetV2Params&>(dataset_params);
save_filename_ = params.path();
return absl::OkStatus();
}
protected:
std::string save_filename_;
};
SaveDatasetV2Params SaveDatasetV2Params1() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 10, 2),
io::JoinPath(testing::TmpDir(), "save_data"),
"",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
false,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
SaveDatasetV2Params SaveDatasetV2Params2() {
return SaveDatasetV2Params(
RangeDatasetParams(0, 5, 1),
io::JoinPath(testing::TmpDir(), "save_data"),
"GZIP",
FunctionDefHelper::FunctionRef("XTimesTwo", {{"T", DT_INT64}}),
{test::function::XTimesTwo()},
true,
{DT_INT64},
{PartialTensorShape({})},
kSaveDatasetV2NodeName,
{});
}
std::vector<GetNextTestCase<SaveDatasetV2Params>> GetNextTestCases() {
return {{
SaveDatasetV2Params1(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedGetNextTest : public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
GetNextTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
while (!end_of_sequence) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           /*compare_order=*/true));
}
INSTANTIATE_TEST_SUITE_P(SaveDatasetV2OpTest, ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(SaveDatasetV2OpTest, DatasetNodeName) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(SaveDatasetV2OpTest, DatasetTypeString) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
name_utils::OpNameParams params;
params.op_version = dataset_params.op_version();
TF_ASSERT_OK(CheckDatasetTypeString("SaveDatasetV2"));
}
TEST_F(SaveDatasetV2OpTest, DatasetOutputDtypes) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes(dataset_params.output_dtypes()));
}
std::vector<DatasetOutputDtypesTestCase<SaveDatasetV2Params>>
DatasetOutputDtypesTestCases() {
return {{SaveDatasetV2Params1(),
{DT_INT64}},
{SaveDatasetV2Params2(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SaveDatasetV2Params>>
DatasetOutputShapesTestCases() {
return {{SaveDatasetV2Params1(),
{PartialTensorShape({})}},
{SaveDatasetV2Params2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SaveDatasetV2Params>> CardinalityTestCases() {
return {{SaveDatasetV2Params1(),
5},
{SaveDatasetV2Params2(),
5}};
}
DATASET_CARDINALITY_TEST_P(SaveDatasetV2OpTest, SaveDatasetV2Params,
CardinalityTestCases())
TEST_F(SaveDatasetV2OpTest, IteratorPrefix) {
auto dataset_params = SaveDatasetV2Params1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
SaveDatasetV2Op::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>>
IteratorSaveAndRestoreTestCases() {
return {{SaveDatasetV2Params1(),
{0, 2, 4, 6, 8},
CreateTensors<int64_t>(TensorShape({}), {{0}, {2}, {4}, {6}, {8}})},
{SaveDatasetV2Params2(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({}), {{0}, {1}, {2}, {3}, {4}})}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public SaveDatasetV2OpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<SaveDatasetV2Params>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
const std::vector<int>& breakpoints = test_case.breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
std::vector<Tensor> next;
TF_EXPECT_OK(
iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
out_tensors.insert(out_tensors.end(), next.begin(), next.end());
cur_iteration++;
}
}
TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           /*compare_order=*/true));
}
INSTANTIATE_TEST_CASE_P(SaveDatasetV2OpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/save_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/experimental/save_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fad28bcd-2f8e-4dd8-b982-85cf806a6dab | cpp | google/libaddressinput | address_input_helper | cpp/src/address_input_helper.cc | cpp/test/address_input_helper_test.cc | #include <libaddressinput/address_input_helper.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_metadata.h>
#include <libaddressinput/preload_supplier.h>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
#include <re2/re2.h>
#include "language.h"
#include "lookup_key.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/re2ptr.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
struct Node {
const Node* parent;
const Rule* rule;
};
namespace {
const char kLookupKeySeparator = '/';
const size_t kHierarchyDepth = size(LookupKey::kHierarchy);
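// Prefers the rule's Latin-script name when the address language has one;
// otherwise falls back to the last component of the rule id.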
std::string GetBestName(const Language& language, const Rule& rule) {
if (language.has_latin_script) {
const std::string& name = rule.GetLatinName();
if (!name.empty()) {
return name;
}
}
const std::string& id = rule.GetId();
std::string::size_type pos = id.rfind(kLookupKeySeparator);
assert(pos != std::string::npos);
return id.substr(pos + 1);
}
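// Finds the deepest hierarchy depth with exactly one matching rule and walks
// from that node up to the root, filling every still-empty address field
// with the best available name.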
void FillAddressFromMatchedRules(
const std::vector<Node>* hierarchy,
AddressData* address) {
assert(hierarchy != nullptr);
assert(address != nullptr);
Language language(address->language_code);
for (size_t depth = kHierarchyDepth - 1; depth > 0; --depth) {
if (hierarchy[depth].size() == 1) {
for (const Node* node = &hierarchy[depth].front();
node != nullptr; node = node->parent, --depth) {
const Rule* rule = node->rule;
assert(rule != nullptr);
AddressField field = LookupKey::kHierarchy[depth];
if (address->IsFieldEmpty(field)) {
address->SetFieldValue(field, GetBestName(language, *rule));
}
}
break;
}
}
}
}
AddressInputHelper::AddressInputHelper(PreloadSupplier* supplier)
: supplier_(supplier) {
assert(supplier_ != nullptr);
}
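// Completes fields that the postal code uniquely determines: if the region
// validates postal codes and the code matches, the rule hierarchy is
// searched for prefix matches and uniquely determined levels are filled in.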
void AddressInputHelper::FillAddress(AddressData* address) const {
assert(address != nullptr);
const std::string& region_code = address->region_code;
if (!RegionDataConstants::IsSupported(region_code)) {
return;
}
AddressData lookup_key_address;
lookup_key_address.region_code = region_code;
LookupKey lookup_key;
lookup_key.FromAddress(lookup_key_address);
const Rule* region_rule = supplier_->GetRule(lookup_key);
assert(region_rule != nullptr);
const RE2ptr* postal_code_reg_exp = region_rule->GetPostalCodeMatcher();
if (postal_code_reg_exp != nullptr) {
if (address->postal_code.empty()) {
address->postal_code = region_rule->GetSolePostalCode();
}
if (!address->postal_code.empty() &&
RE2::FullMatch(address->postal_code, *postal_code_reg_exp->ptr)) {
std::vector<Node> hierarchy[kHierarchyDepth];
CheckChildrenForPostCodeMatches(*address, lookup_key, nullptr, hierarchy);
FillAddressFromMatchedRules(hierarchy, address);
}
}
}
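// Depth-first walk over the region's rule hierarchy that records, per depth,
// every rule whose postal-code prefix matches the address.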
void AddressInputHelper::CheckChildrenForPostCodeMatches(
const AddressData& address,
const LookupKey& lookup_key,
const Node* parent,
std::vector<Node>* hierarchy) const {
const Rule* rule = supplier_->GetRule(lookup_key);
assert(rule != nullptr);
const RE2ptr* postal_code_prefix = rule->GetPostalCodeMatcher();
if (postal_code_prefix == nullptr ||
RE2::PartialMatch(address.postal_code, *postal_code_prefix->ptr)) {
size_t depth = lookup_key.GetDepth();
assert(depth < size(LookupKey::kHierarchy));
hierarchy[depth].emplace_back();
Node* node = &hierarchy[depth].back();
node->parent = parent;
node->rule = rule;
if (depth < size(LookupKey::kHierarchy) - 1 &&
IsFieldUsed(LookupKey::kHierarchy[depth + 1], address.region_code)) {
for (const auto& sub_key : rule->GetSubKeys()) {
LookupKey child_key;
child_key.FromLookupKey(lookup_key, sub_key);
CheckChildrenForPostCodeMatches(address, child_key, node, hierarchy);
}
}
}
}
}
} | #include <libaddressinput/address_input_helper.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/callback.h>
#include <libaddressinput/null_storage.h>
#include <libaddressinput/preload_supplier.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "mock_source.h"
#include "testdata_source.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressInputHelper;
using i18n::addressinput::BuildCallback;
using i18n::addressinput::MockSource;
using i18n::addressinput::NullStorage;
using i18n::addressinput::PreloadSupplier;
using i18n::addressinput::TestdataSource;
class AddressInputHelperTest : public testing::Test {
public:
AddressInputHelperTest(const AddressInputHelperTest&) = delete;
AddressInputHelperTest& operator=(const AddressInputHelperTest&) = delete;
protected:
AddressInputHelperTest()
: supplier_(new TestdataSource(true), new NullStorage),
address_input_helper_(&supplier_),
loaded_(BuildCallback(this, &AddressInputHelperTest::Loaded)) {}
void FillAddress(AddressData* address) {
const std::string& region_code = address->region_code;
if (!region_code.empty()) {
supplier_.LoadRules(region_code, *loaded_);
}
address_input_helper_.FillAddress(address);
}
private:
void Loaded(bool success, const std::string&, int) { ASSERT_TRUE(success); }
PreloadSupplier supplier_;
const AddressInputHelper address_input_helper_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_;
};
TEST_F(AddressInputHelperTest, AddressWithMissingPostalCode) {
AddressData address{
.region_code = "CX",
.administrative_area = "WA",
};
AddressData expected = address;
expected.postal_code = "6798";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithPostalCodeMatchingAdmin) {
AddressData address{
.region_code = "US",
.address_line{"10 High St"},
.postal_code = "58098",
};
AddressData expected = address;
expected.administrative_area = "ND";
FillAddress(&address);
EXPECT_EQ(expected, address);
address.administrative_area = "CA";
expected.administrative_area = "CA";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithPostalCodeMatchingLowerLevel) {
AddressData address{
.region_code = "TW",
.postal_code = "53012",
};
AddressData expected = address;
expected.administrative_area = "彰化縣";
expected.locality = "二水鄉";
FillAddress(&address);
EXPECT_EQ(expected, address);
address.administrative_area = "Already filled in";
expected.administrative_area = "Already filled in";
address.locality = "";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithPostalCodeMatchingLowerLevelLatin) {
AddressData address{
.region_code = "TW",
.postal_code = "53012",
.language_code = "zh-Latn",
};
AddressData expected = address;
expected.locality = "Ershuei Township";
expected.administrative_area = "Changhua County";
FillAddress(&address);
EXPECT_EQ(expected, address);
address.administrative_area = "Already filled in";
expected.administrative_area = "Already filled in";
address.locality = "";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithPostalCodeMatchingDependentLocality) {
AddressData address{
.region_code = "KR",
.postal_code = "425-111",
};
AddressData expected = address;
expected.administrative_area = "경기도";
expected.locality = "안산시";
expected.dependent_locality = "단원구";
FillAddress(&address);
EXPECT_EQ(expected, address);
AddressData address_ko_latn{
.region_code = "KR",
.postal_code = "425-111",
.language_code = "ko-Latn",
};
expected = address_ko_latn;
expected.administrative_area = "Gyeonggi";
expected.locality = "Ansan-si";
expected.dependent_locality = "Danwon-gu";
FillAddress(&address_ko_latn);
EXPECT_EQ(expected, address_ko_latn);
}
TEST_F(AddressInputHelperTest, AddressWithPostalCodeMatchingMultipleValues) {
AddressData address{
.region_code = "KR",
.postal_code = "527-111",
};
AddressData expected = address;
expected.administrative_area = "전라남도";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithInvalidPostalCode) {
AddressData address{
.region_code = "US",
.postal_code = "970",
};
AddressData expected = address;
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithNoPostalCodeValidation) {
AddressData address{
.region_code = "GA",
.postal_code = "123",
};
AddressData expected = address;
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, AddressWithInvalidOrMissingRegionCode) {
AddressData address{
.administrative_area = "YYY",
.postal_code = "XXX",
};
AddressData expected = address;
FillAddress(&address);
EXPECT_EQ(expected, address);
address.region_code = "XXXX";
expected.region_code = "XXXX";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperTest, RegionWithUnusedAdminAreaNames) {
AddressData address{
.region_code = "CH",
.postal_code = "1111",
.language_code = "de-CH",
};
AddressData expected = address;
FillAddress(&address);
EXPECT_EQ(expected, address);
}
class AddressInputHelperMockDataTest : public testing::Test {
public:
AddressInputHelperMockDataTest(
const AddressInputHelperMockDataTest&) = delete;
AddressInputHelperMockDataTest& operator=(
const AddressInputHelperMockDataTest&) = delete;
protected:
AddressInputHelperMockDataTest()
: source_(new MockSource),
supplier_(source_, new NullStorage),
address_input_helper_(&supplier_),
loaded_(BuildCallback(this, &AddressInputHelperMockDataTest::Loaded)) {}
void FillAddress(AddressData* address) {
const std::string& region_code = address->region_code;
if (!region_code.empty()) {
supplier_.LoadRules(region_code, *loaded_);
}
address_input_helper_.FillAddress(address);
}
MockSource* const source_;
private:
void Loaded(bool success, const std::string&, int) { ASSERT_TRUE(success); }
PreloadSupplier supplier_;
const AddressInputHelper address_input_helper_;
const std::unique_ptr<const PreloadSupplier::Callback> loaded_;
};
TEST_F(AddressInputHelperMockDataTest,
PostalCodeSharedAcrossDifferentHierarchies) {
source_->data_ = {
{
"data/KR",
R"({"data/KR": )"
R"({"id":"data/KR", "sub_keys":"A~B", "zip":"\\d{5}"}, )"
R"("data/KR/A": )"
R"({"id":"data/KR/A", "sub_keys":"A1"}, )"
R"("data/KR/A/A1": )"
R"({"id":"data/KR/A/A1", "zip":"1"}, )"
R"("data/KR/B": )"
R"({"id":"data/KR/B", "sub_keys":"B1"}, )"
R"("data/KR/B/B1": )"
R"({"id":"data/KR/B/B1", "zip":"12"}})"}};
AddressData address{
.region_code = "KR",
.administrative_area = "",
.postal_code = "12345",
};
AddressData expected = address;
FillAddress(&address);
EXPECT_EQ(expected, address);
}
TEST_F(AddressInputHelperMockDataTest,
PostalCodeSharedAcrossDifferentHierarchiesSameState) {
source_->data_ = {
{
"data/KR",
R"({"data/KR": )"
R"({"id":"data/KR", "sub_keys":"A~B", "zip":"\\d{5}"}, )"
R"("data/KR/A": )"
R"({"id":"data/KR/A", "sub_keys":"A1~A2"}, )"
R"("data/KR/A/A1": )"
R"({"id":"data/KR/A/A1", "sub_keys":"A1a", "zip":"1"}, )"
R"("data/KR/A/A1/A1a": )"
R"({"id":"data/KR/A/A1/A1a", "zip":"12"}, )"
R"("data/KR/A/A2": )"
R"({"id":"data/KR/A/A2", "sub_keys":"A2a", "zip":"1"}, )"
R"("data/KR/A/A2/A2a": )"
R"({"id":"data/KR/A/A2/A2a", "zip":"123"}, )"
R"("data/KR/B": )"
R"({"id":"data/KR/B", "zip":"2"}})"}};
AddressData address{
.region_code = "KR",
.administrative_area = "",
.postal_code = "12345",
};
AddressData expected = address;
expected.administrative_area = "A";
FillAddress(&address);
EXPECT_EQ(expected, address);
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_input_helper.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_input_helper_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
2a963f4b-b152-42b3-a38d-8c8eef877309 | cpp | google/arolla | protopath_id | arolla/naming/protopath_id.cc | arolla/naming/protopath_id_test.cc | #include "arolla/naming/protopath_id.h"
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "arolla/naming/table.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::naming {
namespace {
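// Joins segments into a protopath id, appending the index marker to indexed
// segments, e.g. "/query[:]/text".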
std::string FormatAsProtopathId(const std::vector<PathSegment>& segments) {
return absl::StrJoin(segments, "",
[](std::string* ret, const PathSegment& segment) {
absl::StrAppend(ret, "/", segment.FieldName(),
segment.IsIndex() ? kIndexMarker : "");
});
}
PathSegment ParsePathSegment(absl::string_view segment_name) {
bool is_index = absl::ConsumeSuffix(&segment_name, kIndexMarker);
return PathSegment(segment_name, is_index);
}
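// Splits a protopath id on '/' into path segments. The empty string denotes
// the root; anything else must start with a slash.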
absl::StatusOr<std::vector<PathSegment>> ParseProtopathId(
absl::string_view protopath_id) {
std::vector<PathSegment> parsed;
if (protopath_id.empty()) {
return parsed;
}
if (protopath_id[0] != '/') {
return absl::InvalidArgumentError(
absl::StrFormat("ProtopathId (%s) formatted incorrectly. "
"Must start with a slash (/).",
protopath_id));
}
protopath_id.remove_prefix(1);
while (!protopath_id.empty()) {
const size_t segment_len = protopath_id.find_first_of('/');
const absl::string_view segment = protopath_id.substr(0, segment_len);
parsed.push_back(ParsePathSegment(segment));
if (segment_len == std::string::npos) {
break;
}
protopath_id.remove_prefix(segment_len + 1);
}
return parsed;
}
}
std::string TablePathToProtopathId(const TablePath& table_path) {
return FormatAsProtopathId(table_path.PathSegments());
}
std::string ColumnPathToProtopathId(const ColumnPath& column_path) {
return FormatAsProtopathId(column_path.PathSegments());
}
absl::StatusOr<TablePath> TablePathFromProtopathId(
absl::string_view protopath_id) {
ASSIGN_OR_RETURN(auto segments, ParseProtopathId(protopath_id));
return TablePath(std::move(segments));
}
absl::StatusOr<ColumnPath> ColumnPathFromProtopathId(
absl::string_view protopath_id) {
ASSIGN_OR_RETURN(auto segments, ParseProtopathId(protopath_id));
return ColumnPath(std::move(segments));
}
} | #include "arolla/naming/protopath_id.h"
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/naming/table.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::naming {
namespace {
TEST(Formatter, Format) {
TablePath root;
EXPECT_EQ(TablePathToProtopathId(root), "");
EXPECT_EQ(TablePathToProtopathId(root.Child("foo", true).Child("bar", false)),
"/foo[:]/bar");
EXPECT_EQ(
ColumnPathToProtopathId(
root.Child("foo", true).Child("bar", false).Column("baz", true)),
"/foo[:]/bar/baz[:]");
}
TEST(Formatter, FormatSizeColumn) {
TablePath root;
EXPECT_EQ(ColumnPathToProtopathId(
root.Child("foo", true).Child("bar", false).Size("baz")),
"/foo[:]/bar/baz/@size");
}
TEST(Parser, ParseRootTablePath) {
ASSERT_OK_AND_ASSIGN(TablePath root_path, TablePathFromProtopathId("/"));
EXPECT_EQ(root_path.FullName(), "");
ASSERT_OK_AND_ASSIGN(root_path, TablePathFromProtopathId(""));
EXPECT_EQ(root_path.FullName(), "");
}
TEST(Parser, ParseInvalidTablePath) {
EXPECT_FALSE(TablePathFromProtopathId("invalid/path").ok());
}
TEST(Parser, ParseNestedTablePath) {
ASSERT_OK_AND_ASSIGN(TablePath nested_path,
TablePathFromProtopathId("/query/doc"));
EXPECT_EQ(nested_path.FullName(), "/query/doc");
ASSERT_OK_AND_ASSIGN(nested_path, TablePathFromProtopathId("/query/doc/"));
EXPECT_EQ(nested_path.FullName(), "/query/doc");
ASSERT_OK_AND_ASSIGN(nested_path, TablePathFromProtopathId("/query"));
EXPECT_EQ(nested_path.FullName(), "/query");
ASSERT_OK_AND_ASSIGN(nested_path, TablePathFromProtopathId("/query/"));
EXPECT_EQ(nested_path.FullName(), "/query");
}
TEST(Parser, ParseNestedColumnPath) {
ASSERT_OK_AND_ASSIGN(ColumnPath nested_path,
ColumnPathFromProtopathId("/query[:]/query_text"));
EXPECT_EQ(nested_path.PathSegments(),
(std::vector<PathSegment>{{"query", true}, {"query_text", false}}));
ASSERT_OK_AND_ASSIGN(nested_path,
ColumnPathFromProtopathId("/query/query_text"));
EXPECT_EQ(
nested_path.PathSegments(),
(std::vector<PathSegment>{{"query", false}, {"query_text", false}}));
ASSERT_OK_AND_ASSIGN(nested_path, ColumnPathFromProtopathId("/query_count"));
EXPECT_EQ(nested_path.PathSegments(),
(std::vector<PathSegment>{{"query_count", false}}));
}
TEST(Parser, ParseTablePathWithIndexMarker) {
ASSERT_OK_AND_ASSIGN(TablePath path,
TablePathFromProtopathId("/query/doc[:]/url"));
EXPECT_EQ(path.PathSegments(),
(std::vector<PathSegment>{
{"query", false}, {"doc", true}, {"url", false}}));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/naming/protopath_id.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/naming/protopath_id_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ae4651cb-fc9e-4e95-b1e1-69875c74e05a | cpp | google/tensorstore | client_credentials | tensorstore/internal/grpc/client_credentials.cc | tensorstore/internal/grpc/client_credentials_test.cc | #include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/synchronization/mutex.h"
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/context_resource_provider.h"
namespace tensorstore {
namespace {
ABSL_CONST_INIT static absl::Mutex credentials_mu(absl::kConstInit);
const internal::ContextResourceRegistration<GrpcClientCredentials>
grpc_client_credentials_registration;
}
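// Stores `credentials` on the context's GrpcClientCredentials resource and
// returns true only if no credentials had been set before; the new value
// replaces any previous one either way.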
bool GrpcClientCredentials::Use(
tensorstore::Context context,
std::shared_ptr<::grpc::ChannelCredentials> credentials) {
auto resource = context.GetResource<GrpcClientCredentials>().value();
absl::MutexLock l(&credentials_mu);
bool result = (resource->credentials_ == nullptr);
resource->credentials_ = std::move(credentials);
return result;
}
std::shared_ptr<::grpc::ChannelCredentials>
GrpcClientCredentials::Resource::GetCredentials() {
absl::MutexLock l(&credentials_mu);
if (credentials_) return credentials_;
return grpc::InsecureChannelCredentials();
}
} | #include "tensorstore/internal/grpc/client_credentials.h"
#include <memory>
#include <gtest/gtest.h>
#include "grpcpp/security/credentials.h"
#include "tensorstore/context.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::GrpcClientCredentials;
TEST(GrpcClientCredentials, Use) {
auto use = grpc::experimental::LocalCredentials(LOCAL_TCP);
auto ctx = tensorstore::Context::Default();
EXPECT_TRUE(GrpcClientCredentials::Use(ctx, use));
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_EQ(a.get(), use.get());
}
TEST(GrpcClientCredentials, Default) {
auto ctx = tensorstore::Context::Default();
auto a = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
auto b = ctx.GetResource<GrpcClientCredentials>().value()->GetCredentials();
EXPECT_NE(a.get(), b.get());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/client_credentials.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/grpc/client_credentials_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
0c69dc71-9b09-41e8-9d9b-4d75be731448 | cpp | tensorflow/tensorflow | sparse_tensor | tensorflow/core/util/sparse/sparse_tensor.cc | tensorflow/core/util/sparse/sparse_tensor_test.cc | #include "tensorflow/core/util/sparse/sparse_tensor.h"
#include "tensorflow/core/lib/strings/strcat.h"
namespace tensorflow {
namespace sparse {
namespace {
int UnsafeGetDimsFromIx(const Tensor& ix) {
DCHECK(TensorShapeUtils::IsMatrix(ix.shape()));
return ix.dim_size(1);
}
Status GetDimsFromIx(const Tensor& ix, int* result) {
if (!TensorShapeUtils::IsMatrix(ix.shape())) {
return errors::InvalidArgument("indices must be a matrix, but got: ",
ix.shape().DebugString());
}
*result = UnsafeGetDimsFromIx(ix);
return Status();
}
}
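// Validates the indices/values/shape/order combination before adopting the
// tensors; all other Create overloads funnel into this one.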
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
const VarDimArray order,
SparseTensor* result) {
if (ix.dtype() != DT_INT64) {
return errors::InvalidArgument("indices must be type int64 but got: ",
ix.dtype());
}
if (!TensorShapeUtils::IsVector(vals.shape())) {
return errors::InvalidArgument("vals must be a vec, but got: ",
vals.shape().DebugString());
}
if (ix.shape().dim_size(0) != vals.shape().dim_size(0)) {
return errors::InvalidArgument(
"indices and values rows (indexing "
"dimension) must match. (indices = ",
ix.shape().dim_size(0), ", values = ", vals.shape().dim_size(0), ")");
}
int dims = 0;
TF_RETURN_IF_ERROR(GetDimsFromIx(ix, &dims));
if (order.size() != dims) {
return errors::InvalidArgument("Order length must be SparseTensor rank.");
}
if (shape.size() != dims) {
return errors::InvalidArgument("Shape rank must be SparseTensor rank.");
}
result->ix_ = std::move(ix);
result->vals_ = std::move(vals);
result->shape_.assign(shape.begin(), shape.end());
result->order_.assign(order.begin(), order.end());
result->dims_ = dims;
return absl::OkStatus();
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
UndefinedOrder(TensorShapeToVector(shape)), result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const VarDimArray shape,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), shape, UndefinedOrder(shape),
result);
}
Status SparseTensor::Create(Tensor ix, Tensor vals,
const TensorShape& shape,
const VarDimArray order,
SparseTensor* result) {
return Create(std::move(ix), std::move(vals), TensorShapeToVector(shape),
order, result);
}
SparseTensor::SparseTensor(Tensor ix, Tensor vals, const VarDimArray shape,
const VarDimArray order)
: ix_(std::move(ix)),
vals_(std::move(vals)),
shape_(shape.begin(), shape.end()),
order_(order.begin(), order.end()),
dims_(UnsafeGetDimsFromIx(ix_)) {
DCHECK_EQ(ix_.dtype(), DT_INT64)
<< "indices must be type int64 but got: " << ix_.dtype();
DCHECK(TensorShapeUtils::IsVector(vals_.shape()))
<< "vals must be a vec, but got: " << vals_.shape().DebugString();
DCHECK_EQ(ix_.shape().dim_size(0), vals_.shape().dim_size(0))
<< "indices and values rows (indexing dimension) must match.";
DCHECK_EQ(order.size(), dims_) << "Order length must be SparseTensor rank.";
DCHECK_EQ(shape.size(), dims_) << "Shape rank must be SparseTensor rank.";
}
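// Branch-free validity scan for rank-1 tensors in standard order: every
// index must be below shape_[0] and strictly greater than its predecessor.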
bool SparseTensor::IndicesValidVectorFastPath() const {
DCHECK_EQ(shape_.size(), 1);
DCHECK_EQ(order_[0], 0);
const int64_t max_index = shape_[0];
bool index_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const index_base_ptr = ix_t.data();
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int64_t index = index_base_ptr[n];
index_in_range_valid = index_in_range_valid & (index < max_index);
order_valid = order_valid & (index > prev_index);
prev_index = index;
}
return index_in_range_valid & order_valid;
}
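// Fast path for rank-2 tensors whose dims fit in int32: each int64
// coordinate is read as two 32-bit halves, so the zero-padding, range and
// ordering checks all stay branch-free.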
bool SparseTensor::IndicesValidMatrix32BitFastPath() const {
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const shape_ptr = shape_.data();
DCHECK_EQ(shape_.size(), 2);
DCHECK_EQ(order_[0], 0);
DCHECK_EQ(order_[1], 1);
DCHECK_LE(shape_ptr[0], std::numeric_limits<int32>::max());
DCHECK_LE(shape_ptr[1], std::numeric_limits<int32>::max());
const int32_t max_rows = static_cast<int32>(shape_ptr[0]);
const int32_t max_cols = static_cast<int32>(shape_ptr[1]);
bool row_zeros_valid = true;
bool row_in_range_valid = true;
bool col_zeros_valid = true;
bool col_in_range_valid = true;
bool order_valid = true;
int64_t prev_index = -1;
const int32* const index_base_ptr =
reinterpret_cast<const int32*>(ix_t.data());
const size_t kInt32ElementsPerRow = 4;
for (std::size_t n = 0; n < ix_t.dimension(0); ++n) {
const int32* const index_ptr = index_base_ptr + n * kInt32ElementsPerRow;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const int32 row_zeros = index_ptr[0];
const int32 row_32 = index_ptr[1];
const int32 col_zeros = index_ptr[2];
const int32 col_32 = index_ptr[3];
#else
const int32_t row_32 = index_ptr[0];
const int32_t row_zeros = index_ptr[1];
const int32_t col_32 = index_ptr[2];
const int32_t col_zeros = index_ptr[3];
#endif
row_zeros_valid = row_zeros_valid & (row_zeros == 0);
col_zeros_valid = col_zeros_valid & (col_zeros == 0);
row_in_range_valid =
row_in_range_valid & (row_32 >= 0) & (row_32 < max_rows);
col_in_range_valid =
col_in_range_valid & (col_32 >= 0) & (col_32 < max_cols);
const int64_t concatenated_index =
(static_cast<int64_t>(row_32) << 32) + col_32;
order_valid = order_valid & (concatenated_index > prev_index);
prev_index = concatenated_index;
}
return row_zeros_valid & row_in_range_valid & col_zeros_valid &
col_in_range_valid & order_valid;
}
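// General validation: indices must be in bounds, unique, and sorted by the
// tensor's order; the template parameter skips the indirection through
// order_ when the order is the standard one.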
template <bool standard_order>
Status SparseTensor::IndicesValidHelper() const {
const auto ix_t = ix_.matrix<int64_t>();
const int64_t* const shape_ptr = shape_.data();
for (std::size_t n = 0; n < num_entries(); ++n) {
bool valid = true;
bool different = false;
bool increasing = true;
if (n == 0) {
for (int di = 0; di < dims_; ++di) {
if (ix_t(n, di) < 0 || ix_t(n, di) >= shape_ptr[di]) valid = false;
}
different = true;
} else {
for (int di = 0; di < dims_; ++di) {
if (ix_t(n, di) < 0 || ix_t(n, di) >= shape_ptr[di]) valid = false;
int ordered_dim;
if (standard_order) {
ordered_dim = di;
} else {
ordered_dim = order_[di];
}
int64_t diff = ix_t(n, ordered_dim) - ix_t(n - 1, ordered_dim);
if (diff > 0) different = true;
if (!different && diff < 0) increasing = false;
}
}
if (TF_PREDICT_FALSE(!valid || !increasing || !different)) {
string index = strings::StrCat("indices[", n, "] = [");
for (int di = 0; di < dims_; ++di) {
strings::StrAppend(&index, ix_t(n, di), di < dims_ - 1 ? "," : "]");
}
if (!valid) {
return errors::InvalidArgument(index,
" is out of bounds: need 0 <= index < [",
absl::StrJoin(shape_, ","), "]");
}
if (!increasing) {
return errors::InvalidArgument(
index,
" is out of order. Many sparse ops require sorted indices.\n"
" Use `tf.sparse.reorder` to create a correctly ordered copy."
"\n\n");
}
if (!different) {
return errors::InvalidArgument(index, " is repeated");
}
}
}
return absl::OkStatus();
}
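// Dispatches to the cheapest applicable check: the vector or 32-bit matrix
// fast paths when the order is standard, otherwise the generic helper. An
// unset order is a failed precondition.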
Status SparseTensor::IndicesValid() const {
if (shape_.size() == 1 && IndicesValidVectorFastPath()) {
return absl::OkStatus();
}
bool standard_order = true;
for (size_t i = 0; i < order_.size(); ++i) {
if (order_[i] < 0) {
return errors::FailedPrecondition(
"Order was not provided. Provide an order at "
"construction time or run ReorderInPlace");
}
standard_order = standard_order && order_[i] == i;
}
if (standard_order) {
if (shape_.size() == 1) {
if (IndicesValidVectorFastPath()) {
return absl::OkStatus();
}
} else if (shape_.size() == 2 &&
shape_[0] <= std::numeric_limits<int32>::max() &&
shape_[1] <= std::numeric_limits<int32>::max()) {
if (IndicesValidMatrix32BitFastPath()) {
return absl::OkStatus();
}
}
return IndicesValidHelper<true>();
} else {
return IndicesValidHelper<false>();
}
}
}
} | #include "tensorflow/core/util/sparse/sparse_tensor.h"
#include <string>
#include <vector>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace sparse {
namespace {
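// Builds a 5x3 index matrix whose rows are deliberately out of lexicographic
// order so that sorting and validation paths are exercised.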
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex>
GetSimpleIndexTensor(int N, const int NDIM) {
Eigen::Tensor<int64_t, 2, Eigen::RowMajor, Eigen::DenseIndex> ix(N, NDIM);
ix(0, 0) = 0;
ix(0, 1) = 0;
ix(0, 2) = 0;
ix(1, 0) = 3;
ix(1, 1) = 0;
ix(1, 2) = 0;
ix(2, 0) = 2;
ix(2, 1) = 0;
ix(2, 2) = 0;
ix(3, 0) = 0;
ix(3, 1) = 1;
ix(3, 2) = 0;
ix(4, 0) = 0;
ix(4, 1) = 0;
ix(4, 2) = 2;
return ix;
}
TEST(SparseTensorTest, DimComparatorSorts) {
int64_t N = 5;
const int NDIM = 3;
auto ix = GetSimpleIndexTensor(N, NDIM);
TTypes<int64_t>::Matrix map(ix.data(), N, NDIM);
std::vector<int64_t> sorting(N);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::vector<int64_t> order{0, 1, 2};
std::vector<int64_t> shape{N, N, N};
DimComparator sorter(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
FixedDimComparator<3> sorter_fixed(map, order, shape);
std::sort(sorting.begin(), sorting.end(), sorter_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 4, 3, 2, 1}));
std::vector<int64_t> order1{2, 0, 1};
DimComparator sorter1(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
FixedDimComparator<3> sorter1_fixed(map, order1, shape);
for (std::size_t n = 0; n < N; ++n) sorting[n] = n;
std::sort(sorting.begin(), sorting.end(), sorter1_fixed);
EXPECT_EQ(sorting, std::vector<int64_t>({0, 3, 2, 1, 4}));
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesType) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT32, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidIndicesShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM, 1}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidValues) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N, 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidN) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N - 1}));
SparseTensor result;
EXPECT_EQ(SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1, 2},
&result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidOrder) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10, 10}), {0, 1}, &result)
.code(),
error::INVALID_ARGUMENT);
}
TEST(SparseTensorTest, SparseTensorInvalidShape) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
SparseTensor result;
EXPECT_EQ(
SparseTensor::Create(ix, vals, TensorShape({10, 10}), {0, 1, 2}, &result)
.code(),
error::INVALID_ARGUMENT);
}
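// End-to-end construction: out-of-order indices are flagged by
// IndicesValid(), and Reorder() permutes indices and values in place.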
TEST(SparseTensorTest, SparseTensorConstruction) {
int N = 5;
const int NDIM = 3;
auto ix_c = GetSimpleIndexTensor(N, NDIM);
Eigen::Tensor<tstring, 1, Eigen::RowMajor> vals_c(N);
vals_c(0) = "hi0";
vals_c(1) = "hi1";
vals_c(2) = "hi2";
vals_c(3) = "hi3";
vals_c(4) = "hi4";
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
vals_t = vals_c;
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ(
"indices[2] = [2,0,0] is out of order. "
"Many sparse ops require sorted indices.\n"
" Use `tf.sparse.reorder` to create a correctly ordered copy."
"\n\n",
st_indices_valid.message());
st.Reorder<tstring>({2, 0, 1});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi3");
EXPECT_EQ(vals_t(2), "hi2");
EXPECT_EQ(vals_t(3), "hi1");
EXPECT_EQ(vals_t(4), "hi4");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({0, 1, 2});
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(vals_t(0), "hi0");
EXPECT_EQ(vals_t(1), "hi4");
EXPECT_EQ(vals_t(2), "hi3");
EXPECT_EQ(vals_t(3), "hi2");
EXPECT_EQ(vals_t(4), "hi1");
ix_t = ix_c;
vals_t = vals_c;
st.Reorder<tstring>({2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
}
TEST(SparseTensorTest, EmptySparseTensorAllowed) {
int N = 0;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
std::vector<int64_t> shape{10, 10, 10};
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), order);
std::vector<int64_t> new_order{1, 0, 2};
st.Reorder<tstring>(new_order);
TF_EXPECT_OK(st.IndicesValid());
EXPECT_EQ(st.order(), new_order);
}
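// Randomized stress test: after Reorder() with any full permutation the
// indices must validate as sorted and duplicate-free.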
TEST(SparseTensorTest, SortingWorksCorrectly) {
int N = 30;
const int NDIM = 4;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({1000, 1000, 1000, 1000});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
auto ix_t = ix.matrix<int64_t>();
for (int n = 0; n < 100; ++n) {
ix_t = ix_t.random(Eigen::internal::UniformRandomGenerator<int64_t>(n + 1));
ix_t = ix_t.abs() % 1000;
st.Reorder<tstring>({0, 1, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 2, 1, 0});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({1, 0, 2, 3});
TF_EXPECT_OK(st.IndicesValid());
st.Reorder<tstring>({3, 0, 2, 1});
TF_EXPECT_OK(st.IndicesValid());
}
}
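// Duplicate index rows must be reported even when the indices are sorted.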
TEST(SparseTensorTest, ValidateIndicesFindsInvalid) {
int N = 2;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
Eigen::Tensor<int64_t, 2, Eigen::RowMajor> ix_orig(N, NDIM);
ix_orig(0, 0) = 0;
ix_orig(0, 1) = 0;
ix_orig(0, 2) = 0;
ix_orig(1, 0) = 0;
ix_orig(1, 1) = 0;
ix_orig(1, 2) = 0;
auto ix_t = ix.matrix<int64_t>();
ix_t = ix_orig;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,0] is repeated", st_indices_valid.message());
ix_orig(1, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_orig(0, 2) = 1;
ix_t = ix_orig;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("indices[1] = [0,0,1] is repeated", st_indices_valid.message());
}
TEST(SparseTensorTest, SparseTensorCheckBoundaries) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
ix.matrix<int64_t>() = ix_t;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
ix_t(0, 0) = 11;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
Status st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[11,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = -1;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
st_indices_valid = st.IndicesValid();
EXPECT_FALSE(st_indices_valid.ok());
EXPECT_EQ("[-1,0,0] is out of bounds: need 0 <= index < [10,10,10]",
st_indices_valid.message().substr(13));
ix_t(0, 0) = 0;
ix.matrix<int64_t>() = ix_t;
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
}
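// ToDense() scatters values into a preallocated dense tensor; positions with
// no entry keep the default value (the empty string here).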
TEST(SparseTensorTest, SparseTensorToDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({4, 4, 5}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
}
TEST(SparseTensorTest, SparseTensorToLargerDenseTensor) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_t = GetSimpleIndexTensor(N, NDIM);
auto vals_t = vals.vec<tstring>();
ix.matrix<int64_t>() = ix_t;
vals_t(0) = "hi0";
vals_t(1) = "hi1";
vals_t(2) = "hi2";
vals_t(3) = "hi3";
vals_t(4) = "hi4";
TensorShape shape({4, 4, 5});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
Tensor dense(DT_STRING, TensorShape({10, 10, 10}));
st.ToDense<tstring>(&dense);
auto dense_t = dense.tensor<tstring, 3>();
Eigen::array<Eigen::DenseIndex, NDIM> ix_n;
for (int n = 0; n < N; ++n) {
for (int d = 0; d < NDIM; ++d) ix_n[d] = ix_t(n, d);
EXPECT_EQ(dense_t(ix_n), vals_t(n));
}
EXPECT_EQ(dense_t(0, 0, 1), "");
EXPECT_EQ(dense_t(0, 0, 3), "");
EXPECT_EQ(dense_t(3, 3, 3), "");
EXPECT_EQ(dense_t(3, 3, 4), "");
EXPECT_EQ(dense_t(9, 0, 0), "");
EXPECT_EQ(dense_t(9, 0, 9), "");
EXPECT_EQ(dense_t(9, 9, 9), "");
}
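// group() iterates entries bucketed by a prefix of the ordered dimensions;
// each Group exposes its key plus views over the member indices and values.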
TEST(SparseTensorTest, SparseTensorGroup) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_INT32, TensorShape({N}));
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<int32>();
ix_t = GetSimpleIndexTensor(N, NDIM);
vals_t(0) = 1;
vals_t(1) = 2;
vals_t(2) = 3;
vals_t(3) = 4;
vals_t(4) = 5;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
st.Reorder<int32>(order);
std::vector<std::vector<int64_t> > groups;
std::vector<TTypes<int64_t>::UnalignedConstMatrix> grouped_indices;
std::vector<TTypes<int32>::UnalignedVec> grouped_values;
auto gi = st.group({0});
for (const auto& g : gi) {
groups.push_back(g.group());
VLOG(1) << "Group: " << absl::StrJoin(g.group(), ",");
VLOG(1) << "Indices: " << g.indices();
VLOG(1) << "Values: " << g.values<int32>();
grouped_indices.push_back(g.indices());
grouped_values.push_back(g.values<int32>());
}
EXPECT_EQ(groups.size(), 3);
EXPECT_EQ(groups[0], std::vector<int64_t>({0}));
EXPECT_EQ(groups[1], std::vector<int64_t>({2}));
EXPECT_EQ(groups[2], std::vector<int64_t>({3}));
std::vector<Eigen::Tensor<int64_t, 2, Eigen::RowMajor> > expected_indices;
std::vector<Eigen::Tensor<int32, 1, Eigen::RowMajor> > expected_vals;
expected_indices.emplace_back(3, NDIM);
expected_vals.emplace_back(3);
expected_indices[0].setZero();
expected_indices[0](1, 2) = 2;
expected_indices[0](2, 1) = 1;
expected_vals[0].setConstant(-1);
expected_vals[0](0) = 1;
expected_vals[0](1) = 5;
expected_vals[0](2) = 4;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[1].setZero();
expected_indices[1](0, 0) = 2;
expected_vals[1](0) = 3;
expected_indices.emplace_back(1, NDIM);
expected_vals.emplace_back(1);
expected_indices[2].setZero();
expected_indices[2](0, 0) = 3;
expected_vals[2](0) = 2;
for (std::size_t gix = 0; gix < groups.size(); ++gix) {
auto gi_t = grouped_indices[gix];
Eigen::Tensor<bool, 0, Eigen::RowMajor> eval =
(gi_t == expected_indices[gix]).all();
EXPECT_TRUE(eval()) << gix << " indices: " << gi_t << " vs. "
<< expected_indices[gix];
auto gv_t = grouped_values[gix];
eval = (gv_t == expected_vals[gix]).all();
EXPECT_TRUE(eval()) << gix << " values: " << gv_t << " vs. "
<< expected_vals[gix];
}
}
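// Concat joins tensors along the primary dimension, offsetting that index
// coordinate by the cumulative shape of the preceding inputs; mixing input
// orders yields an unknown (-1) output order.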
TEST(SparseTensorTest, Concat) {
int N = 5;
const int NDIM = 3;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
auto ix_c = GetSimpleIndexTensor(N, NDIM);
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
ix_t = ix_c;
TensorShape shape({10, 10, 10});
std::vector<int64_t> order{0, 1, 2};
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
EXPECT_FALSE(st.IndicesValid().ok());
st.Reorder<tstring>(order);
TF_EXPECT_OK(st.IndicesValid());
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st, st});
EXPECT_EQ(concatted.order(), st.order());
absl::InlinedVector<int64_t, 8UL> expected_shape{40, 10, 10};
EXPECT_EQ(concatted.shape(), expected_shape);
EXPECT_EQ(concatted.num_entries(), 4 * N);
TF_EXPECT_OK(concatted.IndicesValid());
auto conc_ix_t = concatted.indices().matrix<int64_t>();
auto conc_vals_t = concatted.values().vec<tstring>();
for (int n = 0; n < 4; ++n) {
for (int i = 0; i < N; ++i) {
EXPECT_EQ(conc_ix_t(n * N + i, 0), 10 * n + ix_t(i, 0));
EXPECT_EQ(conc_ix_t(n * N + i, 1), ix_t(i, 1));
EXPECT_EQ(conc_ix_t(n * N + i, 2), ix_t(i, 2));
EXPECT_EQ(conc_vals_t(n * N + i), vals_t(i));
}
}
SparseTensor st_ooo;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 2, 1},
&st_ooo));
SparseTensor conc_ooo = SparseTensor::Concat<tstring>({st, st, st, st_ooo});
std::vector<int64_t> expected_ooo{-1, -1, -1};
EXPECT_EQ(conc_ooo.order(), expected_ooo);
EXPECT_EQ(conc_ooo.shape(), expected_shape);
EXPECT_EQ(conc_ooo.num_entries(), 4 * N);
}
TEST(SparseTensorTest, ConcatEmptyN) {
constexpr int N = 0;
constexpr int NDIM = 2;
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape({10, 10});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, {0, 1}, &st));
SparseTensor concatted = SparseTensor::Concat<tstring>({st, st, st});
EXPECT_EQ(concatted.num_entries(), 0);
}
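// Split partitions the tensor into equal slices along one dimension and
// rebases each slice's indices to start at zero.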
TEST(SparseTensorTest, Split) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<SparseTensor> st_list;
TF_ASSERT_OK(SparseTensor::Split<int64_t>(st, 0, 2, &st_list));
EXPECT_EQ(st_list.size(), 2);
auto expected_shape = absl::InlinedVector<int64_t, 8UL>{2, 3};
EXPECT_EQ(st_list[0].shape(), expected_shape);
EXPECT_EQ(st_list[0].values().NumElements(), 3);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(0), 1);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(1), 2);
EXPECT_EQ(st_list[0].values().vec<int64_t>()(2), 3);
EXPECT_EQ(st_list[0].indices().NumElements(), 6);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(st_list[0].indices().matrix<int64_t>()(2, 1), 2);
EXPECT_EQ(st_list[1].shape(), expected_shape);
EXPECT_EQ(st_list[1].values().NumElements(), 1);
EXPECT_EQ(st_list[1].values().vec<int64_t>()(0), 4);
EXPECT_EQ(st_list[1].indices().NumElements(), 2);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 0), 1);
EXPECT_EQ(st_list[1].indices().matrix<int64_t>()(0, 1), 0);
}
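// Slice keeps only the entries inside [start, start + size) and rebases
// their indices relative to start.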
TEST(SparseTensorTest, Slice) {
const int N = 4;
const int DIM = 2;
Tensor ids(DT_INT64, TensorShape({N, DIM}));
Tensor vals(DT_INT64, TensorShape({N}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
ids.matrix<int64_t>()(2, 0) = 1;
ids.matrix<int64_t>()(2, 1) = 2;
ids.matrix<int64_t>()(3, 0) = 3;
ids.matrix<int64_t>()(3, 1) = 0;
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
vals.vec<int64_t>()(2) = 3;
vals.vec<int64_t>()(3) = 4;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals, TensorShape({4, 3}), &st));
std::vector<int64_t> start(2, 0);
std::vector<int64_t> size(2);
size[0] = 2;
size[1] = 3;
TF_ASSERT_OK_AND_ASSIGN(SparseTensor slice,
SparseTensor::Slice<int64_t>(st, start, size));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({2, 3}));
EXPECT_EQ(slice.values().NumElements(), 3);
EXPECT_EQ(slice.values().vec<int64_t>()(0), 1);
EXPECT_EQ(slice.values().vec<int64_t>()(1), 2);
EXPECT_EQ(slice.values().vec<int64_t>()(2), 3);
EXPECT_EQ(slice.indices().NumElements(), 6);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 0), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(0, 1), 0);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(1, 1), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 0), 1);
EXPECT_EQ(slice.indices().matrix<int64_t>()(2, 1), 2);
}
TEST(SparseTensorTest, SliceReducesOutputDimension) {
const int num_rows = 2;
const int num_columns = 2;
Tensor ids(DT_INT64, TensorShape({num_rows, num_columns}));
ids.matrix<int64_t>()(0, 0) = 0;
ids.matrix<int64_t>()(0, 1) = 0;
ids.matrix<int64_t>()(1, 0) = 1;
ids.matrix<int64_t>()(1, 1) = 1;
Tensor vals(DT_INT64, TensorShape({2}));
vals.vec<int64_t>()(0) = 1;
vals.vec<int64_t>()(1) = 2;
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ids, vals,
TensorShape({num_rows, num_columns}), &st));
TF_ASSERT_OK_AND_ASSIGN(
SparseTensor slice,
SparseTensor::Slice<int64_t>(st, {num_rows + 1, 1}, {1, num_columns}));
EXPECT_EQ(TensorShape(slice.shape()), TensorShape({0, 1}));
}
TEST(SparseTensorTest, Dim0SparseTensorToDenseTensor) {
Tensor ix(DT_INT64, TensorShape({1, 0}));
Tensor vals(DT_INT32, TensorShape({1}));
vals.scalar<int32>()() = 5;
TensorShape shape({});
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, &st));
Tensor dense(DT_INT32, TensorShape({}));
st.ToDense<int32>(&dense);
EXPECT_EQ(dense.scalar<int32>()(), 5);
}
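// The benchmarks time Reorder() for float and string values across several
// nonzero counts and ranks; index regeneration is excluded from the timed
// region via PauseTiming()/ResumeTiming().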
static void BM_SparseReorderFloat(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_FLOAT, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
auto ix_t = ix.matrix<int64_t>();
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<float>(reorder);
}
}
static void BM_SparseReorderString(::testing::benchmark::State& state) {
int N32 = state.range(0);
int NDIM32 = state.range(1);
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
const int64_t NDIM = static_cast<int64_t>(NDIM32);
const int64_t N = static_cast<int64_t>(N32);
Tensor ix(DT_INT64, TensorShape({N, NDIM}));
Tensor vals(DT_STRING, TensorShape({N}));
TensorShape shape;
std::vector<int64_t> order;
auto ix_t = ix.matrix<int64_t>();
auto vals_t = vals.vec<tstring>();
for (int i = 0; i < N32; ++i) {
int len = rnd.Rand32() % 1000;
vals_t(i).resize(len);
}
for (int d = 0; d < NDIM32; ++d) {
shape.AddDim(1000);
order.push_back(d);
}
std::vector<int64_t> reorder;
reorder.push_back(1);
reorder.push_back(0);
for (int d = 2; d < NDIM32; ++d) {
reorder.push_back(d);
}
for (auto s : state) {
state.PauseTiming();
for (int64_t i = 0; i < N; ++i) {
for (int d = 0; d < NDIM32; ++d) {
ix_t(i, d) = rnd.Rand64() % 1000;
}
}
SparseTensor st;
TF_ASSERT_OK(SparseTensor::Create(ix, vals, shape, order, &st));
state.ResumeTiming();
st.Reorder<tstring>(reorder);
}
}
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 2);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(10000, 3);
BENCHMARK(BM_SparseReorderFloat)->UseRealTime()->ArgPair(100000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 2);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(100, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(1000, 3);
BENCHMARK(BM_SparseReorderString)->UseRealTime()->ArgPair(10000, 3);
}  // namespace
}  // namespace sparse
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/sparse/sparse_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/sparse/sparse_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98f4b3b6-d6bd-46a5-be18-068ea7853607 | cpp | tensorflow/tensorflow | function_api_info | tensorflow/core/grappler/optimizers/function_api_info.cc | tensorflow/core/grappler/optimizers/function_api_info_test.cc | #include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace grappler {
FunctionApiInfo::FunctionApiInfo() {}
FunctionApiInfo::~FunctionApiInfo() {}
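// Reads the API-describing function attributes (implemented interface,
// preferred device, forward/backward pairing) and records the signature's
// input and output dtypes.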
Status FunctionApiInfo::Init(const FunctionDef& function_def) {
function_type_ = FunctionApiInfo::FunctionType::INFERENCE;
for (const auto& attr : function_def.attr()) {
if (attr.first == "api_preferred_device") {
preferred_device_ = attr.second.s();
}
if (attr.first == "api_implements") {
interface_name_ = attr.second.s();
}
if (attr.first == "forward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::BACKWARD;
pairing_function_name_ = attr.second.s();
}
if (attr.first == "backward_function_name") {
function_type_ = FunctionApiInfo::FunctionType::FORWARD;
pairing_function_name_ = attr.second.s();
}
}
input_arg_dtypes_.reserve(function_def.signature().input_arg_size());
for (const auto& input_arg : function_def.signature().input_arg()) {
input_arg_dtypes_.emplace_back(input_arg.type());
}
output_arg_dtypes_.reserve(function_def.signature().output_arg_size());
for (const auto& output_arg : function_def.signature().output_arg()) {
output_arg_dtypes_.emplace_back(output_arg.type());
}
if (interface_name_.empty() && !preferred_device_.empty()) {
return errors::InvalidArgument(
"Function '", function_def.signature().name(),
"' has a preferred device, but does not implement an interface");
}
return absl::OkStatus();
}
const string& FunctionApiInfo::preferred_device() const {
return preferred_device_;
}
const string& FunctionApiInfo::interface_name() const {
return interface_name_;
}
const FunctionApiInfo::FunctionType FunctionApiInfo::function_type() const {
return function_type_;
}
const string& FunctionApiInfo::pairing_function_name() const {
return pairing_function_name_;
}
const DataTypeVector& FunctionApiInfo::input_arg_dtypes() const {
return input_arg_dtypes_;
}
const DataTypeVector& FunctionApiInfo::output_arg_dtypes() const {
return output_arg_dtypes_;
}
FunctionLibraryApiInfo::FunctionLibraryApiInfo() {}
FunctionLibraryApiInfo::~FunctionLibraryApiInfo() {}
namespace {
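// Two ArgDefs are considered interchangeable only if every type-related
// field matches.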
bool IsSameArgDef(const OpDef::ArgDef& arg1, const OpDef::ArgDef& arg2) {
if (arg1.type() != arg2.type()) return false;
if (arg1.type_attr() != arg2.type_attr()) return false;
if (arg1.number_attr() != arg2.number_attr()) return false;
if (arg1.type_list_attr() != arg2.type_list_attr()) return false;
if (arg1.is_ref() != arg2.is_ref()) return false;
return true;
}
bool IsSameSignature(const FunctionDef& f1, const FunctionDef& f2,
const bool check_inputs, const bool check_outputs) {
const auto& sig1 = f1.signature();
const auto& sig2 = f2.signature();
if (check_inputs) {
if (sig1.input_arg_size() != sig2.input_arg_size()) return false;
for (int k = 0; k < sig1.input_arg_size(); ++k) {
if (!IsSameArgDef(sig1.input_arg(k), sig2.input_arg(k))) return false;
}
}
if (check_outputs) {
if (f1.ret().size() != f2.ret().size()) return false;
if (sig1.output_arg_size() != sig2.output_arg_size()) return false;
for (int k = 0; k < sig1.output_arg_size(); ++k) {
if (!IsSameArgDef(sig1.output_arg(k), sig2.output_arg(k))) return false;
}
}
return true;
}
Status ValidateSignature(const string& interface_name,
const std::vector<const FunctionDef*>& equiv_funcs,
const FunctionApiInfo::FunctionType function_type) {
if (equiv_funcs.size() < 2) return absl::OkStatus();
for (size_t k = 1; k < equiv_funcs.size(); ++k) {
const bool check_input =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::FORWARD);
const bool check_output =
(function_type == FunctionApiInfo::FunctionType::INFERENCE ||
function_type == FunctionApiInfo::FunctionType::BACKWARD);
if (!IsSameSignature(*equiv_funcs[0], *equiv_funcs[k], check_input,
check_output)) {
return errors::InvalidArgument(
"Functions '", equiv_funcs[0]->signature().name(), "' and '",
equiv_funcs[k]->signature().name(), "' both implement '",
interface_name, "' but their signatures do not match.");
}
}
return absl::OkStatus();
}
Status ValidateSignatures(
const std::unordered_map<string, std::vector<const FunctionDef*>>&
intf_to_func,
const FunctionApiInfo::FunctionType function_type) {
for (const auto& item : intf_to_func)
TF_RETURN_IF_ERROR(
ValidateSignature(item.first, item.second, function_type));
return absl::OkStatus();
}
}  // namespace
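// Buckets every annotated function by interface name and function type, then
// checks that all implementations of an interface have compatible signatures.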
Status FunctionLibraryApiInfo::Init(
const FunctionDefLibrary& function_library) {
std::unordered_map<string, std::vector<const FunctionDef*>> infer_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> fwd_funcs;
std::unordered_map<string, std::vector<const FunctionDef*>> bwd_funcs;
for (const auto& function : function_library.function()) {
std::unique_ptr<FunctionApiInfo> func_info(new FunctionApiInfo);
TF_RETURN_IF_ERROR(func_info->Init(function));
if (func_info->interface_name().empty()) continue;
const string& function_name = function.signature().name();
const string& interface_name = func_info->interface_name();
VLOG(3) << "Got " << func_info->function_type()
<< " function: " << function_name
<< " with interface: " << interface_name;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
intf_to_inference_funcs_[interface_name].emplace_back(function_name);
infer_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::FORWARD:
intf_to_forward_funcs_[interface_name].emplace_back(function_name);
fwd_funcs[interface_name].emplace_back(&function);
break;
case FunctionApiInfo::FunctionType::BACKWARD:
intf_to_backward_funcs_[interface_name].emplace_back(function_name);
bwd_funcs[interface_name].emplace_back(&function);
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
func_info_[function_name] = std::move(func_info);
}
TF_RETURN_IF_ERROR(ValidateSignatures(
infer_funcs, FunctionApiInfo::FunctionType::INFERENCE));
TF_RETURN_IF_ERROR(
ValidateSignatures(fwd_funcs, FunctionApiInfo::FunctionType::FORWARD));
TF_RETURN_IF_ERROR(
ValidateSignatures(bwd_funcs, FunctionApiInfo::FunctionType::BACKWARD));
return absl::OkStatus();
}
Status FunctionLibraryApiInfo::GetEquivalentImplementations(
const string& function_name, std::vector<string>* other_functions) const {
const auto func_it = func_info_.find(function_name);
if (func_it == func_info_.end()) return absl::OkStatus();
const FunctionApiInfo* func_info = func_it->second.get();
absl::flat_hash_map<string, std::vector<string>>::const_iterator it;
switch (func_info->function_type()) {
case FunctionApiInfo::FunctionType::INFERENCE:
it = intf_to_inference_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::FORWARD:
it = intf_to_forward_funcs_.find(func_info->interface_name());
break;
case FunctionApiInfo::FunctionType::BACKWARD:
it = intf_to_backward_funcs_.find(func_info->interface_name());
break;
default:
return errors::InvalidArgument("Unrecognized function type: ",
func_info->function_type());
}
for (const auto& func_name : it->second) {
if (func_name == function_name) continue;
other_functions->emplace_back(func_name);
}
return absl::OkStatus();
}
const FunctionApiInfo* FunctionLibraryApiInfo::GetApiInfo(
const string& function_name) const {
const auto it = func_info_.find(function_name);
if (it == func_info_.end()) return nullptr;
return it->second.get();
}
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/function_api_info.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void SetArg(const string& name, const string& type_name,
OpDef::ArgDef* arg_def) {
arg_def->set_name(name);
arg_def->set_type_attr(type_name);
}
typedef std::pair<string, string> ArgSpec;
void SetArgs(const std::vector<ArgSpec>& input_args_spec,
const std::vector<ArgSpec>& output_args_spec, OpDef* sig) {
for (const auto& arg_spec : input_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_input_arg());
for (const auto& arg_spec : output_args_spec)
SetArg(arg_spec.first, arg_spec.second, sig->add_output_arg());
}
void PopulateFunction(const string& name, const string& api_interface_name,
const string& preferred_device,
const std::vector<ArgSpec>& input_args,
const std::vector<ArgSpec>& output_args,
const string& forward_function_name,
const string& backward_function_name,
FunctionDef* func_def) {
OpDef* sig = func_def->mutable_signature();
sig->set_name(name);
SetArgs(input_args, output_args, sig);
auto* func_attr = func_def->mutable_attr();
if (!api_interface_name.empty())
(*func_attr)["api_implements"].set_s(api_interface_name);
if (!preferred_device.empty())
(*func_attr)["api_preferred_device"].set_s(preferred_device);
if (!forward_function_name.empty())
(*func_attr)["forward_function_name"].set_s(forward_function_name);
if (!backward_function_name.empty())
(*func_attr)["backward_function_name"].set_s(backward_function_name);
}
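// Builds a library with two implementations of the "DoStuff" interface (CPU
// and GPU), a single-implementation interface, and two untagged functions.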
void PopulateSampleLibrary(const bool mismatch_args,
FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> func_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> func_wrong_args{{"in1", "int32"},
{"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU",
mismatch_args ? func_wrong_args : func_args, output_args, "",
"", func_lib->add_function());
PopulateFunction("DoThings", "DoThings", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("OneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
PopulateFunction("AnotherOneOff", "", "", func_args, output_args, "", "",
func_lib->add_function());
}
void PopulateComplexLibrary(FunctionDefLibrary* func_lib) {
const std::vector<ArgSpec> input_args{{"in1", "float32"}, {"in2", "int32"}};
const std::vector<ArgSpec> output_args{{"out", "float32"}};
const std::vector<ArgSpec> output_with_state{
{"out", "float32"}, {"state1", "int32"}, {"state2", "int32"}};
PopulateFunction("DoStuffCpu", "DoStuff", "CPU", input_args, output_args, "",
"DoStuffCpu_gradient", func_lib->add_function());
PopulateFunction("DoStuffCpu_gradient", "DoStuff", "CPU", output_args,
input_args, "DoStuffCpu", "", func_lib->add_function());
PopulateFunction("DoStuffGpu", "DoStuff", "GPU", input_args,
output_with_state, "", "DoStuffGpu_gradient",
func_lib->add_function());
PopulateFunction("DoStuffGpu_gradient", "DoStuff", "GPU", output_with_state,
input_args, "DoStuffGpu", "", func_lib->add_function());
}
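// True iff GetEquivalentImplementations reports exactly the expected set of
// alternatives, ignoring order.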
bool CheckEquivImpl(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name,
const std::vector<string>& expected_other) {
std::vector<string> other_impl;
Status status =
lib_api_info.GetEquivalentImplementations(func_name, &other_impl);
EXPECT_EQ(status, absl::OkStatus());
const std::unordered_set<string> actual(other_impl.begin(), other_impl.end());
const std::unordered_set<string> expected(expected_other.begin(),
expected_other.end());
return actual == expected;
}
string GetInterfaceName(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->interface_name();
}
string GetPreferredDevice(const FunctionLibraryApiInfo& lib_api_info,
const string& func_name) {
auto* info = lib_api_info.GetApiInfo(func_name);
CHECK_NOTNULL(info);
return info->preferred_device();
}
TEST(FunctionApiInfoTest, ParseTags) {
FunctionDefLibrary func_lib;
PopulateSampleLibrary(/*mismatch_args=*/false, &func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoThings", GetInterfaceName(lib_api_info, "DoThings"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("", GetPreferredDevice(lib_api_info, "DoThings"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "OneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "AnotherOneOff", {}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoThings", {}));
}
TEST(FunctionApiInfoTest, ComplexFunctionLib) {
FunctionDefLibrary func_lib;
PopulateComplexLibrary(&func_lib);
FunctionLibraryApiInfo lib_api_info;
TF_ASSERT_OK(lib_api_info.Init(func_lib));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu_gradient"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu_gradient"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu_gradient",
{"DoStuffGpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu_gradient",
{"DoStuffCpu_gradient"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "Undefined", {}));
}
TEST(FunctionApiInfoTest, MismatchedArguments) {
FunctionDefLibrary func_lib;
PopulateSampleLibrary(/*mismatch_args=*/true, &func_lib);
FunctionLibraryApiInfo lib_api_info;
const Status ret = lib_api_info.Init(func_lib);
EXPECT_FALSE(ret.ok());
}
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_api_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/function_api_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
49a0b7e1-3c93-45c7-a6f6-1ca098732f77 | cpp | tensorflow/tensorflow | nnapi_handler | tensorflow/lite/nnapi/nnapi_handler.cc | tensorflow/lite/nnapi/nnapi_handler_test.cc | #include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdio>
#include <cstring>
#include <string>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
const char NnApiHandler::kNnapiReferenceDeviceName[] = "nnapi-reference";
const int NnApiHandler::kNnapiReferenceDevice = 1;
const int NnApiHandler::kNnapiDevice = 2;
char* NnApiHandler::nnapi_device_name_ = nullptr;
int NnApiHandler::nnapi_device_feature_level_;
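// Snapshots the real NnApi struct on first use so tests can stub individual
// entry points and later Reset() back to the original.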
const NnApi* NnApiPassthroughInstance() {
static const NnApi orig_nnapi_copy = *NnApiImplementation();
return &orig_nnapi_copy;
}
NnApiHandler* NnApiHandler::Instance() {
NnApiPassthroughInstance();
static NnApiHandler handler{const_cast<NnApi*>(NnApiImplementation())};
return &handler;
}
void NnApiHandler::Reset() {
*nnapi_ = *NnApiPassthroughInstance();
}
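// Optionally nulls out every entry point introduced after the requested SDK
// level, mimicking what a device running that Android version exposes.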
void NnApiHandler::SetAndroidSdkVersion(int version,
bool set_unsupported_ops_to_null) {
nnapi_->android_sdk_version = version;
nnapi_->nnapi_runtime_feature_level = version;
if (!set_unsupported_ops_to_null) {
return;
}
if (version < 29) {
nnapi_->ANeuralNetworks_getDeviceCount = nullptr;
nnapi_->ANeuralNetworks_getDevice = nullptr;
nnapi_->ANeuralNetworksDevice_getName = nullptr;
nnapi_->ANeuralNetworksDevice_getVersion = nullptr;
nnapi_->ANeuralNetworksDevice_getFeatureLevel = nullptr;
nnapi_->ANeuralNetworksDevice_getType = nullptr;
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_createForDevices = nullptr;
nnapi_->ANeuralNetworksCompilation_setCaching = nullptr;
nnapi_->ANeuralNetworksExecution_compute = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandRank = nullptr;
nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions = nullptr;
nnapi_->ANeuralNetworksBurst_create = nullptr;
nnapi_->ANeuralNetworksBurst_free = nullptr;
nnapi_->ANeuralNetworksExecution_burstCompute = nullptr;
nnapi_->ANeuralNetworksMemory_createFromAHardwareBuffer = nullptr;
nnapi_->ANeuralNetworksExecution_setMeasureTiming = nullptr;
nnapi_->ANeuralNetworksExecution_getDuration = nullptr;
nnapi_->ANeuralNetworksDevice_getExtensionSupport = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperandType = nullptr;
nnapi_->ANeuralNetworksModel_getExtensionOperationType = nullptr;
nnapi_->ANeuralNetworksModel_setOperandExtensionData = nullptr;
}
if (version < 28) {
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16 = nullptr;
}
}
void NnApiHandler::SetDeviceName(const std::string& name) {
delete[] nnapi_device_name_;
nnapi_device_name_ = new char[name.size() + 1];
std::strcpy(nnapi_device_name_, name.c_str());
}
void NnApiHandler::GetDeviceNameReturnsName(const std::string& name) {
NnApiHandler::SetDeviceName(name);
GetDeviceNameReturns<0>();
}
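// Installs stubs that report two devices: the reference device at index 0
// and the named accelerator at index 1 with the given feature level.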
void NnApiHandler::SetNnapiSupportedDevice(const std::string& name,
int feature_level) {
NnApiHandler::SetDeviceName(name);
nnapi_device_feature_level_ = feature_level;
GetDeviceCountReturnsCount<2>();
nnapi_->ANeuralNetworks_getDevice =
[](uint32_t devIndex, ANeuralNetworksDevice** device) -> int {
if (devIndex > 1) {
return ANEURALNETWORKS_BAD_DATA;
}
if (devIndex == 1) {
*device =
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice);
} else {
*device = reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice);
}
return ANEURALNETWORKS_NO_ERROR;
};
nnapi_->ANeuralNetworksDevice_getName =
[](const ANeuralNetworksDevice* device, const char** name) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*name = NnApiHandler::nnapi_device_name_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*name = NnApiHandler::kNnapiReferenceDeviceName;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
nnapi_->ANeuralNetworksDevice_getFeatureLevel =
[](const ANeuralNetworksDevice* device, int64_t* featureLevel) -> int {
if (device ==
reinterpret_cast<ANeuralNetworksDevice*>(NnApiHandler::kNnapiDevice)) {
*featureLevel = NnApiHandler::nnapi_device_feature_level_;
return ANEURALNETWORKS_NO_ERROR;
}
if (device == reinterpret_cast<ANeuralNetworksDevice*>(
NnApiHandler::kNnapiReferenceDevice)) {
*featureLevel = 1000;
return ANEURALNETWORKS_NO_ERROR;
}
return ANEURALNETWORKS_BAD_DATA;
};
}
}  // namespace nnapi
} | #include "tensorflow/lite/nnapi/nnapi_handler.h"
#include <cstdint>
#include <cstdio>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
namespace tflite {
namespace nnapi {
using testing::Eq;
using testing::Ne;
using testing::NotNull;
void ExpectEquals(const NnApi& left, const NnApi& right);
class NnApiHandlerTest : public ::testing::Test {
protected:
~NnApiHandlerTest() override { NnApiHandler::Instance()->Reset(); }
};
TEST_F(NnApiHandlerTest, ShouldAlterNnApiInstanceBehaviour) {
const NnApi* nnapi = NnApiImplementation();
const auto device_count_stub = [](uint32_t* device_count) -> int {
*device_count = 999;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_stub);
ASSERT_THAT(nnapi->ANeuralNetworks_getDeviceCount, NotNull());
uint32_t device_count = 0;
nnapi->ANeuralNetworks_getDeviceCount(&device_count);
EXPECT_THAT(device_count, Eq(999));
}
TEST_F(NnApiHandlerTest, ShouldRestoreNnApiToItsOriginalValueWithReset) {
NnApi nnapi_orig_copy = *NnApiImplementation();
auto device_count_override = [](uint32_t* device_count) -> int {
*device_count = 777;
return ANEURALNETWORKS_NO_ERROR;
};
NnApiHandler::Instance()->StubGetDeviceCountWith(device_count_override);
EXPECT_THAT(nnapi_orig_copy.ANeuralNetworks_getDeviceCount,
Ne(NnApiImplementation()->ANeuralNetworks_getDeviceCount));
NnApiHandler::Instance()->Reset();
ExpectEquals(nnapi_orig_copy, *NnApiImplementation());
}
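// File-scope function pointer lets the captureless stub lambda below compare
// against the original entry point.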
int (*device_count_ptr)(uint32_t*);
TEST_F(NnApiHandlerTest, ShouldSupportPassthroughCalls) {
const NnApi* nnapi = NnApiImplementation();
device_count_ptr = nnapi->ANeuralNetworks_getDeviceCount;
NnApiHandler::Instance()->StubGetDeviceCountWith(
[](uint32_t* device_count) -> int {
return NnApiPassthroughInstance()->ANeuralNetworks_getDeviceCount ==
device_count_ptr;
});
uint32_t device_count = 0;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount(&device_count), Eq(1));
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI11) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(28, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
}
TEST_F(NnApiHandlerTest, ShouldSetNnApiMembersToNullAsPerSdkVersion_NNAPI10) {
auto* handler = NnApiHandler::Instance();
handler->SetNnapiSupportedDevice("devvice", 1000);
handler->GetSupportedOperationsForDevicesReturns<1>();
handler->CompilationCreateForDevicesReturns<1>();
handler->ExecutionComputeReturns<1>();
handler->MemoryCreateFromFdReturns<1>();
handler->SetAndroidSdkVersion(27, true);
const NnApi* nnapi = NnApiImplementation();
using ::testing::IsNull;
EXPECT_THAT(nnapi->ANeuralNetworks_getDeviceCount, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworks_getDevice, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getName, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getVersion, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getFeatureLevel, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getSupportedOperationsForDevices,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_createForDevices, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksCompilation_setCaching, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_compute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandRank, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getOutputOperandDimensions,
IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_create, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksBurst_free, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_burstCompute, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksMemory_createFromAHardwareBuffer, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_setMeasureTiming, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksExecution_getDuration, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksDevice_getExtensionSupport, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperandType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_getExtensionOperationType, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_setOperandExtensionData, IsNull());
EXPECT_THAT(nnapi->ANeuralNetworksModel_relaxComputationFloat32toFloat16,
IsNull());
}
void ExpectEquals(const NnApi& left, const NnApi& right) {
#define EXPECT_NNAPI_MEMBER_EQ(name) EXPECT_EQ(left.name, right.name)
EXPECT_NNAPI_MEMBER_EQ(nnapi_exists);
EXPECT_NNAPI_MEMBER_EQ(android_sdk_version);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromFd);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperand);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValue);
EXPECT_NNAPI_MEMBER_EQ(
ANeuralNetworksModel_setOperandSymmPerChannelQuantParams);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_setOperandValueFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_addOperation);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_identifyInputsAndOutputs);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setPreference);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_finish);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setInputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutput);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setOutputFromMemory);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_startCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_wait);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksEvent_free);
EXPECT_NNAPI_MEMBER_EQ(ASharedMemory_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDeviceCount);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworks_getDevice);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getName);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getVersion);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getFeatureLevel);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksDevice_getType);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_createForDevices);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksCompilation_setCaching);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_compute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandRank);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getOutputOperandDimensions);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_create);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksBurst_free);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_burstCompute);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_setMeasureTiming);
EXPECT_NNAPI_MEMBER_EQ(ANeuralNetworksExecution_getDuration);
#undef EXPECT_NNAPI_MEMBER_EQ
}
}  // namespace nnapi
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_handler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/nnapi/nnapi_handler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
915c18f8-e578-43d6-8287-09d0e320b63d | cpp | tensorflow/tensorflow | transpose_utils | tensorflow/lite/kernels/internal/transpose_utils.cc | tensorflow/lite/kernels/internal/transpose_utils_test.cc | #include "tensorflow/lite/kernels/internal/transpose_utils.h"
namespace tflite {
namespace transpose_utils {
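// A transpose collapses to a single 2D transpose when the permutation is a
// rotation: the dimensions before perm[0] form one flattened axis and the
// remaining dimensions form the other.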
bool IsTranspose2DApplicable(const TransposeParams& params,
const RuntimeShape& input_shape, int* dim0,
int* dim1) {
const int dims_cnt = input_shape.DimensionsCount();
if (dims_cnt == 2) {
*dim0 = input_shape.Dims(0);
*dim1 = input_shape.Dims(1);
return true;
}
const int first_perm = params.perm[0];
for (int i = 1; i < dims_cnt; ++i) {
int rebased = params.perm[i] - first_perm;
if (rebased < 0) {
rebased += dims_cnt;
}
if (rebased != i) {
return false;
}
}
*dim0 = 1;
*dim1 = 1;
for (int i = 0; i < dims_cnt; ++i) {
if (i < first_perm) {
*dim0 *= input_shape.Dims(i);
} else {
*dim1 *= input_shape.Dims(i);
}
}
return true;
}
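// Squeezes size-1 dimensions out of both shapes and renumbers the
// permutation so callers see the minimal-rank equivalent problem.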
void RemoveOneSizeDimensions(RuntimeShape* input_shape,
RuntimeShape* output_shape,
TransposeParams* params) {
const int dims_cnt = input_shape->DimensionsCount();
TFLITE_DCHECK_EQ(params->perm_count, dims_cnt);
bool found_one_size_dim = false;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
found_one_size_dim = true;
break;
}
}
if (!found_one_size_dim) return;
if (input_shape->FlatSize() == 1) {
input_shape->Resize(1);
input_shape->SetDim(0, 1);
output_shape->Resize(1);
output_shape->SetDim(0, 1);
params->perm_count = 1;
params->perm[0] = 0;
return;
}
int new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (input_shape->Dims(i) == 1) {
continue;
}
input_shape->SetDim(new_dims_cnt, input_shape->Dims(i));
++new_dims_cnt;
}
input_shape->Resize(new_dims_cnt);
TransposeParams new_params;
new_dims_cnt = 0;
for (int i = 0; i < dims_cnt; ++i) {
if (output_shape->Dims(i) == 1) {
continue;
}
new_params.perm[new_dims_cnt] = params->perm[i];
output_shape->SetDim(new_dims_cnt, output_shape->Dims(i));
++new_dims_cnt;
}
output_shape->Resize(new_dims_cnt);
new_params.perm_count = new_dims_cnt;
for (int i = 0; i < new_dims_cnt; ++i) {
int min_val_idx = -1;
for (int j = 0; j < new_dims_cnt; ++j) {
if (new_params.perm[j] >= i &&
(min_val_idx == -1 ||
new_params.perm[min_val_idx] > new_params.perm[j])) {
min_val_idx = j;
}
}
new_params.perm[min_val_idx] = i;
}
*params = new_params;
}
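// Peels off leading dimensions left in place by the permutation; they act as
// an outer batch loop around a smaller transpose. Returns the element count
// of the remaining inner block.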
size_t Flatten(const RuntimeShape& input_shape,
const RuntimeShape& output_shape, const TransposeParams& params,
RuntimeShape* non_flatten_input_shape,
RuntimeShape* non_flatten_output_shape,
TransposeParams* non_flatten_params) {
int skip_dims_cnt = 0;
size_t flat_size = input_shape.FlatSize();
for (int i = 0; i < params.perm_count; ++i) {
if (params.perm[i] == i) {
flat_size /= input_shape.Dims(i);
++skip_dims_cnt;
} else {
break;
}
}
const int new_dims_cnt = params.perm_count - skip_dims_cnt;
non_flatten_input_shape->Resize(new_dims_cnt);
non_flatten_output_shape->Resize(new_dims_cnt);
non_flatten_params->perm_count = new_dims_cnt;
for (int i = skip_dims_cnt; i < params.perm_count; ++i) {
non_flatten_input_shape->SetDim(i - skip_dims_cnt, input_shape.Dims(i));
non_flatten_output_shape->SetDim(i - skip_dims_cnt, output_shape.Dims(i));
non_flatten_params->perm[i - skip_dims_cnt] =
params.perm[i] - skip_dims_cnt;
}
return flat_size;
}
}  // namespace transpose_utils
} | #include "tensorflow/lite/kernels/internal/transpose_utils.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace {
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_1DNoChanges) {
RuntimeShape input_shape({9});
RuntimeShape output_shape({9});
TransposeParams params;
params.perm_count = 1;
params.perm[0] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DNoChanges) {
RuntimeShape input_shape({9, 3});
RuntimeShape output_shape({3, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_2DShrinking) {
RuntimeShape input_shape({9, 1});
RuntimeShape output_shape({1, 9});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9}));
EXPECT_EQ(output_shape, RuntimeShape({9}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DNoChanges) {
RuntimeShape input_shape({4, 3, 8});
RuntimeShape output_shape({8, 4, 3});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 3, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4, 3}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 2);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingOnce) {
RuntimeShape input_shape({4, 1, 8});
RuntimeShape output_shape({8, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4, 8}));
EXPECT_EQ(output_shape, RuntimeShape({8, 4}));
EXPECT_EQ(output_shape.Dims(1), 4);
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DShrinkingTwice) {
RuntimeShape input_shape({4, 1, 1});
RuntimeShape output_shape({1, 4, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({4}));
EXPECT_EQ(output_shape, RuntimeShape({4}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_3DAllOnes) {
RuntimeShape input_shape({1, 1, 1});
RuntimeShape output_shape({1, 1, 1});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DNoChanges) {
RuntimeShape input_shape({9, 3, 2, 4});
RuntimeShape output_shape({3, 9, 4, 2});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 2, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4, 2}));
EXPECT_EQ(params.perm_count, 4);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 3);
EXPECT_EQ(params.perm[3], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingOnce) {
RuntimeShape input_shape({9, 3, 1, 4});
RuntimeShape output_shape({3, 9, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 0;
params.perm[2] = 3;
params.perm[3] = 2;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({9, 3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 9, 4}));
EXPECT_EQ(params.perm_count, 3);
EXPECT_EQ(params.perm[0], 1);
EXPECT_EQ(params.perm[1], 0);
EXPECT_EQ(params.perm[2], 2);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingTwice) {
RuntimeShape input_shape({1, 3, 1, 4});
RuntimeShape output_shape({3, 1, 4, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({3, 4}));
EXPECT_EQ(output_shape, RuntimeShape({3, 4}));
EXPECT_EQ(params.perm_count, 2);
EXPECT_EQ(params.perm[0], 0);
EXPECT_EQ(params.perm[1], 1);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DShrinkingThirdTimes) {
RuntimeShape input_shape({1, 1, 7, 1});
RuntimeShape output_shape({1, 7, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
¶ms);
EXPECT_EQ(input_shape, RuntimeShape({7}));
EXPECT_EQ(output_shape, RuntimeShape({7}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, RemoveOneSizeDimensions_4DAllOnes) {
RuntimeShape input_shape({1, 1, 1, 1});
RuntimeShape output_shape({1, 1, 1, 1});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
transpose_utils::RemoveOneSizeDimensions(&input_shape, &output_shape,
                                           &params);
EXPECT_EQ(input_shape, RuntimeShape({1}));
EXPECT_EQ(output_shape, RuntimeShape({1}));
EXPECT_EQ(params.perm_count, 1);
EXPECT_EQ(params.perm[0], 0);
}
TEST(TransposeUtilsTest, Flatten3D) {
RuntimeShape input_shape({3, 5, 7});
RuntimeShape output_shape({3, 7, 5});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5}));
EXPECT_EQ(non_flatten_size, 5 * 7);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, Flatten4DFlattenOnce) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 7, 5, 9});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 3;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({5, 7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({7, 5, 9}));
EXPECT_EQ(non_flatten_size, 5 * 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 3);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
EXPECT_EQ(non_flatten_params.perm[2], 2);
}
TEST(TransposeUtilsTest, Flatten4DFlattenTwice) {
RuntimeShape input_shape({3, 5, 7, 9});
RuntimeShape output_shape({3, 5, 9, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 0;
params.perm[1] = 1;
params.perm[2] = 3;
params.perm[3] = 2;
RuntimeShape non_flatten_input_shape;
RuntimeShape non_flatten_output_shape;
TransposeParams non_flatten_params;
size_t non_flatten_size = transpose_utils::Flatten(
input_shape, output_shape, params, &non_flatten_input_shape,
&non_flatten_output_shape, &non_flatten_params);
EXPECT_EQ(non_flatten_input_shape, RuntimeShape({7, 9}));
EXPECT_EQ(non_flatten_output_shape, RuntimeShape({9, 7}));
EXPECT_EQ(non_flatten_size, 7 * 9);
EXPECT_EQ(non_flatten_params.perm_count, 2);
EXPECT_EQ(non_flatten_params.perm[0], 1);
EXPECT_EQ(non_flatten_params.perm[1], 0);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable2D) {
RuntimeShape input_shape({4, 5});
TransposeParams params;
params.perm_count = 2;
params.perm[0] = 1;
params.perm[1] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 5);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DOne) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 30);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DTwo) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 0;
params.perm[2] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 6);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable3DNotApplicable) {
RuntimeShape input_shape({4, 5, 6});
TransposeParams params;
params.perm_count = 3;
params.perm[0] = 2;
params.perm[1] = 1;
params.perm[2] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DOne) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 1;
params.perm[1] = 2;
params.perm[2] = 3;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 4);
EXPECT_EQ(dim1, 210);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DTwo) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 2;
params.perm[1] = 3;
params.perm[2] = 0;
params.perm[3] = 1;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 20);
EXPECT_EQ(dim1, 42);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DThird) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 0;
params.perm[2] = 1;
params.perm[3] = 2;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_TRUE(applicable);
EXPECT_EQ(dim0, 120);
EXPECT_EQ(dim1, 7);
}
TEST(TransposeUtilsTest, IsTranspose2DApplicable4DNotApplicable) {
RuntimeShape input_shape({4, 5, 6, 7});
TransposeParams params;
params.perm_count = 4;
params.perm[0] = 3;
params.perm[1] = 2;
params.perm[2] = 1;
params.perm[3] = 0;
int dim0, dim1;
bool applicable = transpose_utils::IsTranspose2DApplicable(
params, input_shape, &dim0, &dim1);
EXPECT_FALSE(applicable);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/transpose_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/transpose_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
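The tests above exercise three transpose_utils helpers: RemoveOneSizeDimensions squeezes size-1 dimensions and remaps the permutation, Flatten peels off leading dimensions the permutation leaves in place, and IsTranspose2DApplicable detects permutations that reduce to a single 2-D swap. Below is a minimal standalone sketch of the squeezing step, assuming plain std::vector in place of the TFLite RuntimeShape/TransposeParams types; SqueezeTranspose is our name, not a TFLite API.

// A sketch, not the TFLite implementation: drop size-1 dims from the input
// shape and remap the permutation indices onto the surviving dimensions.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<int> SqueezeTranspose(const std::vector<int>& shape,
                                  const std::vector<int>& perm,
                                  std::vector<int>* out_perm) {
  // Map each surviving input dimension to its new index; size-1 dims get -1.
  std::vector<int> new_index(shape.size(), -1);
  std::vector<int> squeezed;
  for (std::size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] != 1) {
      new_index[i] = static_cast<int>(squeezed.size());
      squeezed.push_back(shape[i]);
    }
  }
  // Degenerate all-ones case: keep one size-1 dimension, matching the
  // *_AllOnes tests above.
  if (squeezed.empty()) {
    squeezed.push_back(1);
    if (!new_index.empty()) new_index[0] = 0;
  }
  out_perm->clear();
  for (int p : perm) {
    if (new_index[p] != -1) out_perm->push_back(new_index[p]);
  }
  if (out_perm->empty()) out_perm->push_back(0);
  return squeezed;
}

int main() {
  // Mirrors RemoveOneSizeDimensions_3DShrinkingTwice: {4, 1, 1} with perm
  // {2, 0, 1} squeezes to {4} with perm {0}.
  std::vector<int> perm_out;
  std::vector<int> squeezed = SqueezeTranspose({4, 1, 1}, {2, 0, 1}, &perm_out);
  assert(squeezed == std::vector<int>({4}));
  assert(perm_out == std::vector<int>({0}));
  return 0;
}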
3bc61d67-4a76-418b-a671-c9c315c7a6bd | cpp | tensorflow/tensorflow | change_op_data_type | third_party/xla/xla/service/change_op_data_type.cc | third_party/xla/xla/service/change_op_data_type_test.cc | #include "xla/service/change_op_data_type.h"
#include <optional>
#include "xla/service/hlo_creation_utils.h"
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_contraction_rewriter.h"
#endif
namespace xla {
namespace {
std::optional<PrimitiveType> GetUniformOperandType(
const HloInstruction* instr) {
std::optional<PrimitiveType> type;
for (const HloInstruction* operand : instr->operands()) {
if (!type.has_value()) {
type = operand->shape().element_type();
} else if (operand->shape().element_type() != type.value()) {
return std::nullopt;
}
}
return type;
}
}
absl::StatusOr<bool> ChangeOpDataType::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
HloCloner default_cloner = [](const HloInstruction* inst, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
return inst->CloneWithNewOperands(shape, operands);
};
HloCloner cloner = cloner_ ? cloner_ : default_cloner;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
std::optional<PrimitiveType> operand_type = GetUniformOperandType(instr);
if (!op_matcher_(instr) || !operand_type.has_value() ||
!instr->shape().IsArray() ||
instr->opcode() == HloOpcode::kParameter) {
continue;
}
const PrimitiveType from_type = *operand_type;
auto it = to_type_map_.find(from_type);
if (it == to_type_map_.end()) {
continue;
}
#if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
if (cpu::OneDnnContractionRewriter::ShouldRewriteInstr(instr, true)) {
continue;
}
#endif
const PrimitiveType to_type = it->second;
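      // Convert each operand to the target type, clone the op at that type,
      // then convert the result back so consumers see the original type.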
absl::InlinedVector<HloInstruction*, 8> new_operands;
for (HloInstruction* operand : instr->mutable_operands()) {
new_operands.push_back(MakeConvertToHlo(operand, to_type));
}
Shape new_shape = instr->shape();
new_shape.set_element_type(to_type);
HloInstruction* new_instr =
comp->AddInstruction(cloner(instr, new_shape, new_operands));
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(
instr, MakeConvertToHlo(new_instr, from_type)));
changed = true;
}
}
return changed;
}
} | #include "xla/service/change_op_data_type.h"
#include <string>
#include <tuple>
#include <vector>
#include "absl/types/span.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
namespace m = ::xla::match;
class ChangeOpDataTypeTest : public HloTestBase {
public:
ChangeOpDataTypeTest()
: HloTestBase(false,
false) {}
};
TEST_F(ChangeOpDataTypeTest, Simple) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
}
TEST_F(ChangeOpDataTypeTest, AllTypesMustBeSame) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = f16[1] dynamic-slice(f16[10] parameter(0), s32[1] parameter(1)), dynamic_slice_sizes={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(F16, F32, HloPredicateTrue);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_FALSE(changed);
}
TEST_F(ChangeOpDataTypeTest, DotAndConv) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
dot = f16[10,10] dot(f16[10,10] parameter(0), f16[10,10] parameter(1)),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
conv = f16[1,2,1] convolution(f16[1,2,1] parameter(2), f16[1,1,1] parameter(3)),
window={size=1}, dim_labels=b0f_0io->b0f
root = tuple(dot, conv)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ChangeOpDataType pass(
F16, F32, HloPredicateIsOp<HloOpcode::kDot, HloOpcode::kConvolution>);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Convert(
m::Dot(m::Convert(m::Parameter(0)).WithShape(F32, {10, 10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10, 10})))
.WithShape(F16, {10, 10}),
m::Convert(m::Convolution(
m::Convert(m::Parameter(2)).WithShape(F32, {1, 2, 1}),
m::Convert(m::Parameter(3)).WithShape(F32, {1, 1, 1})))
.WithShape(F16, {1, 2, 1}))));
}
TEST_F(ChangeOpDataTypeTest, SimpleWithCloner) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
ROOT op = add(f16[10] parameter(0), f16[10] parameter(1))
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
int count = 0;
ChangeOpDataType::HloCloner cloner =
[&count](const HloInstruction* instr, const Shape& shape,
absl::Span<HloInstruction* const> operands) {
count++;
return instr->CloneWithNewOperands(shape, operands);
};
ChangeOpDataType pass(F16, F32, matcher, cloner);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
EXPECT_EQ(count, 1);
}
TEST_F(ChangeOpDataTypeTest, SimpleWithMultipleTypes) {
const char* const kModuleStr = R"(
HloModule module
ENTRY entry {
op1 = add(f16[10] parameter(0), f16[10] parameter(1))
op2 = add(u16[10] parameter(2), u16[10] parameter(3))
ROOT tup = (f16[10], u16[10]) tuple(op1, op2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
HloPredicate matcher = HloPredicateTrue;
ChangeOpDataType pass({{F16, F32}, {U16, U32}}, matcher);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
SCOPED_TRACE(module->ToString());
EXPECT_TRUE(changed);
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kTuple);
EXPECT_EQ(root->operand_count(), 2);
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(0)).WithShape(F32, {10}),
m::Convert(m::Parameter(1)).WithShape(F32, {10})))
.WithShape(F16, {10})));
EXPECT_THAT(
root->operand(1),
GmockMatch(
m::Convert(m::Add(m::Convert(m::Parameter(2)).WithShape(U32, {10}),
m::Convert(m::Parameter(3)).WithShape(U32, {10})))
.WithShape(U16, {10})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/change_op_data_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
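Below is a minimal standalone sketch of the rewrite that Run() performs, assuming a toy expression node in place of XLA's HloInstruction (the Node struct, MakeNode, and ChangeType are our names). When every operand of a matched node shares a source type present in the map, each operand is wrapped in a convert to the target type, the node is re-created at that type, and a final convert restores the original type for consumers; this is the convert(add(convert(p0), convert(p1))) shape the Simple test asserts.

#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Node {
  std::string op;    // e.g. "add", "convert", "param"
  std::string type;  // element type, e.g. "f16"
  std::vector<Node*> inputs;
};

// Owns every node created while rewriting.
static std::vector<std::unique_ptr<Node>> arena;

Node* MakeNode(std::string op, std::string type, std::vector<Node*> inputs) {
  arena.push_back(std::make_unique<Node>(
      Node{std::move(op), std::move(type), std::move(inputs)}));
  return arena.back().get();
}

// Returns the value consumers should use instead of `instr`, or nullptr if
// the node does not match (no operands, mixed operand types, or a source
// type outside the map).
Node* ChangeType(Node* instr, const std::map<std::string, std::string>& to) {
  if (instr->op == "param" || instr->inputs.empty()) return nullptr;
  const std::string& from = instr->inputs[0]->type;
  for (Node* operand : instr->inputs) {
    if (operand->type != from) return nullptr;  // operands must agree
  }
  auto it = to.find(from);
  if (it == to.end()) return nullptr;
  std::vector<Node*> converted;
  for (Node* operand : instr->inputs) {
    converted.push_back(MakeNode("convert", it->second, {operand}));
  }
  Node* widened = MakeNode(instr->op, it->second, converted);
  return MakeNode("convert", from, {widened});  // narrow back for consumers
}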
f7da06ba-53b8-4515-bc96-3129eeae5a96 | cpp | google/cel-cpp | bytes_value | common/values/bytes_value.cc | common/values/bytes_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
#include "internal/strings.h"
namespace cel {
namespace {
template <typename Bytes>
std::string BytesDebugString(const Bytes& value) {
return value.NativeValue(absl::Overload(
[](absl::string_view string) -> std::string {
return internal::FormatBytesLiteral(string);
},
[](const absl::Cord& cord) -> std::string {
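        // Use the contiguous view when the cord is flat; otherwise
        // materialize the cord into a string first.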
if (auto flat = cord.TryFlat(); flat.has_value()) {
return internal::FormatBytesLiteral(*flat);
}
return internal::FormatBytesLiteral(static_cast<std::string>(cord));
}));
}
}
std::string BytesValue::DebugString() const { return BytesDebugString(*this); }
absl::Status BytesValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return NativeValue([&value](const auto& bytes) -> absl::Status {
return internal::SerializeBytesValue(bytes, value);
});
}
absl::StatusOr<Json> BytesValue::ConvertToJson(AnyToJsonConverter&) const {
return NativeValue(
[](const auto& value) -> Json { return JsonBytes(value); });
}
absl::Status BytesValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<BytesValue>(other); other_value.has_value()) {
result = NativeValue([other_value](const auto& value) -> BoolValue {
return other_value->NativeValue(
[&value](const auto& other_value) -> BoolValue {
return BoolValue{value == other_value};
});
});
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
size_t BytesValue::Size() const {
return NativeValue(
[](const auto& alternative) -> size_t { return alternative.size(); });
}
bool BytesValue::IsEmpty() const {
return NativeValue(
[](const auto& alternative) -> bool { return alternative.empty(); });
}
bool BytesValue::Equals(absl::string_view bytes) const {
return NativeValue([bytes](const auto& alternative) -> bool {
return alternative == bytes;
});
}
bool BytesValue::Equals(const absl::Cord& bytes) const {
return NativeValue([&bytes](const auto& alternative) -> bool {
return alternative == bytes;
});
}
bool BytesValue::Equals(const BytesValue& bytes) const {
return bytes.NativeValue(
[this](const auto& alternative) -> bool { return Equals(alternative); });
}
namespace {
int CompareImpl(absl::string_view lhs, absl::string_view rhs) {
return lhs.compare(rhs);
}
int CompareImpl(absl::string_view lhs, const absl::Cord& rhs) {
return -rhs.Compare(lhs);
}
int CompareImpl(const absl::Cord& lhs, absl::string_view rhs) {
return lhs.Compare(rhs);
}
int CompareImpl(const absl::Cord& lhs, const absl::Cord& rhs) {
return lhs.Compare(rhs);
}
}
int BytesValue::Compare(absl::string_view bytes) const {
return NativeValue([bytes](const auto& alternative) -> int {
return CompareImpl(alternative, bytes);
});
}
int BytesValue::Compare(const absl::Cord& bytes) const {
return NativeValue([&bytes](const auto& alternative) -> int {
return CompareImpl(alternative, bytes);
});
}
int BytesValue::Compare(const BytesValue& bytes) const {
return bytes.NativeValue(
[this](const auto& alternative) -> int { return Compare(alternative); });
}
} | #include <sstream>
#include <string>
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using BytesValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(BytesValueTest, Kind) {
EXPECT_EQ(BytesValue("foo").kind(), BytesValue::kKind);
EXPECT_EQ(Value(BytesValue(absl::Cord("foo"))).kind(), BytesValue::kKind);
}
TEST_P(BytesValueTest, DebugString) {
{
std::ostringstream out;
out << BytesValue("foo");
EXPECT_EQ(out.str(), "b\"foo\"");
}
{
std::ostringstream out;
out << BytesValue(absl::MakeFragmentedCord({"f", "o", "o"}));
EXPECT_EQ(out.str(), "b\"foo\"");
}
{
std::ostringstream out;
out << Value(BytesValue(absl::Cord("foo")));
EXPECT_EQ(out.str(), "b\"foo\"");
}
}
TEST_P(BytesValueTest, ConvertToJson) {
EXPECT_THAT(BytesValue("foo").ConvertToJson(value_manager()),
IsOkAndHolds(Json(JsonBytes("foo"))));
}
TEST_P(BytesValueTest, NativeValue) {
std::string scratch;
EXPECT_EQ(BytesValue("foo").NativeString(), "foo");
EXPECT_EQ(BytesValue("foo").NativeString(scratch), "foo");
EXPECT_EQ(BytesValue("foo").NativeCord(), "foo");
}
TEST_P(BytesValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(BytesValue("foo")),
NativeTypeId::For<BytesValue>());
EXPECT_EQ(NativeTypeId::Of(Value(BytesValue(absl::Cord("foo")))),
NativeTypeId::For<BytesValue>());
}
TEST_P(BytesValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<BytesValue>(BytesValue("foo")));
EXPECT_TRUE(InstanceOf<BytesValue>(Value(BytesValue(absl::Cord("foo")))));
}
TEST_P(BytesValueTest, Cast) {
EXPECT_THAT(Cast<BytesValue>(BytesValue("foo")), An<BytesValue>());
EXPECT_THAT(Cast<BytesValue>(Value(BytesValue(absl::Cord("foo")))),
An<BytesValue>());
}
TEST_P(BytesValueTest, As) {
EXPECT_THAT(As<BytesValue>(Value(BytesValue(absl::Cord("foo")))),
Ne(absl::nullopt));
}
TEST_P(BytesValueTest, StringViewEquality) {
EXPECT_TRUE(BytesValue("foo") == "foo");
EXPECT_FALSE(BytesValue("foo") == "bar");
EXPECT_TRUE("foo" == BytesValue("foo"));
EXPECT_FALSE("bar" == BytesValue("foo"));
}
TEST_P(BytesValueTest, StringViewInequality) {
EXPECT_FALSE(BytesValue("foo") != "foo");
EXPECT_TRUE(BytesValue("foo") != "bar");
EXPECT_FALSE("foo" != BytesValue("foo"));
EXPECT_TRUE("bar" != BytesValue("foo"));
}
INSTANTIATE_TEST_SUITE_P(
BytesValueTest, BytesValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
BytesValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/bytes_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/bytes_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
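BytesValue keeps its payload as either a flat string or a chunked cord, and the operations above are each written once as a generic lambda dispatched over whichever alternative is live (NativeValue plus absl::Overload). Below is a minimal sketch of the same pattern using std::variant and std::visit; FakeCord is our stand-in for absl::Cord, not a CEL type.

#include <cstddef>
#include <string>
#include <variant>

// Stand-in for absl::Cord in this sketch: anything string-like with size()
// and empty() is enough to show the dispatch.
struct FakeCord {
  std::string flat;  // pretend this is chunked storage
  std::size_t size() const { return flat.size(); }
  bool empty() const { return flat.empty(); }
};

using Storage = std::variant<std::string, FakeCord>;

// Written once as a generic lambda, like BytesValue::Size(): both
// alternatives expose size(), so one visitor covers them.
std::size_t Size(const Storage& s) {
  return std::visit(
      [](const auto& alternative) -> std::size_t { return alternative.size(); },
      s);
}

bool IsEmpty(const Storage& s) {
  return std::visit(
      [](const auto& alternative) -> bool { return alternative.empty(); }, s);
}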
f8652a50-48a4-49b0-8ae4-073028700611 | cpp | tensorflow/tensorflow | gpu_fusion | tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc | third_party/xla/xla/service/gpu/tests/gpu_fusion_test.cc | #include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#define DEBUG_TYPE "tf-gpu-op-fusion"
namespace mlir {
namespace TF {
namespace {
#define GEN_PASS_DEF_TENSORFLOWGPUFUSION
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
class GpuOpFusionPass : public impl::TensorflowGPUFusionBase<GpuOpFusionPass> {
public:
void runOnOperation() final;
};
struct ReluToFusedBatchNorm : public OpRewritePattern<ReluOp> {
using OpRewritePattern<ReluOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ReluOp relu_op,
PatternRewriter &rewriter) const override {
Operation *relu_input = relu_op.getFeatures().getDefiningOp();
if (!relu_input) return failure();
auto batch_norm = dyn_cast_or_null<FusedBatchNormV3Op>(relu_input);
AddV2Op add_op;
Value side_input;
if (!batch_norm) {
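      // relu's input is not a batch norm directly: look through an AddV2 and
      // treat the non-batch-norm operand as the fused op's side input.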
add_op = dyn_cast_or_null<AddV2Op>(relu_input);
if (!add_op) return failure();
batch_norm =
dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getX().getDefiningOp());
if (batch_norm) {
side_input = add_op.getY();
} else {
batch_norm =
dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getY().getDefiningOp());
if (!batch_norm) return failure();
side_input = add_op.getX();
}
}
assert(batch_norm);
if (batch_norm.getIsTraining()) return failure();
if (!batch_norm.getY().hasOneUse()) return failure();
OperationState state(batch_norm.getLoc(),
_FusedBatchNormExOp::getOperationName());
state.addOperands(batch_norm.getOperands());
if (side_input) state.operands.push_back(side_input);
state.addTypes(batch_norm.getResultTypes());
state.addAttributes(batch_norm->getAttrs());
Operation *op = rewriter.create(state);
rewriter.replaceOp(batch_norm, op->getResults());
if (!add_op || add_op.getZ().hasOneUse()) {
op->setAttr("activation_mode", rewriter.getStringAttr("Relu"));
rewriter.replaceOp(relu_op, op->getResult(0));
}
if (add_op) {
rewriter.replaceOp(add_op, op->getResult(0));
}
return success();
}
};
void GpuOpFusionPass::runOnOperation() {
func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
patterns.add<ReluToFusedBatchNorm>(&getContext());
(void)applyPatternsAndFoldGreedily(func, std::move(patterns));
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateGpuOpFusionPass() {
return std::make_unique<GpuOpFusionPass>();
}
}
} | #include <cstdint>
#include <optional>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/gpu/transforms/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
class GpuFusionTest : public GpuCodegenTest {};
TEST_F(GpuFusionTest, FusedReshape) {
const char* hlo_text = R"(
HloModule test_module
fused_computation {
p0.param_0 = f32[4,1,1]{2,1,0} parameter(0)
p1.param_1 = f32[4,1]{1,0} parameter(1)
reshape = f32[4,1]{1,0} reshape(p0.param_0)
ROOT add = f32[4,1] add(reshape, p1.param_1)
}
ENTRY BroadcastIntoAdd {
p0 = f32[4,1,1]{2,1,0} parameter(0)
p1 = f32[4,1]{1,0} parameter(1)
ROOT fusion = f32[4,1]{1,0} fusion(p0, p1), kind=kLoop,
calls=fused_computation
}
)";
CompileAndVerifyIr(hlo_text,
R"(
; CHECK-LABEL: @fusion
; CHECK: fadd
; CHECK: }
)",
false,
false);
}
TEST_F(GpuFusionTest, FusedBiggerThenThresholdButDoNotChangeTheFusionl) {
constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {10, 100});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 2});
Shape concat_shape = ShapeUtil::MakeShape(F32, {10, 2 * kNumParams});
HloInstruction* input =
b.AddInstruction(HloInstruction::CreateParameter(0, input_shape, "p"));
std::vector<HloInstruction*> slice_params;
for (int64_t i = 0; i < kNumParams; ++i) {
slice_params.push_back(b.AddInstruction(HloInstruction::CreateSlice(
slice_shape, input, {0, 0}, {10, 2}, {1, 1})));
}
b.AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, slice_params, 1));
module->AddEntryComputation(b.Build());
EXPECT_TRUE(GpuInstructionFusion(false,
TestGpuDeviceInfo::RTXA6000DeviceInfo())
.Run(module.get())
.value());
EXPECT_TRUE(module->entry_computation()->root_instruction()->opcode() ==
HloOpcode::kFusion);
for (HloInstruction* instr : module->entry_computation()->instructions()) {
EXPECT_TRUE(instr->opcode() != HloOpcode::kSlice);
}
}
class TransposeFusionTest : public GpuFusionTest {
public:
void CheckGpuFusion(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
GpuInstructionFusion{true,
TestGpuDeviceInfo::RTXA6000DeviceInfo()},
expected);
}
};
TEST_F(TransposeFusionTest, ElementaryWithTranspose) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
s = sqrt(p)
ROOT t = f32[32,16]{1,0} transpose(s), dimensions={1,0}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeAfterTransposeFused) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
s = sqrt(p)
t = f32[32,16]{1,0} transpose(s), dimensions={1,0}
ROOT r = f32[32,16,1]{2,1,0} reshape(t)
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeSimpleFusion) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[256,16]{1,0} parameter(0)
r = f32[16,16,16]{2,1,0} reshape(p)
s = sqrt(r)
ROOT t = f32[16,16,16]{2,1,0} transpose(s), dimensions={0,2,1}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ElementaryLogical) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[1,16,32]{2,1,0} parameter(0)
s = f32[1,16,32]{2,1,0} sqrt(p)
ROOT c = f32[1,32,16]{2,1,0} transpose(s), dimensions={0,2,1}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeSimpleFusionLogical) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[256,16]{1,0} parameter(0)
r = f32[16,16,16]{2,1,0} reshape(p)
s = sqrt(r)
ROOT c = f32[16,16,16]{2,1,0} transpose(s), dimensions={1,0,2}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/gpu_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
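Below is a standalone sketch of the match that ReluToFusedBatchNorm performs, assuming a toy op struct rather than MLIR operations (TfOp and MatchReluBnAdd are our names). relu(bn(...)) matches directly, and relu(add(bn(...), side)) matches through AddV2 with either operand order, yielding the batch norm plus an optional side input for the fused op.

#include <optional>
#include <string>
#include <utility>
#include <vector>

struct TfOp {
  std::string name;  // "Relu", "AddV2", "FusedBatchNormV3", ...
  std::vector<TfOp*> operands;
  bool is_training = false;
  int result_uses = 1;  // uses of the op's primary result
};

// Returns {batch_norm, side_input} (side_input may be null) when the fusion
// pattern applies, mirroring the operand probing in matchAndRewrite.
std::optional<std::pair<TfOp*, TfOp*>> MatchReluBnAdd(TfOp* relu) {
  if (relu->name != "Relu" || relu->operands.empty()) return std::nullopt;
  auto fusible_bn = [](TfOp* op) {
    // Inference-mode batch norm whose output feeds only this pattern.
    return op != nullptr && op->name == "FusedBatchNormV3" &&
           !op->is_training && op->result_uses == 1;
  };
  TfOp* in = relu->operands[0];
  if (fusible_bn(in)) return std::pair<TfOp*, TfOp*>{in, nullptr};
  if (in != nullptr && in->name == "AddV2" && in->operands.size() == 2) {
    if (fusible_bn(in->operands[0])) {
      return std::pair<TfOp*, TfOp*>{in->operands[0], in->operands[1]};
    }
    if (fusible_bn(in->operands[1])) {
      return std::pair<TfOp*, TfOp*>{in->operands[1], in->operands[0]};
    }
  }
  return std::nullopt;
}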
9ea45c4e-ae1e-40a8-ad15-5bdd8477954e | cpp | tensorflow/tensorflow | popcnt | tensorflow/lite/experimental/shlo/ops/popcnt.cc | tensorflow/lite/experimental/shlo/ops/popcnt_test.cc | #include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <type_traits>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Popcnt {
template <class T>
T operator()(T v) const {
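    // I4 is carried in a wider type: mask to the low nibble before counting.
    // Other integer types count bits of the same-width unsigned value.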
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
};
PopcntOp Create(PopcntOp::Attributes) { return {}; }
absl::Status Prepare(PopcntOp& op, const Tensor& input, Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(input.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("popcnt"), input, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("popcnt"), input, output));
return absl::OkStatus();
}
absl::Status Evaluate(PopcntOp& op, const Tensor& input, Tensor& output) {
Popcnt popcnt;
if (IsIntTensor(input)) {
DISPATCH_INT(detail::EvaluateNoQuantization, input.tensor_element_type(),
popcnt, input, output);
}
return absl::FailedPreconditionError(
"stablehlo.popcnt: Unsupported tensor type.");
}
}; | #include "tensorflow/lite/experimental/shlo/ops/popcnt.h"
#include <cstdint>
#include <limits>
#include <string>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/numeric/bits.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/i4.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/unary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::NanSensitiveFloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<PopcntOp> {
static std::string Get() { return "Popcnt"; }
};
template <>
struct SupportedOpDataType<PopcntOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
struct Popcnt {
template <class T>
T operator()(T v) const {
if constexpr (std::is_same_v<I4, T>) {
return I4(absl::popcount(static_cast<uint8_t>(v & 0xf)));
} else {
return absl::popcount(static_cast<std::make_unsigned_t<T>>(v));
}
}
} popcnt_ref;
using PopcntTypes = ::testing::Types<int32_t, int16_t, int8_t, I4>;
template <class T>
struct PopcntFunctorTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntFunctorTest, PopcntTypes);
TYPED_TEST(PopcntFunctorTest, GivesCorrectResults) {
int64_t bit_count = 8 * sizeof(TypeParam);
if constexpr (std::is_same_v<I4, TypeParam>) {
bit_count = 4;
}
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::lowest()), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(-1)), bit_count);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(0)), 0);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(1)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(2)), 1);
EXPECT_EQ(popcnt_ref(static_cast<TypeParam>(3)), 2);
EXPECT_EQ(popcnt_ref(std::numeric_limits<TypeParam>::max()), bit_count - 1);
}
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseOpShapePropagationTest,
PopcntOp, TestParamNames);
INSTANTIATE_TYPED_TEST_SUITE_P(
Popcnt, UnaryElementwiseSameBaselineElementTypeConstraintTest,
BaselineMismatchSignedIntegerTypes<PopcntOp>, TestParamNames);
using UnsupportedTypes =
WithOpTypes<PopcntOp, ConcatTypes<BoolTestType, FloatTestTypes,
PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Popcnt, UnaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
template <class T>
struct PopcntTest : ::testing::Test {};
TYPED_TEST_SUITE(PopcntTest, IntTestTypes, TestParamNames);
TYPED_TEST(PopcntTest, IntTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> input_data = IotaBuffer<TypeParam::kStorage>(shape, -12);
Vector<StorageT> output_data(shape.NumElements());
Tensor input_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = input_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(input_data, expected_data.begin(), popcnt_ref);
auto op = Create(PopcntOp::Attributes{});
ASSERT_OK(Prepare(op, input_tensor, output_tensor));
ASSERT_OK(Evaluate(op, input_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(NanSensitiveFloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/popcnt.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/popcnt_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
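The functor above relies on two details that are easy to verify in isolation: negative values are counted through the unsigned representation of the same width, and I4 values are masked to their low nibble first. A standalone C++20 check, using std::popcount in place of absl::popcount:

#include <bit>
#include <cassert>
#include <cstdint>

int popcnt_i8(int8_t v) {
  // Cast through the same-width unsigned type so e.g. -1 counts all 8 bits.
  return std::popcount(static_cast<uint8_t>(v));
}

int popcnt_i4(int8_t v) {
  // A 4-bit value carried in a wider type: mask to the low nibble first,
  // matching the I4 branch of the functor.
  return std::popcount(static_cast<uint8_t>(v & 0xf));
}

int main() {
  assert(popcnt_i8(-1) == 8);        // 0xff: all bits set
  assert(popcnt_i8(INT8_MIN) == 1);  // 0x80: only the sign bit
  assert(popcnt_i4(-1) == 4);        // low nibble is 0xf
  assert(popcnt_i4(7) == 3);
  return 0;
}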
c74a6817-3eb7-4344-996d-e523ce4fc6bf | cpp | google/libphonenumber | shortnumberinfo | cpp/src/phonenumbers/shortnumberinfo.cc | cpp/test/phonenumbers/shortnumberinfo_test.cc | #include "phonenumbers/shortnumberinfo.h"
#include <algorithm>
#include <string.h>
#include <iterator>
#include <map>
#include "phonenumbers/default_logger.h"
#include "phonenumbers/matcher_api.h"
#include "phonenumbers/phonemetadata.pb.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/regex_based_matcher.h"
#include "phonenumbers/region_code.h"
#include "phonenumbers/short_metadata.h"
namespace i18n {
namespace phonenumbers {
using google::protobuf::RepeatedField;
using std::list;
using std::map;
using std::string;
bool LoadCompiledInMetadata(PhoneMetadataCollection* metadata) {
if (!metadata->ParseFromArray(short_metadata_get(), short_metadata_size())) {
LOG(ERROR) << "Could not parse binary data.";
return false;
}
return true;
}
ShortNumberInfo::ShortNumberInfo()
: phone_util_(*PhoneNumberUtil::GetInstance()),
matcher_api_(new RegexBasedMatcher()),
      region_to_short_metadata_map_(
          new absl::flat_hash_map<string, PhoneMetadata>()),
      regions_where_emergency_numbers_must_be_exact_(
          new absl::flat_hash_set<string>()) {
PhoneMetadataCollection metadata_collection;
if (!LoadCompiledInMetadata(&metadata_collection)) {
LOG(DFATAL) << "Could not parse compiled-in metadata.";
return;
}
for (const auto& metadata : metadata_collection.metadata()) {
const string& region_code = metadata.id();
region_to_short_metadata_map_->insert(std::make_pair(region_code, metadata));
}
regions_where_emergency_numbers_must_be_exact_->insert("BR");
regions_where_emergency_numbers_must_be_exact_->insert("CL");
regions_where_emergency_numbers_must_be_exact_->insert("NI");
}
ShortNumberInfo::~ShortNumberInfo() {}
const PhoneMetadata* ShortNumberInfo::GetMetadataForRegion(
const string& region_code) const {
auto it = region_to_short_metadata_map_->find(region_code);
if (it != region_to_short_metadata_map_->end()) {
return &it->second;
}
return nullptr;
}
namespace {
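// Checks the candidate number against both the possible lengths and the
// national-number pattern of the given description.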
bool MatchesPossibleNumberAndNationalNumber(
const MatcherApi& matcher_api,
const string& number,
const PhoneNumberDesc& desc) {
const RepeatedField<int>& lengths = desc.possible_length();
if (desc.possible_length_size() > 0 &&
std::find(lengths.begin(), lengths.end(), number.length()) ==
lengths.end()) {
return false;
}
return matcher_api.MatchNationalNumber(number, desc, false);
}
}
bool ShortNumberInfo::RegionDialingFromMatchesNumber(const PhoneNumber& number,
const string& region_dialing_from) const {
list<string> region_codes;
phone_util_.GetRegionCodesForCountryCallingCode(number.country_code(),
                                                  &region_codes);
return std::find(region_codes.begin(),
region_codes.end(),
region_dialing_from) != region_codes.end();
}
bool ShortNumberInfo::IsPossibleShortNumberForRegion(const PhoneNumber& number,
const string& region_dialing_from) const {
if (!RegionDialingFromMatchesNumber(number, region_dialing_from)) {
return false;
}
const PhoneMetadata* phone_metadata =
GetMetadataForRegion(region_dialing_from);
if (!phone_metadata) {
return false;
}
string short_number;
phone_util_.GetNationalSignificantNumber(number, &short_number);
const RepeatedField<int>& lengths =
phone_metadata->general_desc().possible_length();
return (std::find(lengths.begin(), lengths.end(), short_number.length()) !=
lengths.end());
}
bool ShortNumberInfo::IsPossibleShortNumber(const PhoneNumber& number) const {
list<string> region_codes;
phone_util_.GetRegionCodesForCountryCallingCode(number.country_code(),
                                                  &region_codes);
string short_number;
phone_util_.GetNationalSignificantNumber(number, &short_number);
for (const auto& region_code : region_codes) {
const PhoneMetadata* phone_metadata = GetMetadataForRegion(region_code);
if (!phone_metadata) {
continue;
}
const RepeatedField<int>& lengths =
phone_metadata->general_desc().possible_length();
if (std::find(lengths.begin(), lengths.end(), short_number.length()) !=
lengths.end()) {
return true;
}
}
return false;
}
bool ShortNumberInfo::IsValidShortNumberForRegion(
const PhoneNumber& number, const string& region_dialing_from) const {
if (!RegionDialingFromMatchesNumber(number, region_dialing_from)) {
return false;
}
const PhoneMetadata* phone_metadata =
GetMetadataForRegion(region_dialing_from);
if (!phone_metadata) {
return false;
}
string short_number;
phone_util_.GetNationalSignificantNumber(number, &short_number);
const PhoneNumberDesc& general_desc = phone_metadata->general_desc();
if (!MatchesPossibleNumberAndNationalNumber(*matcher_api_, short_number,
general_desc)) {
return false;
}
const PhoneNumberDesc& short_number_desc = phone_metadata->short_code();
return MatchesPossibleNumberAndNationalNumber(*matcher_api_, short_number,
short_number_desc);
}
bool ShortNumberInfo::IsValidShortNumber(const PhoneNumber& number) const {
list<string> region_codes;
phone_util_.GetRegionCodesForCountryCallingCode(number.country_code(),
                                                  &region_codes);
string region_code;
  GetRegionCodeForShortNumberFromRegionList(number, region_codes,
                                            &region_code);
if (region_codes.size() > 1 && region_code != RegionCode::GetUnknown()) {
return true;
}
return IsValidShortNumberForRegion(number, region_code);
}
ShortNumberInfo::ShortNumberCost ShortNumberInfo::GetExpectedCostForRegion(
const PhoneNumber& number, const string& region_dialing_from) const {
if (!RegionDialingFromMatchesNumber(number, region_dialing_from)) {
return ShortNumberInfo::UNKNOWN_COST;
}
const PhoneMetadata* phone_metadata =
GetMetadataForRegion(region_dialing_from);
if (!phone_metadata) {
return ShortNumberInfo::UNKNOWN_COST;
}
string short_number;
phone_util_.GetNationalSignificantNumber(number, &short_number);
const RepeatedField<int>& lengths =
phone_metadata->general_desc().possible_length();
if (std::find(lengths.begin(), lengths.end(), short_number.length()) ==
lengths.end()) {
return ShortNumberInfo::UNKNOWN_COST;
}
if (MatchesPossibleNumberAndNationalNumber(*matcher_api_, short_number,
phone_metadata->premium_rate())) {
return ShortNumberInfo::PREMIUM_RATE;
}
if (MatchesPossibleNumberAndNationalNumber(*matcher_api_, short_number,
phone_metadata->standard_rate())) {
return ShortNumberInfo::STANDARD_RATE;
}
if (MatchesPossibleNumberAndNationalNumber(*matcher_api_, short_number,
phone_metadata->toll_free())) {
return ShortNumberInfo::TOLL_FREE;
}
if (IsEmergencyNumber(short_number, region_dialing_from)) {
return ShortNumberInfo::TOLL_FREE;
}
return ShortNumberInfo::UNKNOWN_COST;
}
ShortNumberInfo::ShortNumberCost ShortNumberInfo::GetExpectedCost(
const PhoneNumber& number) const {
list<string> region_codes;
phone_util_.GetRegionCodesForCountryCallingCode(number.country_code(),
                                                  &region_codes);
if (region_codes.size() == 0) {
return ShortNumberInfo::UNKNOWN_COST;
}
if (region_codes.size() == 1) {
return GetExpectedCostForRegion(number, region_codes.front());
}
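  // Several regions share this calling code: merge the per-region costs.
  // PREMIUM_RATE and UNKNOWN_COST short-circuit; otherwise keep the more
  // expensive of STANDARD_RATE and TOLL_FREE.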
ShortNumberInfo::ShortNumberCost cost = ShortNumberInfo::TOLL_FREE;
for (const auto& region_code : region_codes) {
ShortNumberInfo::ShortNumberCost cost_for_region =
GetExpectedCostForRegion(number, region_code);
switch (cost_for_region) {
case ShortNumberInfo::PREMIUM_RATE:
return ShortNumberInfo::PREMIUM_RATE;
case ShortNumberInfo::UNKNOWN_COST:
return ShortNumberInfo::UNKNOWN_COST;
case ShortNumberInfo::STANDARD_RATE:
if (cost != ShortNumberInfo::UNKNOWN_COST) {
cost = ShortNumberInfo::STANDARD_RATE;
}
break;
case ShortNumberInfo::TOLL_FREE:
break;
default:
LOG(ERROR) << "Unrecognised cost for region: "
<< static_cast<int>(cost_for_region);
break;
}
}
return cost;
}
void ShortNumberInfo::GetRegionCodeForShortNumberFromRegionList(
const PhoneNumber& number, const list<string>& region_codes,
string* region_code) const {
if (region_codes.size() == 0) {
region_code->assign(RegionCode::GetUnknown());
return;
} else if (region_codes.size() == 1) {
region_code->assign(region_codes.front());
return;
}
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
for (const auto& region_code_it : region_codes) {
const PhoneMetadata* phone_metadata = GetMetadataForRegion(region_code_it);
if (phone_metadata != nullptr &&
MatchesPossibleNumberAndNationalNumber(*matcher_api_, national_number,
phone_metadata->short_code())) {
region_code->assign(region_code_it);
return;
}
}
region_code->assign(RegionCode::GetUnknown());
}
string ShortNumberInfo::GetExampleShortNumber(const string& region_code) const {
const PhoneMetadata* phone_metadata = GetMetadataForRegion(region_code);
if (!phone_metadata) {
return "";
}
const PhoneNumberDesc& desc = phone_metadata->short_code();
if (desc.has_example_number()) {
return desc.example_number();
}
return "";
}
string ShortNumberInfo::GetExampleShortNumberForCost(const string& region_code,
ShortNumberInfo::ShortNumberCost cost) const {
const PhoneMetadata* phone_metadata = GetMetadataForRegion(region_code);
if (!phone_metadata) {
return "";
}
const PhoneNumberDesc* desc = nullptr;
switch (cost) {
case TOLL_FREE:
desc = &(phone_metadata->toll_free());
break;
case STANDARD_RATE:
desc = &(phone_metadata->standard_rate());
break;
case PREMIUM_RATE:
desc = &(phone_metadata->premium_rate());
break;
default:
break;
}
if (desc != nullptr && desc->has_example_number()) {
return desc->example_number();
}
return "";
}
bool ShortNumberInfo::ConnectsToEmergencyNumber(const string& number,
const string& region_code) const {
return MatchesEmergencyNumberHelper(number, region_code,
true );
}
bool ShortNumberInfo::IsEmergencyNumber(const string& number,
const string& region_code) const {
return MatchesEmergencyNumberHelper(number, region_code,
false );
}
bool ShortNumberInfo::MatchesEmergencyNumberHelper(const string& number,
const string& region_code, bool allow_prefix_match) const {
string extracted_number;
phone_util_.ExtractPossibleNumber(number, &extracted_number);
if (phone_util_.StartsWithPlusCharsPattern(extracted_number)) {
return false;
}
const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
if (!metadata || !metadata->has_emergency()) {
return false;
}
phone_util_.NormalizeDigitsOnly(&extracted_number);
bool allow_prefix_match_for_region =
allow_prefix_match &&
regions_where_emergency_numbers_must_be_exact_->find(region_code) ==
regions_where_emergency_numbers_must_be_exact_->end();
return matcher_api_->MatchNationalNumber(
extracted_number, metadata->emergency(), allow_prefix_match_for_region);
}
bool ShortNumberInfo::IsCarrierSpecific(const PhoneNumber& number) const {
list<string> region_codes;
phone_util_.GetRegionCodesForCountryCallingCode(number.country_code(),
                                                  &region_codes);
string region_code;
  GetRegionCodeForShortNumberFromRegionList(number, region_codes,
                                            &region_code);
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
const PhoneMetadata* phone_metadata = GetMetadataForRegion(region_code);
return phone_metadata &&
MatchesPossibleNumberAndNationalNumber(*matcher_api_, national_number,
phone_metadata->carrier_specific());
}
bool ShortNumberInfo::IsCarrierSpecificForRegion(const PhoneNumber& number,
const string& region_dialing_from) const {
if (!RegionDialingFromMatchesNumber(number, region_dialing_from)) {
return false;
}
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
const PhoneMetadata* phone_metadata =
GetMetadataForRegion(region_dialing_from);
return phone_metadata &&
MatchesPossibleNumberAndNationalNumber(*matcher_api_, national_number,
phone_metadata->carrier_specific());
}
bool ShortNumberInfo::IsSmsServiceForRegion(const PhoneNumber& number,
const string& region_dialing_from) const {
if (!RegionDialingFromMatchesNumber(number, region_dialing_from)) {
return false;
}
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
const PhoneMetadata* phone_metadata =
GetMetadataForRegion(region_dialing_from);
return phone_metadata &&
MatchesPossibleNumberAndNationalNumber(*matcher_api_, national_number,
phone_metadata->sms_services());
}
}
} | #include "phonenumbers/shortnumberinfo.h"
#include <gtest/gtest.h>
#include "phonenumbers/base/logging.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/stringutil.h"
#include "phonenumbers/test_util.h"
namespace i18n {
namespace phonenumbers {
class ShortNumberInfoTest : public testing::Test {
public:
ShortNumberInfoTest(const ShortNumberInfoTest&) = delete;
ShortNumberInfoTest& operator=(const ShortNumberInfoTest&) = delete;
protected:
PhoneNumber ParseNumberForTesting(const string& number,
const string& region_code) {
PhoneNumber phone_number;
PhoneNumberUtil::ErrorType error_type = phone_util_.Parse(
number, region_code, &phone_number);
CHECK_EQ(error_type, PhoneNumberUtil::NO_PARSING_ERROR);
IGNORE_UNUSED(error_type);
return phone_number;
}
ShortNumberInfoTest() : short_info_() {
PhoneNumberUtil::GetInstance()->SetLogger(new StdoutLogger());
}
const PhoneNumberUtil phone_util_;
const ShortNumberInfo short_info_;
};
TEST_F(ShortNumberInfoTest, IsPossibleShortNumber) {
PhoneNumber possible_number;
possible_number.set_country_code(33);
possible_number.set_national_number(uint64{123456});
EXPECT_TRUE(short_info_.IsPossibleShortNumber(possible_number));
EXPECT_TRUE(short_info_.IsPossibleShortNumberForRegion(
ParseNumberForTesting("123456", RegionCode::FR()), RegionCode::FR()));
PhoneNumber impossible_number;
impossible_number.set_country_code(33);
impossible_number.set_national_number(uint64{9});
EXPECT_FALSE(short_info_.IsPossibleShortNumber(impossible_number));
PhoneNumber shared_number;
shared_number.set_country_code(44);
shared_number.set_national_number(uint64{11001});
EXPECT_TRUE(short_info_.IsPossibleShortNumber(shared_number));
}
TEST_F(ShortNumberInfoTest, IsValidShortNumber) {
PhoneNumber valid_number;
valid_number.set_country_code(33);
valid_number.set_national_number(uint64{1010});
EXPECT_TRUE(short_info_.IsValidShortNumber(valid_number));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("1010", RegionCode::FR()), RegionCode::FR()));
PhoneNumber invalid_number;
invalid_number.set_country_code(33);
invalid_number.set_national_number(uint64{123456});
EXPECT_FALSE(short_info_.IsValidShortNumber(invalid_number));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("123456", RegionCode::FR()), RegionCode::FR()));
PhoneNumber shared_number;
shared_number.set_country_code(44);
shared_number.set_national_number(uint64{18001});
EXPECT_TRUE(short_info_.IsValidShortNumber(shared_number));
}
TEST_F(ShortNumberInfoTest, IsCarrierSpecific) {
PhoneNumber carrier_specific_number;
carrier_specific_number.set_country_code(1);
carrier_specific_number.set_national_number(uint64{33669});
EXPECT_TRUE(short_info_.IsCarrierSpecific(carrier_specific_number));
EXPECT_TRUE(short_info_.IsCarrierSpecificForRegion(
ParseNumberForTesting("33669", RegionCode::US()), RegionCode::US()));
PhoneNumber not_carrier_specific_number;
not_carrier_specific_number.set_country_code(1);
not_carrier_specific_number.set_national_number(uint64{911});
EXPECT_FALSE(short_info_.IsCarrierSpecific(not_carrier_specific_number));
EXPECT_FALSE(short_info_.IsCarrierSpecificForRegion(
ParseNumberForTesting("911", RegionCode::US()), RegionCode::US()));
PhoneNumber carrier_specific_number_for_some_region;
carrier_specific_number_for_some_region.set_country_code(1);
carrier_specific_number_for_some_region.set_national_number(uint64{211});
EXPECT_TRUE(short_info_.IsCarrierSpecific(
carrier_specific_number_for_some_region));
EXPECT_TRUE(short_info_.IsCarrierSpecificForRegion(
carrier_specific_number_for_some_region, RegionCode::US()));
EXPECT_FALSE(short_info_.IsCarrierSpecificForRegion(
carrier_specific_number_for_some_region, RegionCode::BB()));
}
TEST_F(ShortNumberInfoTest, IsSmsService) {
PhoneNumber sms_service_number_for_some_region;
sms_service_number_for_some_region.set_country_code(1);
sms_service_number_for_some_region.set_national_number(uint64{21234});
EXPECT_TRUE(short_info_.IsSmsServiceForRegion(
sms_service_number_for_some_region, RegionCode::US()));
EXPECT_FALSE(short_info_.IsSmsServiceForRegion(
sms_service_number_for_some_region, RegionCode::BB()));
}
TEST_F(ShortNumberInfoTest, GetExpectedCost) {
uint64 national_number;
const string& premium_rate_example =
short_info_.GetExampleShortNumberForCost(
RegionCode::FR(), ShortNumberInfo::PREMIUM_RATE);
EXPECT_EQ(ShortNumberInfo::PREMIUM_RATE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(premium_rate_example, RegionCode::FR()),
RegionCode::FR()));
PhoneNumber premium_rate_number;
premium_rate_number.set_country_code(33);
safe_strtou64(premium_rate_example, &national_number);
premium_rate_number.set_national_number(national_number);
EXPECT_EQ(ShortNumberInfo::PREMIUM_RATE,
short_info_.GetExpectedCost(premium_rate_number));
const string& standard_rate_example =
short_info_.GetExampleShortNumberForCost(
RegionCode::FR(), ShortNumberInfo::STANDARD_RATE);
EXPECT_EQ(ShortNumberInfo::STANDARD_RATE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(standard_rate_example, RegionCode::FR()),
RegionCode::FR()));
PhoneNumber standard_rate_number;
standard_rate_number.set_country_code(33);
safe_strtou64(standard_rate_example, &national_number);
standard_rate_number.set_national_number(national_number);
EXPECT_EQ(ShortNumberInfo::STANDARD_RATE,
short_info_.GetExpectedCost(standard_rate_number));
const string& toll_free_example =
short_info_.GetExampleShortNumberForCost(
RegionCode::FR(), ShortNumberInfo::TOLL_FREE);
EXPECT_EQ(ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(toll_free_example, RegionCode::FR()),
RegionCode::FR()));
PhoneNumber toll_free_number;
toll_free_number.set_country_code(33);
safe_strtou64(toll_free_example, &national_number);
toll_free_number.set_national_number(national_number);
EXPECT_EQ(ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCost(toll_free_number));
EXPECT_EQ(
ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("12345", RegionCode::FR()), RegionCode::FR()));
PhoneNumber unknown_cost_number;
unknown_cost_number.set_country_code(33);
unknown_cost_number.set_national_number(uint64{12345});
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCost(unknown_cost_number));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("116123", RegionCode::FR()), RegionCode::FR()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("116123", RegionCode::FR()), RegionCode::FR()));
PhoneNumber invalid_number;
invalid_number.set_country_code(33);
invalid_number.set_national_number(uint64{116123});
EXPECT_FALSE(short_info_.IsValidShortNumber(invalid_number));
EXPECT_EQ(ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCost(invalid_number));
EXPECT_EQ(
ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("911", RegionCode::US()), RegionCode::ZZ()));
unknown_cost_number.Clear();
unknown_cost_number.set_country_code(123);
unknown_cost_number.set_national_number(uint64{911});
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCost(unknown_cost_number));
}
TEST_F(ShortNumberInfoTest, GetExpectedCostForSharedCountryCallingCode) {
string ambiguous_premium_rate_string("1234");
PhoneNumber ambiguous_premium_rate_number;
ambiguous_premium_rate_number.set_country_code(61);
ambiguous_premium_rate_number.set_national_number(uint64{1234});
string ambiguous_standard_rate_string("1194");
PhoneNumber ambiguous_standard_rate_number;
ambiguous_standard_rate_number.set_country_code(61);
ambiguous_standard_rate_number.set_national_number(uint64{1194});
string ambiguous_toll_free_string("733");
PhoneNumber ambiguous_toll_free_number;
ambiguous_toll_free_number.set_country_code(61);
ambiguous_toll_free_number.set_national_number(uint64{733});
EXPECT_TRUE(short_info_.IsValidShortNumber(ambiguous_premium_rate_number));
EXPECT_TRUE(short_info_.IsValidShortNumber(ambiguous_standard_rate_number));
EXPECT_TRUE(short_info_.IsValidShortNumber(ambiguous_toll_free_number));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_premium_rate_string, RegionCode::AU()),
RegionCode::AU()));
EXPECT_EQ(ShortNumberInfo::PREMIUM_RATE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_premium_rate_string,
RegionCode::AU()),
RegionCode::AU()));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_premium_rate_string, RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_premium_rate_string,
RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(ShortNumberInfo::PREMIUM_RATE,
short_info_.GetExpectedCost(ambiguous_premium_rate_number));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_standard_rate_string, RegionCode::AU()),
RegionCode::AU()));
EXPECT_EQ(ShortNumberInfo::STANDARD_RATE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_standard_rate_string,
RegionCode::AU()),
RegionCode::AU()));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_standard_rate_string, RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_standard_rate_string,
RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCost(ambiguous_standard_rate_number));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_toll_free_string, RegionCode::AU()),
RegionCode::AU()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_toll_free_string, RegionCode::AU()),
RegionCode::AU()));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting(ambiguous_toll_free_string, RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(
ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting(ambiguous_toll_free_string, RegionCode::CX()),
RegionCode::CX()));
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCost(ambiguous_toll_free_number));
}
TEST_F(ShortNumberInfoTest, GetExampleShortNumber) {
EXPECT_FALSE(short_info_.GetExampleShortNumber(RegionCode::AD()).empty());
EXPECT_FALSE(short_info_.GetExampleShortNumber(RegionCode::FR()).empty());
EXPECT_TRUE(short_info_.GetExampleShortNumber(RegionCode::UN001()).empty());
EXPECT_TRUE(
short_info_.GetExampleShortNumber(RegionCode::GetUnknown()).empty());
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumber_US) {
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("911", RegionCode::US()));
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("112", RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("999", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumberLongNumber_US) {
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("9116666666",
RegionCode::US()));
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("1126666666",
RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("9996666666",
RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumberWithFormatting_US) {
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("9-1-1", RegionCode::US()));
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("1-1-2", RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("9-9-9",
RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumberWithPlusSign_US) {
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("+911", RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("\xEF\xBC\x8B" "911",
RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber(" +911",
RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("+112", RegionCode::US()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("+999", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumber_BR) {
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("911", RegionCode::BR()));
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("190", RegionCode::BR()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("999", RegionCode::BR()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumberLongNumber_BR) {
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("9111", RegionCode::BR()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("1900", RegionCode::BR()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("9996", RegionCode::BR()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumber_CL) {
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("131", RegionCode::CL()));
EXPECT_TRUE(short_info_.ConnectsToEmergencyNumber("133", RegionCode::CL()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumberLongNumber_CL) {
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("1313", RegionCode::CL()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("1330", RegionCode::CL()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumber_AO) {
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("911", RegionCode::AO()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("222123456",
RegionCode::AO()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("923123456",
RegionCode::AO()));
}
TEST_F(ShortNumberInfoTest, ConnectsToEmergencyNumber_ZW) {
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("911", RegionCode::ZW()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("01312345",
RegionCode::ZW()));
EXPECT_FALSE(short_info_.ConnectsToEmergencyNumber("0711234567",
RegionCode::ZW()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumber_US) {
EXPECT_TRUE(short_info_.IsEmergencyNumber("911", RegionCode::US()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("112", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("999", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumberLongNumber_US) {
EXPECT_FALSE(short_info_.IsEmergencyNumber("9116666666", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("1126666666", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("9996666666", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumberWithFormatting_US) {
EXPECT_TRUE(short_info_.IsEmergencyNumber("9-1-1", RegionCode::US()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("*911", RegionCode::US()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("1-1-2", RegionCode::US()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("*112", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("9-9-9", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("*999", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumberWithPlusSign_US) {
EXPECT_FALSE(short_info_.IsEmergencyNumber("+911", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("\xEF\xBC\x8B" "911",
RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber(" +911", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("+112", RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("+999", RegionCode::US()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumber_BR) {
EXPECT_TRUE(short_info_.IsEmergencyNumber("911", RegionCode::BR()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("190", RegionCode::BR()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("999", RegionCode::BR()));
}
TEST_F(ShortNumberInfoTest, EmergencyNumberLongNumber_BR) {
EXPECT_FALSE(short_info_.IsEmergencyNumber("9111", RegionCode::BR()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("1900", RegionCode::BR()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("9996", RegionCode::BR()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumber_AO) {
EXPECT_FALSE(short_info_.IsEmergencyNumber("911", RegionCode::AO()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("222123456", RegionCode::AO()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("923123456", RegionCode::AO()));
}
TEST_F(ShortNumberInfoTest, IsEmergencyNumber_ZW) {
EXPECT_FALSE(short_info_.IsEmergencyNumber("911", RegionCode::ZW()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("01312345", RegionCode::ZW()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("0711234567", RegionCode::ZW()));
}
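// AU and CX share country calling code 61, so the shared emergency number
// below (112) must be recognised for both regions, and also when only the
// country code rather than a region is known.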
TEST_F(ShortNumberInfoTest, EmergencyNumberForSharedCountryCallingCode) {
EXPECT_TRUE(short_info_.IsEmergencyNumber("112", RegionCode::AU()));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("112", RegionCode::AU()), RegionCode::AU()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("112", RegionCode::AU()), RegionCode::AU()));
EXPECT_TRUE(short_info_.IsEmergencyNumber("112", RegionCode::CX()));
EXPECT_TRUE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("112", RegionCode::CX()), RegionCode::CX()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("112", RegionCode::CX()), RegionCode::CX()));
PhoneNumber shared_emergency_number;
shared_emergency_number.set_country_code(61);
shared_emergency_number.set_national_number(uint64{112});
EXPECT_TRUE(short_info_.IsValidShortNumber(shared_emergency_number));
EXPECT_EQ(ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCost(shared_emergency_number));
}
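// BB, US and CA all belong to the NANP (country calling code 1), so short
// code metadata must be resolved per region rather than per calling code.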
TEST_F(ShortNumberInfoTest, OverlappingNANPANumber) {
EXPECT_TRUE(short_info_.IsEmergencyNumber("211", RegionCode::BB()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("211", RegionCode::BB()), RegionCode::BB()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("211", RegionCode::US()));
EXPECT_EQ(
ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("211", RegionCode::US()), RegionCode::US()));
EXPECT_FALSE(short_info_.IsEmergencyNumber("211", RegionCode::CA()));
EXPECT_EQ(
ShortNumberInfo::TOLL_FREE,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("211", RegionCode::CA()), RegionCode::CA()));
}
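// A number parsed for SE keeps country code 46; querying it against US must
// not treat the national number as if it were a US short code.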
TEST_F(ShortNumberInfoTest, CountryCallingCodeIsNotIgnored) {
EXPECT_FALSE(short_info_.IsPossibleShortNumberForRegion(
ParseNumberForTesting("+4640404", RegionCode::SE()), RegionCode::US()));
EXPECT_FALSE(short_info_.IsValidShortNumberForRegion(
ParseNumberForTesting("+4640404", RegionCode::SE()), RegionCode::US()));
EXPECT_EQ(ShortNumberInfo::UNKNOWN_COST,
short_info_.GetExpectedCostForRegion(
ParseNumberForTesting("+4640404", RegionCode::SE()),
RegionCode::US()));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/shortnumberinfo.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/shortnumberinfo_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
4851473d-8a48-4b60-bc60-b61715739037 | cpp | tensorflow/tensorflow | stablehlo_pad | tensorflow/lite/kernels/stablehlo_pad.cc | tensorflow/lite/kernels/stablehlo_pad_test.cc | #include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <numeric>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_pad {
namespace {
static constexpr int kMaxDims = TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT;
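// Fills `buffer` with repeated copies of `data`. After the initial copy, the
// already-filled prefix is reused as the source, doubling the copied region
// on each iteration so only O(log(buffer_bytes / data_bytes)) memcpy calls
// are needed.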
void FillBuffer(char* buffer, int64_t buffer_bytes, const char* data,
int64_t data_bytes) {
if (buffer_bytes == 0) {
return;
}
assert(buffer_bytes % data_bytes == 0);
std::memcpy(buffer, data, data_bytes);
buffer_bytes -= data_bytes;
while (buffer_bytes) {
const int64_t bytes = std::min(buffer_bytes, data_bytes);
std::memcpy(buffer + data_bytes, buffer, bytes);
buffer_bytes -= bytes;
data_bytes += bytes;
}
}
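// Recursively copies a `rank`-dimensional tensor one dimension at a time
// using per-dimension byte strides. The innermost dimension copies elements
// one by one, so interior padding (encoded in `output_strides`) leaves gaps
// between consecutive output elements.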
void StridedCopy(const int rank, const char* input, const int64_t* input_shape,
const int64_t* input_strides, char* output,
const int64_t* output_strides, const int64_t element_size,
const int depth) {
if (depth + 1 == rank) {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
std::memcpy(output, input, element_size);
input += input_strides[depth];
output += output_strides[depth];
}
} else {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
StridedCopy(rank, input, input_shape, input_strides, output,
output_strides, element_size, depth + 1);
input += input_strides[depth];
output += output_strides[depth];
}
}
}
class PadData {
public:
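  // Indices of the operator's input and output tensors.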
enum { kInput, kPaddingValue, kInputTensorCount };
enum { kOutput, kOutputTensorCount };
explicit PadData(const TfLiteStablehloPadParams& params) {
std::memcpy(
edge_pad_low_, params.edge_padding_low,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
std::memcpy(
edge_pad_high_, params.edge_padding_high,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
std::memcpy(
interior_pad_, params.interior_padding,
TFLITE_STABLEHLO_PAD_PARAMS_MAX_DIMENSION_COUNT * sizeof(int64_t));
}
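  // Precomputes the shapes, strides and offsets needed by Apply(). Following
  // the StableHLO pad semantics, output dimension i has size
  // (dims[i] - 1) * (interior_pad[i] + 1) + 1 + edge_pad_low[i] +
  // edge_pad_high[i]; negative edge padding crops instead of padding and is
  // folded into the input/output offsets. If any output dimension is <= 0,
  // the output is empty.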
void Setup(const int* dims, const int rank, const int64_t element_size) {
rank_ = rank;
element_size_ = element_size;
input_offset_ = 0;
output_offset_ = 0;
output_size_ = 0;
for (int i = 0; i < rank; ++i) {
output_shape_[i] = (dims[i] - 1) * (interior_pad_[i] + 1) + 1 +
edge_pad_low_[i] + edge_pad_high_[i];
}
if (std::any_of(output_shape_, output_shape_ + rank,
[](auto s) { return s <= 0; })) {
std::memset(input_shape_, 0, sizeof(input_shape_));
std::memset(output_shape_, 0, sizeof(output_shape_));
output_size_ = 0;
return;
}
output_dimension_sizes_[rank - 1] = element_size;
for (int i = rank - 2; i >= 0; --i) {
output_dimension_sizes_[i] =
output_shape_[i + 1] * output_dimension_sizes_[i + 1];
}
output_strides_[rank - 1] = element_size * (interior_pad_[rank - 1] + 1);
for (int i = rank - 2; i >= 0; --i) {
output_strides_[i] = output_dimension_sizes_[i] * (interior_pad_[i] + 1);
}
for (int i = 0; i < rank; ++i) {
output_offset_ +=
std::max<int64_t>(edge_pad_low_[i], 0) * output_dimension_sizes_[i];
}
output_size_ = std::accumulate(output_shape_, output_shape_ + rank,
element_size, std::multiplies<>());
input_strides_[rank - 1] = element_size;
for (int i = rank - 1; i >= 1; --i) {
input_strides_[i - 1] = dims[i] * input_strides_[i];
}
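    // floor(num / denum) for a negative numerator, 0 otherwise. Used to
    // count how many input elements are dropped by negative edge padding,
    // with the divisor accounting for interior padding.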
auto DivNegRoundAwayOrZero = [](int64_t num, int64_t denum) -> int64_t {
assert(denum > 0);
return num < 0 ? (num - denum + 1) / denum : 0;
};
for (int i = 0; i < rank; ++i) {
input_shape_[i] =
dims[i] +
DivNegRoundAwayOrZero(edge_pad_low_[i], interior_pad_[i] + 1) +
DivNegRoundAwayOrZero(edge_pad_high_[i], interior_pad_[i] + 1);
}
for (int i = 0; i < rank; ++i) {
input_offset_ -=
DivNegRoundAwayOrZero(edge_pad_low_[i], interior_pad_[i] + 1) *
input_strides_[i];
if (edge_pad_low_[i] < 0) {
int64_t tmp_offset = ((interior_pad_[i] + 1 + edge_pad_low_[i]) %
(interior_pad_[i] + 1));
if (tmp_offset < 0) {
tmp_offset += interior_pad_[i] + 1;
}
output_offset_ += tmp_offset * output_dimension_sizes_[i];
}
}
}
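  // Fills the whole output with the padding value, then strided-copies the
  // (possibly cropped) input over it. Interior padding falls out of the
  // output strides, which skip the padded positions.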
void Apply(const char* input, const char* padding_value, char* output) const {
FillBuffer(output, output_size_, padding_value, element_size_);
StridedCopy(rank_, input + input_offset_, input_shape_, input_strides_,
output + output_offset_, output_strides_, element_size_,
                /*depth=*/0);
}
  TfLiteIntArray* BuildOutputTensorDims() const {
TfLiteIntArray* dims = TfLiteIntArrayCreate(rank_);
for (int64_t i = 0; i < rank_; ++i) {
dims->data[i] = output_shape_[i];
}
return dims;
}
private:
int64_t edge_pad_low_[kMaxDims];
int64_t edge_pad_high_[kMaxDims];
int64_t interior_pad_[kMaxDims];
int64_t rank_ = 0;
int64_t element_size_ = 0;
int64_t input_shape_[kMaxDims];
int64_t output_shape_[kMaxDims];
int64_t input_strides_[kMaxDims];
int64_t output_strides_[kMaxDims];
int64_t output_dimension_sizes_[kMaxDims];
int64_t input_offset_ = 0;
int64_t output_offset_ = 0;
int64_t output_size_ = 0;
};
void* Init(TfLiteContext* context, const char* options, size_t options_len) {
return new PadData(
*reinterpret_cast<const TfLiteStablehloPadParams*>(options));
}
void Free(TfLiteContext* context, void* node_data) {
delete reinterpret_cast<PadData*>(node_data);
}
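// Validates that the input, padding value, and output tensors share a type,
// precomputes the pad plan, and resizes the output tensor accordingly.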
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor = GetInput(context, node, PadData::kInput);
const TfLiteTensor* padding_value_tensor =
GetInput(context, node, PadData::kPaddingValue);
TF_LITE_ENSURE(context, input_tensor->type == padding_value_tensor->type);
size_t element_size;
TF_LITE_ENSURE(context, GetSizeOfType(context, input_tensor->type,
&element_size) == kTfLiteOk);
PadData& pad_data = *reinterpret_cast<PadData*>(node->user_data);
pad_data.Setup(input_tensor->dims->data, input_tensor->dims->size,
element_size);
TfLiteTensor* output_tensor = GetOutput(context, node, PadData::kOutput);
TF_LITE_ENSURE(context, input_tensor->type == output_tensor->type);
context->ResizeTensor(context, output_tensor,
                        pad_data.BuildOutputTensorDims());
return kTfLiteOk;
}
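// All shape and offset work happened in Prepare, so Eval just applies the
// precomputed plan to the raw tensor buffers.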
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input_tensor = GetInput(context, node, PadData::kInput);
const TfLiteTensor* padding_value_tensor =
GetInput(context, node, PadData::kPaddingValue);
TfLiteTensor* output_tensor = GetOutput(context, node, PadData::kOutput);
PadData& pad_data = *reinterpret_cast<PadData*>(node->user_data);
pad_data.Apply(input_tensor->data.raw_const,
padding_value_tensor->data.raw_const, output_tensor->data.raw);
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_STABLEHLO_PAD() {
static TfLiteRegistration r = {stablehlo_pad::Init,
stablehlo_pad::Free,
stablehlo_pad::Prepare,
stablehlo_pad::Eval};
return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_pad {
namespace {
using testing::ElementsAre;
using testing::ElementsAreArray;
using testing::HasSubstr;
template <class T>
class StablehloPadModel : public SingleOpModel {
public:
static constexpr TensorType kTensorType = GetTensorType<T>();
void SetEdgePadding(std::vector<int64_t> low, std::vector<int64_t> high) {
edge_padding_low_ = std::move(low);
edge_padding_high_ = std::move(high);
}
const std::vector<int64_t>& GetEdgePaddingLow() const {
return edge_padding_low_;
}
const std::vector<int64_t>& GetEdgePaddingHigh() const {
return edge_padding_high_;
}
void SetInteriorPadding(std::vector<int64_t> padding) {
interior_padding_ = std::move(padding);
}
const std::vector<int64_t>& GetInteriorPadding() const {
return interior_padding_;
}
void SetInput(std::vector<int64_t> shape) {
input_.shape = shape;
input_.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_iota(input_.data, static_cast<T>(1));
}
void SetInput(std::vector<int64_t> shape, std::vector<T> data) {
input_.shape = shape;
input_.data = data;
}
void SetInput(absl::Span<const int64_t> shape, absl::BitGenRef bitgen, T min,
T max) {
input_.shape.assign(shape.begin(), shape.end());
input_.data.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
absl::c_generate(input_.data, [&] {
return absl::Uniform(absl::IntervalClosed, bitgen, min, max);
});
}
const reduce_window::reference::Tensor<T>& GetInput() const { return input_; }
void SetPaddingValue(const T& v) { padding_value_ = v; }
T GetPaddingValue() const { return padding_value_; }
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_tensor_id_),
GetTensorSize(output_tensor_id_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape =
*(interpreter_->tensor(output_tensor_id_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
absl::Status CheckPreconditions() {
const size_t rank = input_.shape.size();
if (rank == 0) {
return absl::FailedPreconditionError("Input rank is 0.");
}
if (edge_padding_low_.empty()) {
edge_padding_low_ = std::vector<int64_t>(rank, 0);
} else if (edge_padding_low_.size() != rank) {
return absl::FailedPreconditionError(
"Low edge padding does not have the right size.");
}
if (edge_padding_high_.empty()) {
edge_padding_high_ = std::vector<int64_t>(rank, 0);
} else if (edge_padding_high_.size() != rank) {
return absl::FailedPreconditionError(
"High edge padding does not have the right size.");
}
if (interior_padding_.empty()) {
interior_padding_ = std::vector<int64_t>(rank, 0);
} else if (interior_padding_.size() != rank) {
return absl::FailedPreconditionError(
"Interior padding does not have the right size.");
}
return absl::OkStatus();
}
absl::Status Build() {
if (absl::Status status = CheckPreconditions(); !status.ok()) {
return status;
}
input_tensor_id_ =
AddInput({kTensorType,
std::vector<int>(input_.shape.begin(), input_.shape.end())});
padding_value_tensor_id_ =
AddConstInput(kTensorType, {padding_value_}, {1});
output_tensor_id_ = AddOutput(kTensorType);
SetBuiltinOp(BuiltinOperator_STABLEHLO_PAD,
BuiltinOptions2_StablehloPadOptions,
CreateStablehloPadOptions(
builder_, builder_.CreateVector(edge_padding_low_),
builder_.CreateVector(edge_padding_high_),
builder_.CreateVector(interior_padding_))
.Union());
BuildInterpreter(
{std::vector<int>(input_.shape.begin(),
input_.shape.end())},
-1, false,
true, false,
false);
AllocateAndDelegate(true);
PopulateTensor(input_tensor_id_, input_.data);
return absl::OkStatus();
}
absl::Status BuildAndInvoke() {
if (absl::Status status = Build(); !status.ok()) {
return status;
}
if (TfLiteStatus status = Invoke(); status != kTfLiteOk) {
const std::string msg =
absl::StrFormat("Invoke failed with status %d.", status);
return absl::InternalError(msg);
}
return absl::OkStatus();
}
friend std::ostream& operator<<(std::ostream& os,
const StablehloPadModel& model) {
auto print_vec = [&os](const auto& vec) {
os << "[";
if (!vec.empty()) {
auto it = vec.begin();
os << +*(it++);
for (; it != vec.end(); ++it) {
os << ", " << +*it;
}
}
os << "]";
};
os << " edge_padding_low: ";
print_vec(model.GetEdgePaddingLow());
os << "\n edge_padding_high: ";
print_vec(model.GetEdgePaddingHigh());
os << "\n interior_padding: ";
print_vec(model.GetInteriorPadding());
os << "\n padding_value: " << +model.GetPaddingValue();
os << "\n input shape: ";
print_vec(model.GetInput().shape);
return os;
}
private:
std::vector<int64_t> edge_padding_low_;
std::vector<int64_t> edge_padding_high_;
std::vector<int64_t> interior_padding_;
reduce_window::reference::Tensor<T> input_;
T padding_value_ = 0;
int input_tensor_id_;
int padding_value_tensor_id_;
int output_tensor_id_;
};
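// Reference implementation used to check the kernel: interior padding is
// expressed by dilating the input, after which the edge amounts are handed
// to the Pad and Crop helpers, which (by assumption here) honor the positive
// and negative entries respectively.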
template <class T>
absl::StatusOr<reduce_window::reference::Tensor<T>> ComputeReference(
StablehloPadModel<T>& model) {
if (absl::Status status = model.CheckPreconditions(); !status.ok()) {
return status;
}
std::vector<int64_t> dilations, padding;
for (size_t i = 0; i < model.GetInput().shape.size(); ++i) {
padding.push_back(model.GetEdgePaddingLow()[i]);
padding.push_back(model.GetEdgePaddingHigh()[i]);
dilations.push_back(model.GetInteriorPadding()[i] + 1);
}
auto dilated_tensor = reduce_window::reference::Dilate(
model.GetInput(), dilations, model.GetPaddingValue());
auto padded_tensor = reduce_window::reference::Pad(dilated_tensor, padding,
model.GetPaddingValue());
return reduce_window::reference::Crop(padded_tensor, padding);
}
TEST(StablehloPadModelTest, DefaultModelFails) {
StablehloPadModel<int> model;
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Input rank is 0."));
}
TEST(StablehloPadModelTest, DefaultModelReturnsIdentity) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
EXPECT_THAT(model.GetInput().shape, ElementsAre(3, 1));
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
EXPECT_THAT(expected_status.value().data,
ElementsAreArray(model.GetInput().data));
}
TEST(StablehloPadModelTest, WrongEdgePaddingSizeIsAnError) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
model.SetEdgePadding({3, 4, 5}, {6, 7});
{
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Low edge padding does not have the right size."));
}
model.SetEdgePadding({3, 4}, {5, 6, 7});
{
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("High edge padding does not have the right size."));
}
}
TEST(StablehloPadModelTest, WrongInteriorPaddingSizeIsAnError) {
StablehloPadModel<int> model;
model.SetInput({3, 1});
model.SetInteriorPadding({3, 4, 5});
const auto expected_status = ComputeReference(model);
EXPECT_FALSE(expected_status.ok());
EXPECT_EQ(expected_status.status().code(),
absl::StatusCode::kFailedPrecondition);
EXPECT_THAT(expected_status.status().message(),
HasSubstr("Interior padding does not have the right size."));
}
TEST(StablehloPadTest, IdentityParams) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(model.GetInput().shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(model.GetInput().data));
}
TEST(StablehloPadTest, InteriorPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, LowPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, 1}, {0, 0});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, HighPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({0, 0}, {1, 1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, AllPad) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, 1}, {1, 1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, LowCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -1}, {0, 0});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, HighCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({0, 0}, {-1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, AllCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -1}, {-1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, PadCrop) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({1, -1}, {1, -1});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, InteriorEdgePadding) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -4}, {0, 0});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
TEST(StablehloPadTest, CallPrepareTwiceDoesNotFail) {
StablehloPadModel<int> model;
model.SetInput({3, 3});
model.SetEdgePadding({-1, -4}, {0, 0});
model.SetInteriorPadding({1, 2});
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
model.SetApplyDefaultDelegates();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape));
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data));
}
template <class T>
std::vector<T> RandomVector(absl::BitGen& bitgen, size_t size, T min, T max) {
std::vector<T> vec(size);
for (T& v : vec) {
v = absl::Uniform(absl::IntervalClosed, bitgen, min, max);
}
return vec;
}
template <class T>
class StablehloPadFuzzyTest : public testing::Test {};
using TestList =
testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(StablehloPadFuzzyTest, TestList);
TYPED_TEST(StablehloPadFuzzyTest, FuzzyTest) {
absl::BitGen bitgen;
for (size_t iteration = 0; iteration < 10000; ++iteration) {
const int rank = absl::Uniform(absl::IntervalClosed, bitgen, 1, 2);
StablehloPadModel<TypeParam> model;
model.SetInput(
RandomVector<int64_t>(bitgen, rank, 1, 3),
bitgen, -5, 5);
model.SetInteriorPadding(
RandomVector<int64_t>(bitgen, rank, 0, 2));
model.SetEdgePadding(
RandomVector<int64_t>(bitgen, rank, -5, 5),
RandomVector<int64_t>(bitgen, rank, -5, 5));
model.SetPaddingValue(
absl::Uniform(absl::IntervalClosed, bitgen, -127, 127));
const auto expected_status = ComputeReference(model);
ASSERT_TRUE(expected_status.ok());
const auto& expected = expected_status.value();
ASSERT_TRUE(model.BuildAndInvoke().ok());
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape))
<< model;
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data))
<< model;
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_pad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_pad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
45a0f00c-97a3-4a6b-a58a-8ad1ecc2fa36 | cpp | google/cel-cpp | navigable_ast | tools/navigable_ast.cc | tools/navigable_ast_test.cc | #include "tools/navigable_ast.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_check.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "eval/public/ast_traverse.h"
#include "eval/public/ast_visitor.h"
#include "eval/public/ast_visitor_base.h"
#include "eval/public/source_position.h"
namespace cel {
namespace tools_internal {
AstNodeData& AstMetadata::NodeDataAt(size_t index) {
ABSL_CHECK(index < nodes.size());
return nodes[index]->data_;
}
size_t AstMetadata::AddNode() {
size_t index = nodes.size();
nodes.push_back(absl::WrapUnique(new AstNode()));
return index;
}
}
namespace {
using google::api::expr::v1alpha1::Expr;
using google::api::expr::runtime::AstTraverse;
using google::api::expr::runtime::SourcePosition;
NodeKind GetNodeKind(const Expr& expr) {
switch (expr.expr_kind_case()) {
case Expr::kConstExpr:
return NodeKind::kConstant;
case Expr::kIdentExpr:
return NodeKind::kIdent;
case Expr::kSelectExpr:
return NodeKind::kSelect;
case Expr::kCallExpr:
return NodeKind::kCall;
case Expr::kListExpr:
return NodeKind::kList;
case Expr::kStructExpr:
if (!expr.struct_expr().message_name().empty()) {
return NodeKind::kStruct;
} else {
return NodeKind::kMap;
}
case Expr::kComprehensionExpr:
return NodeKind::kComprehension;
case Expr::EXPR_KIND_NOT_SET:
default:
return NodeKind::kUnspecified;
}
}
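// Classifies the child at `child_index` relative to its parent. Map entries
// are visited key-then-value, so even child indices are keys; comprehension
// children are identified by the fixed argument indices used by the AST
// traversal helpers.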
ChildKind GetChildKind(const tools_internal::AstNodeData& parent_node,
size_t child_index) {
constexpr size_t kComprehensionRangeArgIndex =
google::api::expr::runtime::ITER_RANGE;
constexpr size_t kComprehensionInitArgIndex =
google::api::expr::runtime::ACCU_INIT;
constexpr size_t kComprehensionConditionArgIndex =
google::api::expr::runtime::LOOP_CONDITION;
constexpr size_t kComprehensionLoopStepArgIndex =
google::api::expr::runtime::LOOP_STEP;
constexpr size_t kComprehensionResultArgIndex =
google::api::expr::runtime::RESULT;
switch (parent_node.node_kind) {
case NodeKind::kStruct:
return ChildKind::kStructValue;
case NodeKind::kMap:
if (child_index % 2 == 0) {
return ChildKind::kMapKey;
}
return ChildKind::kMapValue;
case NodeKind::kList:
return ChildKind::kListElem;
case NodeKind::kSelect:
return ChildKind::kSelectOperand;
case NodeKind::kCall:
if (child_index == 0 && parent_node.expr->call_expr().has_target()) {
return ChildKind::kCallReceiver;
}
return ChildKind::kCallArg;
case NodeKind::kComprehension:
switch (child_index) {
case kComprehensionRangeArgIndex:
return ChildKind::kComprehensionRange;
case kComprehensionInitArgIndex:
return ChildKind::kComprehensionInit;
case kComprehensionConditionArgIndex:
return ChildKind::kComprehensionCondition;
case kComprehensionLoopStepArgIndex:
return ChildKind::kComprehensionLoopStep;
case kComprehensionResultArgIndex:
return ChildKind::kComprensionResult;
default:
return ChildKind::kUnspecified;
}
default:
return ChildKind::kUnspecified;
}
}
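// Builds the AstMetadata in a single traversal: PreVisitExpr assigns indices
// in preorder and links each node to its parent, while PostVisitExpr records
// the postorder sequence and accumulates `weight`, the size of each node's
// subtree including itself.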
class NavigableExprBuilderVisitor
: public google::api::expr::runtime::AstVisitorBase {
public:
NavigableExprBuilderVisitor()
: metadata_(std::make_unique<tools_internal::AstMetadata>()) {}
void PreVisitExpr(const Expr* expr, const SourcePosition* position) override {
AstNode* parent = parent_stack_.empty()
? nullptr
: metadata_->nodes[parent_stack_.back()].get();
size_t index = metadata_->AddNode();
tools_internal::AstNodeData& node_data = metadata_->NodeDataAt(index);
node_data.parent = parent;
node_data.expr = expr;
node_data.parent_relation = ChildKind::kUnspecified;
node_data.node_kind = GetNodeKind(*expr);
node_data.weight = 1;
node_data.index = index;
node_data.metadata = metadata_.get();
metadata_->id_to_node.insert({expr->id(), index});
metadata_->expr_to_node.insert({expr, index});
if (!parent_stack_.empty()) {
auto& parent_node_data = metadata_->NodeDataAt(parent_stack_.back());
size_t child_index = parent_node_data.children.size();
parent_node_data.children.push_back(metadata_->nodes[index].get());
node_data.parent_relation = GetChildKind(parent_node_data, child_index);
}
parent_stack_.push_back(index);
}
void PostVisitExpr(const Expr* expr,
const SourcePosition* position) override {
size_t idx = parent_stack_.back();
parent_stack_.pop_back();
metadata_->postorder.push_back(metadata_->nodes[idx].get());
tools_internal::AstNodeData& node = metadata_->NodeDataAt(idx);
if (!parent_stack_.empty()) {
tools_internal::AstNodeData& parent_node_data =
metadata_->NodeDataAt(parent_stack_.back());
parent_node_data.weight += node.weight;
}
}
std::unique_ptr<tools_internal::AstMetadata> Consume() && {
return std::move(metadata_);
}
private:
std::unique_ptr<tools_internal::AstMetadata> metadata_;
std::vector<size_t> parent_stack_;
};
}
std::string ChildKindName(ChildKind kind) {
switch (kind) {
case ChildKind::kUnspecified:
return "Unspecified";
case ChildKind::kSelectOperand:
return "SelectOperand";
case ChildKind::kCallReceiver:
return "CallReceiver";
case ChildKind::kCallArg:
return "CallArg";
case ChildKind::kListElem:
return "ListElem";
case ChildKind::kMapKey:
return "MapKey";
case ChildKind::kMapValue:
return "MapValue";
case ChildKind::kStructValue:
return "StructValue";
case ChildKind::kComprehensionRange:
return "ComprehensionRange";
case ChildKind::kComprehensionInit:
return "ComprehensionInit";
case ChildKind::kComprehensionCondition:
return "ComprehensionCondition";
case ChildKind::kComprehensionLoopStep:
return "ComprehensionLoopStep";
case ChildKind::kComprensionResult:
return "ComprehensionResult";
default:
return absl::StrCat("Unknown ChildKind ", static_cast<int>(kind));
}
}
std::string NodeKindName(NodeKind kind) {
switch (kind) {
case NodeKind::kUnspecified:
return "Unspecified";
case NodeKind::kConstant:
return "Constant";
case NodeKind::kIdent:
return "Ident";
case NodeKind::kSelect:
return "Select";
case NodeKind::kCall:
return "Call";
case NodeKind::kList:
return "List";
case NodeKind::kMap:
return "Map";
case NodeKind::kStruct:
return "Struct";
case NodeKind::kComprehension:
return "Comprehension";
default:
return absl::StrCat("Unknown NodeKind ", static_cast<int>(kind));
}
}
int AstNode::child_index() const {
if (data_.parent == nullptr) {
return -1;
}
int i = 0;
for (const AstNode* ptr : data_.parent->children()) {
if (ptr->expr() == expr()) {
return i;
}
i++;
}
return -1;
}
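// Nodes are stored in preorder, so a node's subtree (itself plus all of its
// descendants) occupies the contiguous span of `weight` nodes starting at
// the node's own index.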
AstNode::PreorderRange AstNode::DescendantsPreorder() const {
return AstNode::PreorderRange(absl::MakeConstSpan(data_.metadata->nodes)
.subspan(data_.index, data_.weight));
}
AstNode::PostorderRange AstNode::DescendantsPostorder() const {
return AstNode::PostorderRange(absl::MakeConstSpan(data_.metadata->postorder)
.subspan(data_.index, data_.weight));
}
NavigableAst NavigableAst::Build(const Expr& expr) {
NavigableExprBuilderVisitor visitor;
AstTraverse(&expr, nullptr, &visitor);
return NavigableAst(std::move(visitor).Consume());
}
} | #include "tools/navigable_ast.h"
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/base/casts.h"
#include "absl/strings/str_cat.h"
#include "base/builtins.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace cel {
namespace {
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::parser::Parse;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::SizeIs;
TEST(NavigableAst, Basic) {
Expr const_node;
const_node.set_id(1);
const_node.mutable_const_expr()->set_int64_value(42);
NavigableAst ast = NavigableAst::Build(const_node);
EXPECT_TRUE(ast.IdsAreUnique());
const AstNode& root = ast.Root();
EXPECT_EQ(root.expr(), &const_node);
EXPECT_THAT(root.children(), IsEmpty());
EXPECT_TRUE(root.parent() == nullptr);
EXPECT_EQ(root.child_index(), -1);
EXPECT_EQ(root.node_kind(), NodeKind::kConstant);
EXPECT_EQ(root.parent_relation(), ChildKind::kUnspecified);
}
TEST(NavigableAst, DefaultCtorEmpty) {
Expr const_node;
const_node.set_id(1);
const_node.mutable_const_expr()->set_int64_value(42);
NavigableAst ast = NavigableAst::Build(const_node);
EXPECT_EQ(ast, ast);
NavigableAst empty;
EXPECT_NE(ast, empty);
EXPECT_EQ(empty, empty);
EXPECT_TRUE(static_cast<bool>(ast));
EXPECT_FALSE(static_cast<bool>(empty));
NavigableAst moved = std::move(ast);
EXPECT_EQ(ast, empty);
EXPECT_FALSE(static_cast<bool>(ast));
EXPECT_TRUE(static_cast<bool>(moved));
}
TEST(NavigableAst, FindById) {
Expr const_node;
const_node.set_id(1);
const_node.mutable_const_expr()->set_int64_value(42);
NavigableAst ast = NavigableAst::Build(const_node);
const AstNode& root = ast.Root();
EXPECT_EQ(ast.FindId(const_node.id()), &root);
EXPECT_EQ(ast.FindId(-1), nullptr);
}
MATCHER_P(AstNodeWrapping, expr, "") {
const AstNode* ptr = arg;
return ptr != nullptr && ptr->expr() == expr;
}
TEST(NavigableAst, ToleratesNonUnique) {
Expr call_node;
call_node.set_id(1);
call_node.mutable_call_expr()->set_function(cel::builtin::kNot);
Expr* const_node = call_node.mutable_call_expr()->add_args();
const_node->mutable_const_expr()->set_bool_value(false);
const_node->set_id(1);
NavigableAst ast = NavigableAst::Build(call_node);
const AstNode& root = ast.Root();
EXPECT_EQ(ast.FindId(1), &root);
EXPECT_EQ(ast.FindExpr(&call_node), &root);
EXPECT_FALSE(ast.IdsAreUnique());
EXPECT_THAT(ast.FindExpr(const_node), AstNodeWrapping(const_node));
}
TEST(NavigableAst, FindByExprPtr) {
Expr const_node;
const_node.set_id(1);
const_node.mutable_const_expr()->set_int64_value(42);
NavigableAst ast = NavigableAst::Build(const_node);
const AstNode& root = ast.Root();
EXPECT_EQ(ast.FindExpr(&const_node), &root);
EXPECT_EQ(ast.FindExpr(&Expr::default_instance()), nullptr);
}
TEST(NavigableAst, Children) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("1 + 2"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.expr(), &parsed_expr.expr());
EXPECT_THAT(root.children(), SizeIs(2));
EXPECT_TRUE(root.parent() == nullptr);
EXPECT_EQ(root.child_index(), -1);
EXPECT_EQ(root.parent_relation(), ChildKind::kUnspecified);
EXPECT_EQ(root.node_kind(), NodeKind::kCall);
EXPECT_THAT(
root.children(),
ElementsAre(AstNodeWrapping(&parsed_expr.expr().call_expr().args(0)),
AstNodeWrapping(&parsed_expr.expr().call_expr().args(1))));
ASSERT_THAT(root.children(), SizeIs(2));
const auto* child1 = root.children()[0];
EXPECT_EQ(child1->child_index(), 0);
EXPECT_EQ(child1->parent(), &root);
EXPECT_EQ(child1->parent_relation(), ChildKind::kCallArg);
EXPECT_EQ(child1->node_kind(), NodeKind::kConstant);
EXPECT_THAT(child1->children(), IsEmpty());
const auto* child2 = root.children()[1];
EXPECT_EQ(child2->child_index(), 1);
}
TEST(NavigableAst, UnspecifiedExpr) {
Expr expr;
expr.set_id(1);
NavigableAst ast = NavigableAst::Build(expr);
const AstNode& root = ast.Root();
EXPECT_EQ(root.expr(), &expr);
EXPECT_THAT(root.children(), SizeIs(0));
EXPECT_TRUE(root.parent() == nullptr);
EXPECT_EQ(root.child_index(), -1);
EXPECT_EQ(root.node_kind(), NodeKind::kUnspecified);
}
TEST(NavigableAst, ParentRelationSelect) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("a.b"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
ASSERT_THAT(root.children(), SizeIs(1));
const auto* child = root.children()[0];
EXPECT_EQ(child->parent_relation(), ChildKind::kSelectOperand);
EXPECT_EQ(child->node_kind(), NodeKind::kIdent);
}
TEST(NavigableAst, ParentRelationCallReceiver) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("a.b()"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
ASSERT_THAT(root.children(), SizeIs(1));
const auto* child = root.children()[0];
EXPECT_EQ(child->parent_relation(), ChildKind::kCallReceiver);
EXPECT_EQ(child->node_kind(), NodeKind::kIdent);
}
TEST(NavigableAst, ParentRelationCreateStruct) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr,
Parse("com.example.Type{field: '123'}"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kStruct);
ASSERT_THAT(root.children(), SizeIs(1));
const auto* child = root.children()[0];
EXPECT_EQ(child->parent_relation(), ChildKind::kStructValue);
EXPECT_EQ(child->node_kind(), NodeKind::kConstant);
}
TEST(NavigableAst, ParentRelationCreateMap) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("{'a': 123}"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kMap);
ASSERT_THAT(root.children(), SizeIs(2));
const auto* key = root.children()[0];
const auto* value = root.children()[1];
EXPECT_EQ(key->parent_relation(), ChildKind::kMapKey);
EXPECT_EQ(key->node_kind(), NodeKind::kConstant);
EXPECT_EQ(value->parent_relation(), ChildKind::kMapValue);
EXPECT_EQ(value->node_kind(), NodeKind::kConstant);
}
TEST(NavigableAst, ParentRelationCreateList) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("[123]"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kList);
ASSERT_THAT(root.children(), SizeIs(1));
const auto* child = root.children()[0];
EXPECT_EQ(child->parent_relation(), ChildKind::kListElem);
EXPECT_EQ(child->node_kind(), NodeKind::kConstant);
}
TEST(NavigableAst, ParentRelationComprehension) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("[1].all(x, x < 2)"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kComprehension);
ASSERT_THAT(root.children(), SizeIs(5));
const auto* range = root.children()[0];
const auto* init = root.children()[1];
const auto* condition = root.children()[2];
const auto* step = root.children()[3];
const auto* finish = root.children()[4];
EXPECT_EQ(range->parent_relation(), ChildKind::kComprehensionRange);
EXPECT_EQ(init->parent_relation(), ChildKind::kComprehensionInit);
EXPECT_EQ(condition->parent_relation(), ChildKind::kComprehensionCondition);
EXPECT_EQ(step->parent_relation(), ChildKind::kComprehensionLoopStep);
EXPECT_EQ(finish->parent_relation(), ChildKind::kComprensionResult);
}
TEST(NavigableAst, DescendantsPostorder) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("1 + (x * 3)"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kCall);
std::vector<int> constants;
std::vector<NodeKind> node_kinds;
for (const AstNode& node : root.DescendantsPostorder()) {
if (node.node_kind() == NodeKind::kConstant) {
constants.push_back(node.expr()->const_expr().int64_value());
}
node_kinds.push_back(node.node_kind());
}
EXPECT_THAT(node_kinds, ElementsAre(NodeKind::kConstant, NodeKind::kIdent,
NodeKind::kConstant, NodeKind::kCall,
NodeKind::kCall));
EXPECT_THAT(constants, ElementsAre(1, 3));
}
TEST(NavigableAst, DescendantsPreorder) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("1 + (x * 3)"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kCall);
std::vector<int> constants;
std::vector<NodeKind> node_kinds;
for (const AstNode& node : root.DescendantsPreorder()) {
if (node.node_kind() == NodeKind::kConstant) {
constants.push_back(node.expr()->const_expr().int64_value());
}
node_kinds.push_back(node.node_kind());
}
EXPECT_THAT(node_kinds,
ElementsAre(NodeKind::kCall, NodeKind::kConstant, NodeKind::kCall,
NodeKind::kIdent, NodeKind::kConstant));
EXPECT_THAT(constants, ElementsAre(1, 3));
}
TEST(NavigableAst, DescendantsPreorderComprehension) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("[1, 2, 3].map(x, x + 1)"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kComprehension);
std::vector<std::pair<NodeKind, ChildKind>> node_kinds;
for (const AstNode& node : root.DescendantsPreorder()) {
node_kinds.push_back(
std::make_pair(node.node_kind(), node.parent_relation()));
}
EXPECT_THAT(
node_kinds,
ElementsAre(Pair(NodeKind::kComprehension, ChildKind::kUnspecified),
Pair(NodeKind::kList, ChildKind::kComprehensionRange),
Pair(NodeKind::kConstant, ChildKind::kListElem),
Pair(NodeKind::kConstant, ChildKind::kListElem),
Pair(NodeKind::kConstant, ChildKind::kListElem),
Pair(NodeKind::kList, ChildKind::kComprehensionInit),
Pair(NodeKind::kConstant, ChildKind::kComprehensionCondition),
Pair(NodeKind::kCall, ChildKind::kComprehensionLoopStep),
Pair(NodeKind::kIdent, ChildKind::kCallArg),
Pair(NodeKind::kList, ChildKind::kCallArg),
Pair(NodeKind::kCall, ChildKind::kListElem),
Pair(NodeKind::kIdent, ChildKind::kCallArg),
Pair(NodeKind::kConstant, ChildKind::kCallArg),
Pair(NodeKind::kIdent, ChildKind::kComprensionResult)));
}
TEST(NavigableAst, DescendantsPreorderCreateMap) {
ASSERT_OK_AND_ASSIGN(auto parsed_expr, Parse("{'key1': 1, 'key2': 2}"));
NavigableAst ast = NavigableAst::Build(parsed_expr.expr());
const AstNode& root = ast.Root();
EXPECT_EQ(root.node_kind(), NodeKind::kMap);
std::vector<std::pair<NodeKind, ChildKind>> node_kinds;
for (const AstNode& node : root.DescendantsPreorder()) {
node_kinds.push_back(
std::make_pair(node.node_kind(), node.parent_relation()));
}
EXPECT_THAT(node_kinds,
ElementsAre(Pair(NodeKind::kMap, ChildKind::kUnspecified),
Pair(NodeKind::kConstant, ChildKind::kMapKey),
Pair(NodeKind::kConstant, ChildKind::kMapValue),
Pair(NodeKind::kConstant, ChildKind::kMapKey),
Pair(NodeKind::kConstant, ChildKind::kMapValue)));
}
TEST(NodeKind, Stringify) {
EXPECT_EQ(absl::StrCat(NodeKind::kConstant), "Constant");
EXPECT_EQ(absl::StrCat(NodeKind::kIdent), "Ident");
EXPECT_EQ(absl::StrCat(NodeKind::kSelect), "Select");
EXPECT_EQ(absl::StrCat(NodeKind::kCall), "Call");
EXPECT_EQ(absl::StrCat(NodeKind::kList), "List");
EXPECT_EQ(absl::StrCat(NodeKind::kMap), "Map");
EXPECT_EQ(absl::StrCat(NodeKind::kStruct), "Struct");
EXPECT_EQ(absl::StrCat(NodeKind::kComprehension), "Comprehension");
EXPECT_EQ(absl::StrCat(NodeKind::kUnspecified), "Unspecified");
EXPECT_EQ(absl::StrCat(absl::bit_cast<NodeKind>(255)),
"Unknown NodeKind 255");
}
TEST(ChildKind, Stringify) {
EXPECT_EQ(absl::StrCat(ChildKind::kSelectOperand), "SelectOperand");
EXPECT_EQ(absl::StrCat(ChildKind::kCallReceiver), "CallReceiver");
EXPECT_EQ(absl::StrCat(ChildKind::kCallArg), "CallArg");
EXPECT_EQ(absl::StrCat(ChildKind::kListElem), "ListElem");
EXPECT_EQ(absl::StrCat(ChildKind::kMapKey), "MapKey");
EXPECT_EQ(absl::StrCat(ChildKind::kMapValue), "MapValue");
EXPECT_EQ(absl::StrCat(ChildKind::kStructValue), "StructValue");
EXPECT_EQ(absl::StrCat(ChildKind::kComprehensionRange), "ComprehensionRange");
EXPECT_EQ(absl::StrCat(ChildKind::kComprehensionInit), "ComprehensionInit");
EXPECT_EQ(absl::StrCat(ChildKind::kComprehensionCondition),
"ComprehensionCondition");
EXPECT_EQ(absl::StrCat(ChildKind::kComprehensionLoopStep),
"ComprehensionLoopStep");
EXPECT_EQ(absl::StrCat(ChildKind::kComprensionResult), "ComprehensionResult");
EXPECT_EQ(absl::StrCat(ChildKind::kUnspecified), "Unspecified");
EXPECT_EQ(absl::StrCat(absl::bit_cast<ChildKind>(255)),
"Unknown ChildKind 255");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/tools/navigable_ast.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/tools/navigable_ast_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b97172ce-bd0b-4d5c-b376-b2377221647b | cpp | tensorflow/tensorflow | statusor | tensorflow/core/platform/statusor.h | third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_STATUSOR_H_
#define TENSORFLOW_CORE_PLATFORM_STATUSOR_H_
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
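// Forwarding alias kept for backward compatibility: tensorflow::StatusOr is
// the tsl implementation pulled in above.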
using tsl::StatusOr;
}
#endif | #include "tsl/platform/statusor.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/config.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
class Base1 {
public:
virtual ~Base1() {}
int pad_;
};
class Base2 {
public:
virtual ~Base2() {}
int yetotherpad_;
};
class Derived : public Base1, public Base2 {
public:
~Derived() override {}
int evenmorepad_;
};
class CopyNoAssign {
public:
explicit CopyNoAssign(int value) : foo_(value) {}
CopyNoAssign(const CopyNoAssign& other) : foo_(other.foo_) {}
int foo_;
private:
const CopyNoAssign& operator=(const CopyNoAssign&);
};
class NoDefaultConstructor {
public:
explicit NoDefaultConstructor(int foo);
};
static_assert(!std::is_default_constructible<NoDefaultConstructor>(),
"Should not be default-constructible.");
absl::StatusOr<std::unique_ptr<int>> ReturnUniquePtr() {
return std::unique_ptr<int>(new int(0));
}
TEST(StatusOr, NullPointerStatusOr) {
absl::StatusOr<int*> null_status(nullptr);
EXPECT_TRUE(null_status.ok());
EXPECT_EQ(null_status.value(), nullptr);
}
TEST(StatusOr, TestNoDefaultConstructorInitialization) {
absl::StatusOr<NoDefaultConstructor> statusor(errors::Cancelled(""));
EXPECT_FALSE(statusor.ok());
EXPECT_EQ(statusor.status().code(), absl::StatusCode::kCancelled);
absl::StatusOr<NoDefaultConstructor> statusor2;
EXPECT_FALSE(statusor2.ok());
EXPECT_EQ(statusor2.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOr, TestMoveOnlyInitialization) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
int* previous = thing.value().get();
thing = ReturnUniquePtr();
EXPECT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
EXPECT_NE(previous, thing.value().get());
}
TEST(StatusOr, TestMoveOnlyStatusCtr) {
absl::StatusOr<std::unique_ptr<int>> thing(errors::Cancelled(""));
ASSERT_FALSE(thing.ok());
}
TEST(StatusOr, TestMoveOnlyValueExtraction) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
std::unique_ptr<int> ptr = std::move(thing).value();
EXPECT_EQ(0, *ptr);
thing = std::move(ptr);
ptr = std::move(thing.value());
EXPECT_EQ(0, *ptr);
}
TEST(StatusOr, TestMoveOnlyConversion) {
absl::StatusOr<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
const int* const_previous = const_thing.value().get();
const_thing = ReturnUniquePtr();
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
EXPECT_NE(const_previous, const_thing.value().get());
}
TEST(StatusOr, TestMoveOnlyVector) {
std::vector<absl::StatusOr<std::unique_ptr<int>>> vec;
vec.push_back(ReturnUniquePtr());
vec.resize(2);
auto another_vec = std::move(vec);
EXPECT_EQ(0, *another_vec[0].value());
EXPECT_EQ(absl::StatusCode::kUnknown, another_vec[1].status().code());
}
TEST(StatusOr, TestMoveWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = std::move(value1);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = std::move(error1);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = std::move(error2);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = std::move(value2);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
}
TEST(StatusOr, TestCopyWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = value1;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = error1;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = error2;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = value2;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
EXPECT_EQ(std::string(1000, '1'), value1.value());
EXPECT_EQ("error1", error1.status().message());
EXPECT_EQ("error2", error2.status().message());
EXPECT_EQ(std::string(1000, '2'), value2.value());
}
TEST(StatusOr, TestDefaultCtor) {
absl::StatusOr<int> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestDefaultCtorValue) {
absl::StatusOr<int> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
const absl::StatusOr<int> thing2;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
    thing2.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
  EXPECT_DEATH(thing2.value(), "");
#endif
}
TEST(StatusOr, TestStatusCtor) {
absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestValueCtor) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestCopyCtorStatusOk) {
const int kI = 4;
const absl::StatusOr<int> original(kI);
const absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOk) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestCopyCtorNonAssignable) {
const int kI = 4;
CopyNoAssign value(kI);
absl::StatusOr<CopyNoAssign> original(value);
absl::StatusOr<CopyNoAssign> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value().foo_, copy.value().foo_);
}
TEST(StatusOr, TestCopyCtorStatusOKConverting) {
const int kI = 4;
absl::StatusOr<int> original(kI);
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_DOUBLE_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOkConverting) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestAssignmentStatusOk) {
const int kI = 4;
absl::StatusOr<int> source(kI);
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestAssignmentStatusNotOk) {
absl::StatusOr<int> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestStatus) {
absl::StatusOr<int> good(4);
EXPECT_TRUE(good.ok());
absl::StatusOr<int> bad(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(bad.ok());
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestValue) {
const int kI = 4;
absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestValueConst) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOrDeathTest, TestValueNotOk) {
absl::StatusOr<int> thing(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "cancelled");
#endif
}
TEST(StatusOrDeathTest, TestValueNotOkConst) {
const absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kUnknown, ""));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerDefaultCtor) {
absl::StatusOr<int*> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) {
absl::StatusOr<int*> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerStatusCtor) {
absl::StatusOr<int*> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValueCtor) {
const int kI = 4;
absl::StatusOr<const int*> thing(&kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> original(&kI);
absl::StatusOr<const int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOk) {
absl::StatusOr<int*> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {
Derived derived;
absl::StatusOr<Derived*> original(&derived);
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(static_cast<const Base2*>(original.value()), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {
absl::StatusOr<Derived*> original(
absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerAssignmentStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> source(&kI);
absl::StatusOr<const int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestPointerAssignmentStatusNotOk) {
absl::StatusOr<int*> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestPointerStatus) {
const int kI = 0;
absl::StatusOr<const int*> good(&kI);
EXPECT_TRUE(good.ok());
absl::StatusOr<const int*> bad(
absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValue) {
const int kI = 0;
absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerValueConst) {
const int kI = 0;
const absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestArrowOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(*uptr->get(), 0);
}
TEST(StatusOr, TestStarOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(**uptr, 0);
}
TEST(StatusOr, TestStarOperatorDeath) {
absl::StatusOr<Base1> error(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
EXPECT_DEATH(*error, "cancelled");
}
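// The helpers and microbenchmarks below compare three factory styles
// (returning a raw pointer, returning absl::Status with an out-parameter,
// and returning absl::StatusOr) on both success and failure paths, with
// short and long error messages.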
static absl::StatusOr<int> MakeStatus() { return 100; }
template <typename T>
class BenchmarkFactory {
public:
BenchmarkFactory() : value_(new T) {}
~BenchmarkFactory() { delete value_; }
T* TrivialFactory() TF_ATTRIBUTE_NOINLINE { return value_; }
absl::Status ArgumentFactory(T** result) TF_ATTRIBUTE_NOINLINE {
*result = value_;
return absl::OkStatus();
}
absl::Status ArgumentFactoryFail(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kCancelled, "");
}
absl::Status ArgumentFactoryFailShortMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal, "");
}
absl::Status ArgumentFactoryFailLongMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal,
"a big string of message junk that will never be read");
}
StatusOr<T*> StatusOrFactory() TF_ATTRIBUTE_NOINLINE {
return static_cast<T*>(value_);
}
StatusOr<T*> StatusOrFactoryFail() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kCancelled, "");
}
StatusOr<T*> StatusOrFactoryFailShortMsg() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kInternal, "");
}
StatusOr<T*> StatusOrFactoryFailLongMsg() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kInternal,
"a big string of message junk that will never be read");
}
private:
T* volatile value_;
BenchmarkFactory(const BenchmarkFactory&) = delete;
void operator=(const BenchmarkFactory&) = delete;
};
class BenchmarkType {
public:
BenchmarkType() {}
virtual ~BenchmarkType() {}
virtual void DoWork() TF_ATTRIBUTE_NOINLINE {}
private:
BenchmarkType(const BenchmarkType&) = delete;
void operator=(const BenchmarkType&) = delete;
};
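// Measures the cost of the benchmark loop itself, so that the factory
// benchmarks below can be read relative to this baseline.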
void BM_CalibrateWorkLoop(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
BenchmarkType* result = factory.TrivialFactory();
for (auto s : state) {
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_CalibrateWorkLoop);
void BM_TrivialFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = factory.TrivialFactory();
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_TrivialFactory);
void BM_ArgumentFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactory(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactory);
void BM_StatusOrFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactory();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactory);
void BM_ArgumentFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFail(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFail);
void BM_StatusOrFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactoryFail();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFail);
void BM_ArgumentFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailShortMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailShortMsg);
void BM_StatusOrFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailShortMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailShortMsg);
void BM_ArgumentFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailLongMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailLongMsg);
void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailLongMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailLongMsg);
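// The google-internal TF_ASSIGN_OR_RETURN records a source location at each
// propagation hop, which is why three locations are expected after two
// levels of propagation plus the origin.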
#if defined(PLATFORM_GOOGLE)
absl::StatusOr<int> GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::StatusOr<int> PropagateError() {
TF_ASSIGN_OR_RETURN(int a, GetError());
return a;
}
absl::StatusOr<int> PropagateError2() {
TF_ASSIGN_OR_RETURN(int a, PropagateError());
return a;
}
TEST(Status, StackTracePropagation) {
absl::StatusOr<int> s = PropagateError2();
auto sources = s.status().GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/statusor_test.cc");
}
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/statusor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/statusor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cba17e35-f5cf-4369-abac-57976f000bab | cpp | tensorflow/tensorflow | pin_to_host_optimizer | tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc | tensorflow/core/grappler/optimizers/pin_to_host_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/tpu.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace grappler {
namespace internal {
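// Largest number of elements a tensor may have to still be considered
// "small" enough to run on the host.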
constexpr int64_t kTensorMaxSize = 64;
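// Ops that must never be moved to the host, regardless of tensor size.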
bool IsDenylisted(const NodeDef& node) {
  return IsCollective(node) || IsControlFlow(node) || IsNoOp(node);
}
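// A tensor is "small" if it is a string, or an int32/int64/float tensor
// whose statically known element count is between 0 and kTensorMaxSize.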
bool IsTensorSmall(const OpInfo::TensorProperties& prop) {
if (prop.dtype() == DataType::DT_STRING) {
return true;
}
if (prop.dtype() != DataType::DT_INT32 &&
prop.dtype() != DataType::DT_INT64 &&
prop.dtype() != DataType::DT_FLOAT) {
return false;
}
const int64_t size = NumCoefficients(prop.shape());
if (size < 0 || size > kTensorMaxSize) {
return false;
}
return true;
}
Status TryFindKernelDef(const std::vector<DeviceType>& devices,
const NodeDef& node, const KernelDef** kdef) {
for (const DeviceType& device : devices) {
const KernelDef* kernel = nullptr;
Status s = FindKernelDef(device, node, &kernel, nullptr);
if (s.ok()) {
if (kdef) {
*kdef = kernel;
}
return absl::OkStatus();
}
}
return errors::NotFound("Could not find KernelDef for op: ", node.op());
}
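// Determines whether output `port_id` of `node` is host friendly: large
// outputs never qualify, identity-like ops recurse into their fanins,
// CPU-placed nodes qualify directly, and otherwise the kernel's
// host_memory_arg list decides.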
Status IsNodeOutputPortHostFriendly(const GraphView& graph,
GraphProperties* properties,
const NodeDef& node, int port_id,
bool* is_candidate) {
*is_candidate = false;
if (IsDenylisted(node)) {
return absl::OkStatus();
}
if (!properties->has_properties()) {
TF_RETURN_IF_ERROR(properties->InferStatically(
false, false,
false));
}
const auto& output_properties = properties->GetOutputProperties(node.name());
int output_properties_size = output_properties.size();
if (port_id >= output_properties_size) {
LOG(WARNING) << "port_id=" << port_id
<< " but output_properties.size()=" << output_properties.size()
<< "\n"
<< node.DebugString();
return absl::OkStatus();
}
if (!IsTensorSmall(output_properties[port_id])) {
return absl::OkStatus();
}
if (IsIdentity(node) || IsIdentityNSingleInput(node)) {
for (const auto& fanin : graph.GetFanins(node, false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return absl::OkStatus();
}
const int output_arg_id = OpOutputPortIdToArgId(node, *op, port_id);
if (output_arg_id < 0) {
LOG(WARNING) << "Invalid port: " << port_id << "!\n"
<< node.DebugString() << "\n"
<< op->DebugString();
return absl::OkStatus();
}
const KernelDef* kernel = nullptr;
s = TryFindKernelDef({node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node,
&kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return absl::OkStatus();
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->output_arg(output_arg_id).name() == host_memory_arg) {
*is_candidate = true;
break;
}
}
return absl::OkStatus();
}
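// Determines whether input `port_id` of `node` is expected in host memory,
// based on device placement and the kernel's host_memory_arg list.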
bool IsNodeInputPortHostFriendly(const NodeDef& node, int port_id) {
if (absl::StrContains(node.device(), DEVICE_CPU)) {
return true;
}
const OpDef* op = nullptr;
Status s = OpRegistry::Global()->LookUpOpDef(node.op(), &op);
if (!s.ok()) {
LOG(WARNING) << "Could not find OpDef for : " << node.op();
return false;
}
const int input_arg_id = OpInputPortIdToArgId(node, *op, port_id);
const KernelDef* kernel = nullptr;
s = internal::TryFindKernelDef(
{node.device().c_str(), DEVICE_GPU, DEVICE_CPU}, node, &kernel);
if (!s.ok()) {
LOG(INFO) << "Could not find KernelDef for: " << node.op();
return false;
}
for (const string& host_memory_arg : kernel->host_memory_arg()) {
if (op->input_arg(input_arg_id).name() == host_memory_arg) {
return true;
}
}
return false;
}
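// A node is a host candidate if it is already placed on CPU, or if it is
// not denylisted, has a CPU kernel, all of its fanin output ports are host
// friendly, and all of its own outputs are small.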
Status IsNodeHostCandidate(const GraphView& graph, GraphProperties* properties,
const NodeDef& node, bool* is_candidate) {
*is_candidate = false;
if (absl::StrContains(node.device(), DEVICE_CPU)) {
*is_candidate = true;
return absl::OkStatus();
}
if (IsDenylisted(node)) {
return absl::OkStatus();
}
Status s = TryFindKernelDef({DEVICE_CPU}, node, nullptr);
if (!s.ok()) {
return absl::OkStatus();
}
for (const GraphView::OutputPort& fanin :
graph.GetFanins(node, false)) {
bool fanin_candidate = false;
TF_RETURN_IF_ERROR(IsNodeOutputPortHostFriendly(
graph, properties, *fanin.node, fanin.port_id, &fanin_candidate));
if (!fanin_candidate) {
return absl::OkStatus();
}
}
if (!properties->has_properties()) {
TF_RETURN_IF_ERROR(properties->InferStatically(
false, false,
false));
}
for (const auto& prop : properties->GetOutputProperties(node.name())) {
if (!IsTensorSmall(prop)) {
return absl::OkStatus();
}
}
*is_candidate = true;
return absl::OkStatus();
}
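// Picks a host (CPU) device matching `device` from `devices`, or returns
// the empty string if none is available.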
string TryFindHostDevice(const gtl::FlatSet<string>& devices,
bool has_device_cpu, const string& device) {
if (device.empty() && has_device_cpu) {
return "/device:CPU:0";
} else if (absl::StrContains(device, DEVICE_GPU)) {
for (const auto& device_match :
{std::pair<string, string>("GPU", "CPU:0"),
std::pair<string, string>("/device", "/device:CPU:0")}) {
const string device_host =
strings::StrCat(device.substr(0, device.rfind(device_match.first)),
device_match.second);
if (devices.find(device_host) != devices.end()) {
return device_host;
}
}
}
return "";
}
}
Status PinToHostOptimizer::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
*optimized_graph = item.graph;
if (IsLegacyTPUBridgeGraphDef(*optimized_graph)) {
return absl::OkStatus();
}
GraphProperties properties(item);
GraphView graph(optimized_graph);
gtl::FlatSet<string> devices;
if (cluster) {
const std::vector<string> device_names = cluster->GetDeviceNames();
devices.insert(device_names.begin(), device_names.end());
} else {
devices = {"/device:CPU:0"};
}
const bool has_device_cpu = devices.find("/device:CPU:0") != devices.end();
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
std::vector<std::pair<NodeDef*, string>> const_nodes;
for (auto& node : *optimized_graph->mutable_node()) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
bool is_candidate = false;
TF_RETURN_IF_ERROR(
internal::IsNodeHostCandidate(graph, &properties, node, &is_candidate));
if (!is_candidate) {
continue;
}
string device =
internal::TryFindHostDevice(devices, has_device_cpu, node.device());
if (!device.empty()) {
if (IsConstant(node)) {
const_nodes.emplace_back(&node, node.device());
}
VLOG(2) << "Moving node " << node.name() << " to device " << device;
*node.mutable_device() = std::move(device);
}
}
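// Constants that were moved to the host are swapped back to their original
// device if any consumer does not take that input port in host memory.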
for (auto& it : const_nodes) {
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
NodeDef* node = it.first;
const string& device = it.second;
for (const GraphView::InputPort& fanout : graph.GetFanouts(*node, false)) {
if (!internal::IsNodeInputPortHostFriendly(*fanout.node,
fanout.port_id)) {
VLOG(2) << "Swapping node " << node->name() << " back to device "
<< device;
node->set_device(device);
break;
}
}
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/pin_to_host_optimizer.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class PinToHostOptimizerTest : public GrapplerTest {};
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceNoDevices) {
gtl::FlatSet<string> devices = {};
EXPECT_EQ(internal::TryFindHostDevice(devices, false, "ABC"), "");
}
TEST_F(PinToHostOptimizerTest, TryFindHostDeviceCpuXlaGpu) {
gtl::FlatSet<string> devices = {"/device:CPU:0", "/device:XLA_GPU:0"};
EXPECT_EQ(internal::TryFindHostDevice(devices, true, ""), "/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:0"),
"/device:CPU:0");
EXPECT_EQ(internal::TryFindHostDevice(devices, true, "/device:XLA_GPU:*"),
"/device:CPU:0");
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
int num_int32 = 4;
Output f = ops::Const(s.WithOpName("f"), {"test"});
GrapplerItem item;
item.fetch = {"a", "c", "d", "e", "f"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
if (i < num_int32) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
} else {
test::ExpectTensorEqual<tstring>(tensors[i], tensors_expected[i]);
}
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e" || node.name() == "f") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, OptimizeSmallFloatOpsToHost) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 0.0f, {1024, 1024});
Output input_min = ops::Const(s.WithOpName("input_min"), 0.0f);
Output input_max = ops::Const(s.WithOpName("input_max"), 6.0f);
Output b =
ops::QuantizeAndDequantizeV2(s.WithOpName("b"), a, input_min, input_max);
GrapplerItem item;
item.fetch = {"b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<float>(tensors[i], tensors_expected[i]);
}
for (const NodeDef& node : output.node()) {
if (node.name() == "input_min" || node.name() == "input_max") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
}
}
}
TEST_F(PinToHostOptimizerTest, TopologicalSort) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1024, 1024});
Output c = ops::Shape(s.WithOpName("c"), a);
Output d = ops::Const(s.WithOpName("d"), 0, {1});
Output e = ops::ReduceProd(s.WithOpName("e"), c, d);
GrapplerItem item;
item.fetch = {"a", "c", "d", "e"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
std::reverse(item.graph.mutable_node()->begin(),
item.graph.mutable_node()->end());
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_TRUE(node.device().empty());
} else if (node.name() == "d" || node.name() == "e") {
EXPECT_EQ(node.device(), "/device:CPU:0");
}
++found;
}
EXPECT_EQ(found, 4);
}
TEST_F(PinToHostOptimizerTest, NoSwap) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 1});
Output b = ops::Const(s.WithOpName("b"), 1, {1, 1024 * 1024});
Output c = ops::MatMul(s.WithOpName("c"), a, b);
GrapplerItem item;
item.fetch = {"a", "b", "c"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_TRUE(node.device().empty());
++found;
}
EXPECT_EQ(found, 3);
}
TEST_F(PinToHostOptimizerTest, Identity) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a =
ops::Const(s.WithOpName("a").WithDevice("/device:GPU:0"), 1, {64, 64});
Output b = ops::Const(s.WithOpName("b"), {0, 1}, {2});
Output c =
ops::ReduceProd(s.WithOpName("c").WithDevice("/device:GPU:0"), a, b);
Output d = ops::Identity(s.WithDevice("/device:CPU:0").WithOpName("d"), c);
Output e = ops::Multiply(s.WithOpName("e"), d, d);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "a" || node.name() == "c") {
EXPECT_EQ(node.device(), "/device:GPU:0");
} else if (node.name() == "b") {
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
EXPECT_EQ(node.device(), "/device:CPU:0");
#else
EXPECT_TRUE(node.device().empty());
#endif
} else if (node.name() == "d") {
EXPECT_EQ(node.device(), "/device:CPU:0");
} else if (node.name() == "e") {
EXPECT_TRUE(node.device().empty());
}
++found;
}
EXPECT_EQ(found, 5);
}
TEST_F(PinToHostOptimizerTest, PortIdToArgId) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output a = ops::Const(s.WithOpName("a"), 1, {1, 2, 3});
ops::ShapeN b(s.WithOpName("b"), {a, a, a});
GrapplerItem item;
item.fetch = {"a", "b"};
TF_CHECK_OK(s.ToGraphDef(&item.graph));
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
GraphDef output;
PinToHostOptimizer optimizer(RewriterConfig::ON);
TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(item.graph, item.fetch);
EXPECT_EQ(tensors_expected.size(), tensors.size());
for (int i = 0; i < tensors.size(); ++i) {
test::ExpectTensorEqual<int32>(tensors[i], tensors_expected[i]);
}
int found = 0;
for (const NodeDef& node : output.node()) {
EXPECT_EQ(node.device(), "/device:CPU:0");
++found;
}
EXPECT_EQ(found, 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/pin_to_host_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/pin_to_host_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
13d35957-77ef-4355-a5a8-1cecf402d99c | cpp | tensorflow/tensorflow | spectrogram | tensorflow/lite/kernels/internal/spectrogram.cc | tensorflow/core/kernels/spectrogram_test.cc | #include "tensorflow/lite/kernels/internal/spectrogram.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include "third_party/fft2d/fft.h"
namespace tflite {
namespace internal {
using std::complex;
namespace {
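// Fills `window` with a periodic Hann window:
// w[i] = 0.5 - 0.5 * cos(2 * pi * i / window_length).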
void GetPeriodicHann(int window_length, std::vector<double>* window) {
const double pi = std::atan(1.0) * 4.0;
window->resize(window_length);
for (int i = 0; i < window_length; ++i) {
(*window)[i] = 0.5 - 0.5 * cos((2.0 * pi * i) / window_length);
}
}
}
bool Spectrogram::Initialize(int window_length, int step_length) {
std::vector<double> window;
GetPeriodicHann(window_length, &window);
return Initialize(window, step_length);
}
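// Bit-twiddling helpers: Log2Floor returns floor(log2(n)) (or -1 for
// n == 0), Log2Ceiling returns ceil(log2(n)), and NextPowerOfTwo rounds a
// value up to the nearest power of two at or above it.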
inline int Log2Floor(uint32_t n) {
if (n == 0) return -1;
int log = 0;
uint32_t value = n;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
uint32_t x = value >> shift;
if (x != 0) {
value = x;
log += shift;
}
}
return log;
}
inline int Log2Ceiling(uint32_t n) {
int floor = Log2Floor(n);
if (n == (n & ~(n - 1)))
return floor;
else
return floor + 1;
}
inline uint32_t NextPowerOfTwo(uint32_t value) {
int exponent = Log2Ceiling(value);
return 1 << exponent;
}
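// The FFT length is the next power of two at or above the window length,
// giving 1 + fft_length_ / 2 output frequency channels.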
bool Spectrogram::Initialize(const std::vector<double>& window,
int step_length) {
window_length_ = window.size();
window_ = window;
if (window_length_ < 2) {
initialized_ = false;
return false;
}
step_length_ = step_length;
if (step_length_ < 1) {
initialized_ = false;
return false;
}
fft_length_ = NextPowerOfTwo(window_length_);
output_frequency_channels_ = 1 + fft_length_ / 2;
fft_input_output_.assign(fft_length_ + 2, 0.0);
int half_fft_length = fft_length_ / 2;
fft_double_working_area_.assign(half_fft_length, 0.0);
fft_integer_working_area_.assign(2 + static_cast<int>(sqrt(half_fft_length)),
0);
fft_integer_working_area_[0] = 0;
input_queue_.clear();
samples_to_next_step_ = window_length_;
initialized_ = true;
return true;
}
template <class InputSample, class OutputSample>
bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<complex<OutputSample>>>* output) {
if (!initialized_) {
return false;
}
output->clear();
int input_start = 0;
while (GetNextWindowOfSamples(input, &input_start)) {
ProcessCoreFFT();
output->resize(output->size() + 1);
auto& spectrogram_slice = output->back();
spectrogram_slice.resize(output_frequency_channels_);
for (int i = 0; i < output_frequency_channels_; ++i) {
spectrogram_slice[i] = complex<OutputSample>(
fft_input_output_[2 * i], fft_input_output_[2 * i + 1]);
}
}
return true;
}
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<float>& input, std::vector<std::vector<complex<float>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<double>& input,
std::vector<std::vector<complex<float>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<float>& input,
std::vector<std::vector<complex<double>>>*);
template bool Spectrogram::ComputeComplexSpectrogram(
const std::vector<double>& input,
std::vector<std::vector<complex<double>>>*);
template <class InputSample, class OutputSample>
bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<InputSample>& input,
std::vector<std::vector<OutputSample>>* output) {
if (!initialized_) {
return false;
}
output->clear();
int input_start = 0;
while (GetNextWindowOfSamples(input, &input_start)) {
ProcessCoreFFT();
output->resize(output->size() + 1);
auto& spectrogram_slice = output->back();
spectrogram_slice.resize(output_frequency_channels_);
for (int i = 0; i < output_frequency_channels_; ++i) {
const double re = fft_input_output_[2 * i];
const double im = fft_input_output_[2 * i + 1];
spectrogram_slice[i] = re * re + im * im;
}
}
return true;
}
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<float>& input, std::vector<std::vector<float>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<double>& input, std::vector<std::vector<float>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<float>& input, std::vector<std::vector<double>>*);
template bool Spectrogram::ComputeSquaredMagnitudeSpectrogram(
const std::vector<double>& input, std::vector<std::vector<double>>*);
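// Buffers samples from `input` starting at *input_start. Returns true once
// a full analysis window is available (advancing *input_start past the
// consumed samples and trimming the queue to window_length_ samples);
// returns false when the input runs out before the next step boundary.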
template <class InputSample>
bool Spectrogram::GetNextWindowOfSamples(const std::vector<InputSample>& input,
int* input_start) {
auto input_it = input.begin() + *input_start;
int input_remaining = input.end() - input_it;
if (samples_to_next_step_ > input_remaining) {
input_queue_.insert(input_queue_.end(), input_it, input.end());
*input_start += input_remaining;
samples_to_next_step_ -= input_remaining;
return false;
} else {
input_queue_.insert(input_queue_.end(), input_it,
input_it + samples_to_next_step_);
*input_start += samples_to_next_step_;
input_queue_.erase(
input_queue_.begin(),
input_queue_.begin() + input_queue_.size() - window_length_);
samples_to_next_step_ = step_length_;
return true;
}
}
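// Windows the queued samples, zero-pads to fft_length_, and runs an
// in-place real FFT (rdft). rdft packs the Nyquist bin's real part into
// element [1]; it is unpacked to the end of the buffer so that elements
// 2*i and 2*i+1 hold the real and imaginary parts of bin i.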
void Spectrogram::ProcessCoreFFT() {
for (int j = 0; j < window_length_; ++j) {
fft_input_output_[j] = input_queue_[j] * window_[j];
}
for (int j = window_length_; j < fft_length_; ++j) {
fft_input_output_[j] = 0.0;
}
const int kForwardFFT = 1;
rdft(fft_length_, kForwardFFT, &fft_input_output_[0],
&fft_integer_working_area_[0], &fft_double_working_area_[0]);
fft_input_output_[fft_length_] = fft_input_output_[1];
fft_input_output_[fft_length_ + 1] = 0;
fft_input_output_[1] = 0;
}
}
} | #include "tensorflow/core/kernels/spectrogram.h"
#include <complex>
#include <vector>
#include "tensorflow/core/kernels/spectrogram_test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using ::std::complex;
string InputFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment.wav");
}
string ExpectedFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment_spectrogram.csv.bin");
}
const int kDataVectorLength = 257;
const int kNumberOfFramesInTestData = 178;
string ExpectedNonPowerOfTwoFilename() {
return io::JoinPath("tensorflow", "core", "kernels", "spectrogram_test_data",
"short_test_segment_spectrogram_400_200.csv.bin");
}
const int kNonPowerOfTwoDataVectorLength = 257;
const int kNumberOfFramesInNonPowerOfTwoTestData = 228;
TEST(SpectrogramTest, TooLittleDataYieldsNoFrames) {
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.001, &input);
EXPECT_EQ(44, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(0, output.size());
}
TEST(SpectrogramTest, StepSizeSmallerThanWindow) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(400, 200));
std::vector<double> input;
SineWave(44100, 1000.0, 0.015, &input);
EXPECT_EQ(661, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest, StepSizeBiggerThanWindow) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(200, 400));
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest, StepSizeBiggerThanWindow2) {
Spectrogram sgram;
EXPECT_TRUE(sgram.Initialize(200, 400));
std::vector<double> input;
SineWave(44100, 1000.0, 0.016, &input);
EXPECT_GT(input.size(), 600);
EXPECT_LT(input.size(), 800);
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(2, output.size());
}
TEST(SpectrogramTest,
MultipleCallsToComputeComplexSpectrogramMayYieldDifferentNumbersOfFrames) {
Spectrogram sgram;
sgram.Initialize(200, 400);
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
  const std::vector<int> expected_output_sizes = {2, 2, 3};
for (int expected_output_size : expected_output_sizes) {
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(expected_output_size, output.size());
}
}
TEST(SpectrogramTest, CumulatingExcessInputsForOverlappingFrames) {
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.02, &input);
EXPECT_EQ(882, input.size());
std::vector<std::vector<complex<double>>> output;
  const std::vector<int> expected_output_sizes = {3, 4, 5};
for (int expected_output_size : expected_output_sizes) {
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(expected_output_size, output.size());
}
}
TEST(SpectrogramTest, StepSizeEqualToWindowWorks) {
Spectrogram sgram;
sgram.Initialize(200, 200);
std::vector<double> input;
SineWave(44100, 1000.0, 0.05, &input);
EXPECT_EQ(2205, input.size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
EXPECT_EQ(11, output.size());
}
template <class ExpectedSample, class ActualSample>
void CompareComplexData(
const std::vector<std::vector<complex<ExpectedSample>>>& expected,
const std::vector<std::vector<complex<ActualSample>>>& actual,
double tolerance) {
ASSERT_EQ(actual.size(), expected.size());
for (int i = 0; i < expected.size(); ++i) {
ASSERT_EQ(expected[i].size(), actual[i].size());
for (int j = 0; j < expected[i].size(); ++j) {
ASSERT_NEAR(real(expected[i][j]), real(actual[i][j]), tolerance)
<< ": where i=" << i << " and j=" << j << ".";
ASSERT_NEAR(imag(expected[i][j]), imag(actual[i][j]), tolerance)
<< ": where i=" << i << " and j=" << j << ".";
}
}
}
template <class Sample>
double GetMaximumAbsolute(const std::vector<std::vector<Sample>>& spectrogram) {
double max_absolute = 0.0;
for (int i = 0; i < spectrogram.size(); ++i) {
for (int j = 0; j < spectrogram[i].size(); ++j) {
double absolute_value = std::abs(spectrogram[i][j]);
if (absolute_value > max_absolute) {
max_absolute = absolute_value;
}
}
}
return max_absolute;
}
template <class ExpectedSample, class ActualSample>
void CompareMagnitudeData(
const std::vector<std::vector<complex<ExpectedSample>>>&
expected_complex_output,
const std::vector<std::vector<ActualSample>>& actual_squared_magnitude,
double tolerance) {
ASSERT_EQ(actual_squared_magnitude.size(), expected_complex_output.size());
for (int i = 0; i < expected_complex_output.size(); ++i) {
ASSERT_EQ(expected_complex_output[i].size(),
actual_squared_magnitude[i].size());
for (int j = 0; j < expected_complex_output[i].size(); ++j) {
ASSERT_NEAR(norm(expected_complex_output[i][j]),
actual_squared_magnitude[i][j], tolerance)
<< ": where i=" << i << " and j=" << j << ".";
}
}
}
TEST(SpectrogramTest, ReInitializationWorks) {
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
std::vector<std::vector<complex<double>>> first_output;
std::vector<std::vector<complex<double>>> second_output;
sgram.Initialize(512, 256);
sgram.ComputeComplexSpectrogram(input, &first_output);
sgram.Initialize(512, 256);
sgram.ComputeComplexSpectrogram(input, &second_output);
ASSERT_EQ(first_output.size(), second_output.size());
int slice_size = first_output[0].size();
for (int i = 0; i < first_output.size(); ++i) {
ASSERT_EQ(slice_size, first_output[i].size());
ASSERT_EQ(slice_size, second_output[i].size());
for (int j = 0; j < slice_size; ++j) {
ASSERT_EQ(first_output[i][j], second_output[i][j]);
}
}
}
TEST(SpectrogramTest, ComputedComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-5);
}
TEST(SpectrogramTest, ComputedFloatComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> double_input;
CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()),
&double_input));
std::vector<float> input;
input.assign(double_input.begin(), double_input.end());
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<float>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-4);
}
TEST(SpectrogramTest, ComputedSquaredMagnitudeDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<double>> output;
sgram.ComputeSquaredMagnitudeSpectrogram(input, &output);
CompareMagnitudeData(expected_output, output, 1e-3);
}
TEST(SpectrogramTest, ComputedFloatSquaredMagnitudeDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(512, 256);
std::vector<double> double_input;
CHECK(ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()),
&double_input));
EXPECT_EQ(kInputDataLength, double_input.size());
std::vector<float> input;
input.assign(double_input.begin(), double_input.end());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedFilename()), kDataVectorLength,
&expected_output));
EXPECT_EQ(kNumberOfFramesInTestData, expected_output.size());
EXPECT_EQ(kDataVectorLength, expected_output[0].size());
std::vector<std::vector<float>> output;
sgram.ComputeSquaredMagnitudeSpectrogram(input, &output);
double max_absolute = GetMaximumAbsolute(output);
EXPECT_GT(max_absolute, 2300.0);
CompareMagnitudeData(expected_output, output, 2e-4);
}
TEST(SpectrogramTest, ComputedNonPowerOfTwoComplexDataAgreeWithMatlab) {
const int kInputDataLength = 45870;
Spectrogram sgram;
sgram.Initialize(400, 200);
std::vector<double> input;
CHECK(
ReadWaveFileToVector(GetDataDependencyFilepath(InputFilename()), &input));
EXPECT_EQ(kInputDataLength, input.size());
std::vector<std::vector<complex<double>>> expected_output;
ASSERT_TRUE(ReadRawFloatFileToComplexVector(
GetDataDependencyFilepath(ExpectedNonPowerOfTwoFilename()),
kNonPowerOfTwoDataVectorLength, &expected_output));
EXPECT_EQ(kNumberOfFramesInNonPowerOfTwoTestData, expected_output.size());
EXPECT_EQ(kNonPowerOfTwoDataVectorLength, expected_output[0].size());
std::vector<std::vector<complex<double>>> output;
sgram.ComputeComplexSpectrogram(input, &output);
CompareComplexData(expected_output, output, 1e-5);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/spectrogram.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dd964e63-6f3d-4030-987b-827743b1f564 | cpp | tensorflow/tensorflow | tf_quantize_op | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <functional>
#include <optional>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_quantize_op_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
namespace {
constexpr StringRef kDequantizeFunctionName = "composite_dequantize";
constexpr StringRef kUniformQuantizationFunctionName = "uniform";
func::FuncOp PrepareFunctionRegister(PatternRewriter& rewriter, Value input_val,
ShapedType result_type,
StringRef func_name,
Value& func_input_arg) {
Operation* input_op = input_val.getDefiningOp();
Operation* insertion_point = input_op->getParentOfType<func::FuncOp>();
if (!insertion_point) insertion_point = input_op->getParentOfType<ModuleOp>();
rewriter.setInsertionPointAfter(insertion_point);
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(input_val.getType());
UnrankedTensorType create_unknown_output_shape =
CreateUnknownShapeFromElementType(result_type);
FunctionType func_type =
FunctionType::get(rewriter.getContext(), {create_unknown_input_shape},
{create_unknown_output_shape});
func::FuncOp quantization_func =
rewriter.create<func::FuncOp>(input_op->getLoc(), func_name, func_type);
OpBuilder::InsertionGuard guard = OpBuilder::InsertionGuard(rewriter);
ArrayRef<Type> inputs = quantization_func.getFunctionType().getInputs();
Block* block = rewriter.createBlock(
&quantization_func.getBody(), quantization_func.begin(), inputs,
SmallVector<Location>(inputs.size(), quantization_func.getLoc()));
func_input_arg = block->getArgument(0);
return quantization_func;
}
TF::PartitionedCallOp FinalizeFunctionRegister(
PatternRewriter& rewriter, Value input, Value output,
func::FuncOp& quantization_func, Operation* quantized_op,
StringRef func_name, IRRewriter::InsertPoint original_point,
Type quantize_result_type) {
rewriter.create<func::ReturnOp>(input.getLoc(), ArrayRef<Value>({output}));
quantization_func.setVisibility(func::FuncOp::Visibility::Private);
SymbolTable symbol_table(quantized_op->getParentOfType<ModuleOp>());
symbol_table.insert(quantization_func);
FlatSymbolRefAttr func_name_attr =
FlatSymbolRefAttr::get(rewriter.getStringAttr(func_name));
rewriter.restoreInsertionPoint(original_point);
auto quantize_call = rewriter.create<TF::PartitionedCallOp>(
quantized_op->getLoc(), quantize_result_type, input, func_name_attr,
"", "", "");
return quantize_call;
}
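// Creates a private function (uniquified with trailing underscores if the
// name is already taken), fills its body via
// `quantization_operations_func`, and returns a PartitionedCallOp that
// invokes it on `input_val`.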
std::optional<TF::PartitionedCallOp> RegisterOperationsInFuncOp(
StringRef func_name, PatternRewriter& rewriter, QuantizedType quant_type,
Value input_val, ShapedType result_type,
std::function<Operation*(PatternRewriter&, Operation*, Value, ShapedType,
QuantizedType)>
quantization_operations_func) {
Operation* input_op = input_val.getDefiningOp();
auto original_point = rewriter.saveInsertionPoint();
auto unique_func_name = func_name.str();
SymbolTable symbol_table(input_op->getParentOfType<ModuleOp>());
while (symbol_table.lookup(unique_func_name)) {
absl::StrAppend(&unique_func_name, "_");
}
Value func_input_arg;
func::FuncOp func_op = PrepareFunctionRegister(
rewriter, input_val, result_type, unique_func_name, func_input_arg);
Operation* last_op_in_func =
quantization_operations_func(rewriter, func_op.getOperation(),
func_input_arg, result_type, quant_type);
auto end_call_op = FinalizeFunctionRegister(
rewriter, input_val, last_op_in_func->getResult(0), func_op, input_op,
unique_func_name, original_point, result_type);
return end_call_op;
}
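// Derives a signed, narrow-range 8-bit per-tensor quantized type from the
// constant's float values; returns a null type if the constant cannot be
// matched.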
QuantizedType CalculateUniformQuantParams(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
const bool kIsNarrowRange = true;
const bool kIsSigned = true;
const int kBitWidth = 8;
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) return nullptr;
QuantizedType quant_type = mlir::dyn_cast<quant::QuantizedType>(
quant::GetUniformQuantizedTypeForWeight(
attr, kIsNarrowRange && kIsSigned, kBitWidth, kIsSigned,
kIsNarrowRange, false));
return quant_type;
}
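// Quantizes the float constant into the storage type of `quant_type`,
// materializing a new ConstOp followed by an Identity op; returns nullptr
// if the constant cannot be matched or quantized.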
std::optional<Value> AddUniformQuantizeOps(PatternRewriter& rewriter,
TF::ConstOp op,
QuantizedType quant_type) {
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) {
return nullptr;
}
Type expressed_type = op.getResult().getType();
Type quantized_type = quant_type.castFromExpressedType(expressed_type);
ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type);
DenseElementsAttr tensor_proto_attr =
mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type));
if (!tensor_proto_attr) {
return nullptr;
}
Type storage_type =
mlir::cast<QuantizedType>(shaped_quantized_type.getElementType())
.getStorageType();
ShapedType new_type = shaped_quantized_type.clone(storage_type);
rewriter.setInsertionPointAfter(op);
auto const_op =
rewriter.create<TF::ConstOp>(op.getLoc(), new_type, tensor_proto_attr);
auto new_identity_op = rewriter.create<TF::IdentityOp>(
op->getLoc(), const_op.getType(), const_op);
return new_identity_op.getResult();
}
Operation* LogicsForUniformDequanization(PatternRewriter& rewriter,
Operation* func_op, Value input_val,
ShapedType original_input_tensor_type,
QuantizedType quant_type) {
auto loc = input_val.getLoc();
rewriter.setInsertionPointToStart(
&(cast<func::FuncOp>(func_op)).getBody().front());
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(original_input_tensor_type);
auto new_cast_op =
rewriter.create<TF::CastOp>(loc, create_unknown_input_shape, input_val);
auto qtype = mlir::dyn_cast<UniformQuantizedType>(quant_type);
TensorType scale_type = RankedTensorType::get({}, rewriter.getF32Type());
Value scale_op = rewriter.create<TF::ConstOp>(
loc, scale_type,
DenseFPElementsAttr::get(scale_type,
{static_cast<float>(qtype.getScale())}));
if (original_input_tensor_type.getElementType().isBF16()) {
scale_op = rewriter.create<TF::CastOp>(
loc, UnrankedTensorType::get(rewriter.getBF16Type()), scale_op);
}
auto mul_op = rewriter.create<TF::MulOp>(loc, new_cast_op.getType(), scale_op,
new_cast_op);
return mul_op;
}
std::optional<TF::PartitionedCallOp> AddUniformDequantizeOps(
PatternRewriter& rewriter, QuantizedType quant_type,
Value val_to_dequantize, ShapedType result_type) {
auto func_name = absl::StrJoin(
{kDequantizeFunctionName, kUniformQuantizationFunctionName}, "_");
std::optional<TF::PartitionedCallOp> dequant_op = RegisterOperationsInFuncOp(
func_name, rewriter, quant_type, val_to_dequantize, result_type,
LogicsForUniformDequanization);
return dequant_op;
}
}
std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
QuantizedType quant_type =
CalculateUniformQuantParams(rewriter, op, weight_spec);
if (!quant_type) return nullptr;
std::optional<Value> quantized_val =
AddUniformQuantizeOps(rewriter, op, quant_type);
if (!quantized_val.has_value()) return std::nullopt;
std::optional<TF::PartitionedCallOp> dequantized_val =
AddUniformDequantizeOps(rewriter, quant_type, quantized_val.value(),
mlir::cast<ShapedType>(op.getType()));
return dequantized_val;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <optional>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
TEST(TfQuantOpTest, applyUniformQuantization) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect, quant::QuantDialect,
func::FuncDialect>();
EmptyPatternRewriter pattern_rewriter(builder);
Value value = CreateConstValue<float>(builder, module->getLoc(), {1024, 2},
SmallVector<float>(2048, 0));
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
EXPECT_TRUE(dequantize_op.has_value());
EXPECT_EQ(dequantize_op.value().func().getName().str(),
"composite_dequantize_uniform");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f0bf610-f23a-4dd6-9bfb-f1d3f2b30cc9 | cpp | tensorflow/tensorflow | uniform_quant_ops_params | tensorflow/core/util/quantization/uniform_quant_ops_params.cc | tensorflow/core/util/quantization/uniform_quant_ops_params_test.cc | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
Status ValidDim(int64_t dims, int64_t dim) {
if (dim < 0 || dim >= dims) {
return InvalidArgument(
"Each dimension number must be in region [0, rank). Given rank ", dims,
" and dimension number value ", dim);
}
return absl::OkStatus();
}
Status ValidSpatialDimensions(
int64_t dims, const protobuf::RepeatedField<int64_t>& spatial_dimensions) {
if (spatial_dimensions.size() != dims - 2) {
return InvalidArgument(
"Spatial dimensions size must be rank - 2. Given rank ", dims,
" and spatial dimensions size ", spatial_dimensions.size());
}
for (int i = 0; i < spatial_dimensions.size(); ++i) {
TF_RETURN_IF_ERROR(ValidDim(dims, spatial_dimensions.Get(i)));
}
return absl::OkStatus();
}
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const OpKernelConstruction& context) {
return LoadFromAttrsInternal(context);
}
Status UniformQuantizedConvolutionParams::LoadFromAttrs(
const shape_inference::InferenceContext& context) {
return LoadFromAttrsInternal(context);
}
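// Validates lhs/rhs ranks, window strides, dilations, dimension numbers,
// and the feature/batch group counts, filling in defaults for attrs that
// were left empty, then validates or fills the explicit padding list.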
Status UniformQuantizedConvolutionParams::ValidateOrFillParamsAndValidateShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
if (lhs_shape.dims() != rhs_shape.dims()) {
return InvalidArgument(
"lhs and rhs must have same dims. Given lhs and rhs of shapes: ",
lhs_shape.DebugString(), rhs_shape.DebugString());
}
const int64_t dims = lhs_shape.dims();
if (dims <= 2) {
return InvalidArgument("lhs and rhs shape dims must be at least 3. Given: ",
dims);
}
const int64_t num_spatial_dims = dims - 2;
if (window_strides_.empty()) {
window_strides_.resize(num_spatial_dims, 1);
} else if (window_strides_.size() != num_spatial_dims) {
return InvalidArgument("Size of window_strides Attr must be dims - 2.");
} else if (!absl::c_all_of(window_strides_,
[](int stride) { return stride >= 1; })) {
return InvalidArgument(
"All elements of window_strides must be >= 1. Given ",
absl::StrJoin(window_strides_, ", "));
}
if (lhs_dilation_.empty()) {
lhs_dilation_.resize(num_spatial_dims, 1);
} else if (lhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of lhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(lhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of lhs_dilation must be >= 1. Given ",
absl::StrJoin(lhs_dilation_, ", "));
}
if (rhs_dilation_.empty()) {
rhs_dilation_.resize(num_spatial_dims, 1);
} else if (rhs_dilation_.size() != num_spatial_dims) {
return InvalidArgument("Size of rhs_dilation Attr must be dims - 2.");
} else if (!absl::c_all_of(rhs_dilation_, [](const int dilation) {
return dilation >= 1;
})) {
return InvalidArgument("All elements of rhs_dilation must be >= 1. Given ",
absl::StrJoin(rhs_dilation_, ", "));
}
if (dimension_numbers_.input_spatial_dimensions_size() == 0) {
dimension_numbers_.set_input_batch_dimension(0);
dimension_numbers_.set_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_input_spatial_dimensions(2 + i);
}
dimension_numbers_.set_kernel_output_feature_dimension(0);
dimension_numbers_.set_kernel_input_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_kernel_spatial_dimensions(2 + i);
}
dimension_numbers_.set_output_batch_dimension(0);
dimension_numbers_.set_output_feature_dimension(1);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
dimension_numbers_.add_output_spatial_dimensions(2 + i);
}
} else {
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_batch_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.input_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.input_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_input_feature_dimension()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.kernel_output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.kernel_spatial_dimensions()));
TF_RETURN_IF_ERROR(
ValidDim(dims, dimension_numbers_.output_batch_dimension()));
    TF_RETURN_IF_ERROR(
        ValidDim(dims, dimension_numbers_.output_feature_dimension()));
TF_RETURN_IF_ERROR(ValidSpatialDimensions(
dims, dimension_numbers_.output_spatial_dimensions()));
}
if (feature_group_count_ <= 0) {
return InvalidArgument(
"feature_group_count must be a positive integer, given: ",
feature_group_count_);
}
const int64_t lhs_feature_count =
lhs_shape.dim_size(dimension_numbers_.input_feature_dimension());
if (lhs_feature_count % feature_group_count_) {
return InvalidArgument(
"feature_group_count must divide lhs feature dimension size, but ",
feature_group_count_, " does not divide ", lhs_feature_count);
}
const int64_t rhs_input_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_input_feature_dimension());
if (lhs_feature_count % rhs_input_feature_count) {
return InvalidArgument(
"rhs input feature dimension must divide lhs feature dimension "
"size, but ",
rhs_input_feature_count, " does not divide ", lhs_feature_count);
}
if (lhs_feature_count / feature_group_count_ != rhs_input_feature_count) {
return InvalidArgument(
"lhs feature dimension size divided by feature_group_count must equal "
"the rhs input feature dimension size, but ",
lhs_feature_count, " / ", feature_group_count_,
" != ", rhs_input_feature_count);
}
const int64_t rhs_output_feature_count =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
if (rhs_output_feature_count % feature_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of feature_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ",
feature_group_count_);
}
if (batch_group_count_ <= 0) {
return InvalidArgument(
"batch_group_count Attr must be a positive integer. Given: ",
batch_group_count_);
}
const int64_t lhs_batch_count =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension());
if (lhs_batch_count % batch_group_count_) {
return InvalidArgument(
"batch_group_count must divide lhs batch dimension size, but ",
batch_group_count_, " does not divide ", lhs_batch_count);
}
if (rhs_output_feature_count % batch_group_count_) {
return InvalidArgument(
"rhs output dimension size must be a multiple of batch_group_count, "
"but ",
rhs_output_feature_count, " is not a multiple of ", batch_group_count_);
}
return ValidateOrFillPaddingList(lhs_shape, rhs_shape);
}
absl::StatusOr<TensorShape>
UniformQuantizedConvolutionParams::CalculateOutputShape(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) const {
std::vector<int64_t> output_shape_buf(lhs_shape.dims());
output_shape_buf[dimension_numbers_.output_batch_dimension()] =
lhs_shape.dim_size(dimension_numbers_.input_batch_dimension()) /
batch_group_count_;
output_shape_buf[dimension_numbers_.output_feature_dimension()] =
rhs_shape.dim_size(dimension_numbers_.kernel_output_feature_dimension());
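  // Each output spatial size is
  // ceil((dilated_lhs + pad_low + pad_high - dilated_rhs + 1) / stride).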
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size(); ++i) {
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size_numerator =
lhs_size_dilated + padding_list_[2 * i] + padding_list_[2 * i + 1] -
rhs_size_dilated + 1;
const int64_t output_size_denominator = window_strides_[i];
output_shape_buf[dimension_numbers_.output_spatial_dimensions(i)] =
(output_size_numerator + output_size_denominator - 1) /
output_size_denominator;
}
TensorShape output_shape;
TF_RETURN_IF_ERROR(
TensorShape::BuildTensorShape(output_shape_buf, &output_shape));
return output_shape;
}
template <typename ContextT>
Status UniformQuantizedConvolutionParams::LoadFromAttrsInternal(
const ContextT& context) {
TF_RETURN_IF_ERROR(context.GetAttr("window_strides", &window_strides_));
TF_RETURN_IF_ERROR(context.GetAttr("lhs_dilation", &lhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("rhs_dilation", &rhs_dilation_));
TF_RETURN_IF_ERROR(context.GetAttr("batch_group_count", &batch_group_count_));
TF_RETURN_IF_ERROR(
context.GetAttr("feature_group_count", &feature_group_count_));
TF_RETURN_IF_ERROR(context.GetAttr("padding", &padding_));
TF_RETURN_IF_ERROR(context.GetAttr("explicit_padding", &padding_list_));
if (padding_ != "EXPLICIT" && padding_ != "SAME" && padding_ != "VALID") {
return InvalidArgument(
"padding Attr must be one of [EXPLICIT | SAME | VALID], but given: ",
padding_);
} else if (padding_ != "EXPLICIT" && !padding_list_.empty()) {
return InvalidArgument(
"If padding Attr is not 'EXPLICIT', explicit_padding Attr must be "
"empty. Given padding ",
padding_, " and explicit_padding of size ", padding_list_.size());
}
std::string dimension_numbers_str;
TF_RETURN_IF_ERROR(
context.GetAttr("dimension_numbers", &dimension_numbers_str));
if (dimension_numbers_str.empty()) {
dimension_numbers_.Clear();
} else if (!dimension_numbers_.ParseFromString(dimension_numbers_str)) {
return InvalidArgument("Error parsing convolution dimension numbers.");
}
return absl::OkStatus();
}
Status UniformQuantizedConvolutionParams::ValidateOrFillPaddingList(
const TensorShape& lhs_shape, const TensorShape& rhs_shape) {
const int64_t dims = lhs_shape.dims();
const int64_t padding_list_size = 2 * (dims - 2);
if (padding_ == "EXPLICIT") {
if (padding_list_.size() != padding_list_size) {
return InvalidArgument(
"Size of explicit_padding Attr must be 2 * (rank - 2). Given rank ",
dims, " and explicit_padding of size ", padding_list_.size());
} else if (!absl::c_all_of(padding_list_,
[](int elem) { return elem >= 0; })) {
return InvalidArgument("All explicit_padding elems must be >= 0, Given ",
absl::StrJoin(padding_list_, ", "));
}
} else if (padding_ == "VALID") {
padding_list_.resize(padding_list_size, 0);
} else {
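    // "SAME" padding: choose the total padding so that the output size equals
    // ceil(input / stride), splitting it as evenly as possible with any extra
    // element placed at the end.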
padding_list_.resize(padding_list_size);
for (int i = 0; i < dimension_numbers_.input_spatial_dimensions_size();
++i) {
const int64_t stride = window_strides_[i];
const int64_t lhs_size_dilated = DilatedSize(
lhs_shape.dim_size(dimension_numbers_.input_spatial_dimensions(i)),
lhs_dilation_[i]);
const int64_t rhs_size_dilated = DilatedSize(
rhs_shape.dim_size(dimension_numbers_.kernel_spatial_dimensions(i)),
rhs_dilation_[i]);
const int64_t output_size = (lhs_size_dilated + stride - 1) / stride;
const int64_t total_padding = std::max(
(output_size - 1) * stride + rhs_size_dilated - lhs_size_dilated,
static_cast<int64_t>(0));
const int64_t padding_begin = total_padding / 2;
const int64_t padding_end = total_padding - padding_begin;
padding_list_[2 * i] = padding_begin;
padding_list_[2 * i + 1] = padding_end;
}
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/quantization/uniform_quant_ops_params.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace tensorflow {
namespace {
using protobuf::TextFormat;
using ::testing::ElementsAreArray;
TEST(UniformQuantizedConvolutionParamsTest, DilatedSize) {
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(0, 2), 0);
EXPECT_EQ(UniformQuantizedConvolutionParams::DilatedSize(10, 3), 28);
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 2, 3, 4},
{3, 2, 2, 3}));
EXPECT_THAT(params.window_strides(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.lhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.rhs_dilation(), ElementsAreArray({1, 1}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({0, 0, 0, 0}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({2, 3}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 1);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({2, 3}));
}
TEST(UniformQuantizedConvolutionParamsTest,
ValidateOrFillParamsAndValidateShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape({2, 3, 4, 2},
{2, 3, 1, 2}));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 1, 2, 2}));
EXPECT_EQ(params.dimension_numbers().input_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().input_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().input_spatial_dimensions(),
ElementsAreArray({1, 2}));
EXPECT_EQ(params.dimension_numbers().kernel_output_feature_dimension(), 3);
EXPECT_EQ(params.dimension_numbers().kernel_input_feature_dimension(), 2);
EXPECT_THAT(params.dimension_numbers().kernel_spatial_dimensions(),
ElementsAreArray({0, 1}));
EXPECT_EQ(params.dimension_numbers().output_batch_dimension(), 0);
EXPECT_EQ(params.dimension_numbers().output_feature_dimension(), 3);
EXPECT_THAT(params.dimension_numbers().output_spatial_dimensions(),
ElementsAreArray({1, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeDefaultAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"VALID");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 2, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 2, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateOutputShapeSetAttr) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
ASSERT_TRUE(TextFormat::ParseFromString(R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers));
UniformQuantizedConvolutionParams params({2, 2},
{3, 3},
{4, 4},
dimension_numbers,
2,
1,
"EXPLICIT",
{1, 1, 2, 2});
const TensorShape lhs_shape({2, 3, 4, 2});
const TensorShape rhs_shape({2, 3, 1, 2});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
auto shape_or = params.CalculateOutputShape(lhs_shape, rhs_shape);
TF_ASSERT_OK(shape_or.status());
EXPECT_TRUE(shape_or.value().IsSameSize({2, 3, 3, 2}));
}
TEST(UniformQuantizedConvolutionParamsTest, CalculateSameOptionPadding) {
UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
UniformQuantizedConvolutionParams params({},
{},
{},
dimension_numbers,
1,
1,
"SAME");
const TensorShape lhs_shape({2, 2, 3, 4});
const TensorShape rhs_shape({3, 2, 4, 3});
TF_ASSERT_OK(
params.ValidateOrFillParamsAndValidateShape(lhs_shape, rhs_shape));
EXPECT_THAT(params.padding_list(), ElementsAreArray({1, 2, 1, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/quantization/uniform_quant_ops_params.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/quantization/uniform_quant_ops_params_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
650091b6-2adc-4acd-a30c-698f8f8cc66e | cpp | tensorflow/tensorflow | xla_legalize_targets | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace mhlo {
ConversionTarget GetDefaultLegalConversionTargets(MLIRContext& mlir_context,
bool legalize_chlo) {
ConversionTarget target(mlir_context);
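  // When CHLO is being legalized, mark both CHLO and StableHLO illegal so the
  // conversion must rewrite them; otherwise CHLO ops are left as-is.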
if (legalize_chlo) {
target.addIllegalDialect<chlo::ChloDialect>();
target.addIllegalDialect<stablehlo::StablehloDialect>();
} else {
target.addLegalDialect<chlo::ChloDialect>();
}
target.addLegalDialect<MhloDialect>();
target.addLegalDialect<arith::ArithDialect>();
target.addLegalDialect<func::FuncDialect>();
target.addLegalDialect<tensor::TensorDialect>();
target.addLegalDialect<shape::ShapeDialect>();
target.addLegalOp<func::CallOp>();
target.addLegalOp<TF::_XlaHostComputeMlirOp, TF::XlaSendToHostOp,
TF::XlaRecvFromHostOp>();
return target;
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace mhlo {
namespace {
mlir::DialectRegistry GetDefaultDialectRegistry() {
mlir::DialectRegistry registry;
registry.insert<arith::ArithDialect>();
registry.insert<func::FuncDialect>();
registry.insert<tensor::TensorDialect>();
registry.insert<shape::ShapeDialect>();
registry.insert<TF::TensorFlowDialect>();
registry.insert<chlo::ChloDialect>();
return registry;
}
class XlaLegalizeTargetsTest : public testing::Test {
public:
XlaLegalizeTargetsTest()
: context_(GetDefaultDialectRegistry()),
module_(mlir::ModuleOp::create(mlir::UnknownLoc::get(&context_))),
builder_(&module_->getBodyRegion()) {
context_.loadAllAvailableDialects();
}
protected:
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
mlir::OpBuilder builder_;
};
TEST_F(XlaLegalizeTargetsTest, CreatesConversionTargets) {
auto const_int = builder_.create<mlir::arith::ConstantIntOp>(
builder_.getUnknownLoc(), 10, builder_.getI32Type());
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, AllowsCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, true);
EXPECT_TRUE(target.isIllegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, DontAllowCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e75402be-4462-469a-b7e4-f4278767815f | cpp | tensorflow/tensorflow | pjrt_cpu_client_registration | tensorflow/core/tfrt/common/pjrt_cpu_client_registration.cc | tensorflow/core/tfrt/common/pjrt_cpu_client_registration_test.cc | #include <memory>
#include <utility>
#include "absl/status/statusor.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
absl::StatusOr<std::unique_ptr<xla::PjRtClient>> GetCpuClient(
const PjrtClientFactoryOptions& option) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtClient> client,
xla::GetTfrtCpuClient(option.cpu_options.asynchronous));
return std::move(client);
}
REGISTER_PJRT_CLIENT_FACTORY(cpu_client, tensorflow::DEVICE_CPU, GetCpuClient);
} | #include <gtest/gtest.h>
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_options.h"
#include "tensorflow/core/tfrt/common/pjrt_client_factory_registry.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
TEST(PjrtCpuClientCreateTest, TestCpuCreateoption) {
PjrtClientFactoryOptions options = PjrtClientFactoryOptions();
options.cpu_options.asynchronous = true;
TF_ASSERT_OK_AND_ASSIGN(
auto client, xla::PjrtClientFactoryRegistry::Get().GetPjrtClient(
tsl::DeviceType(tensorflow::DEVICE_CPU), options));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_cpu_client_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/common/pjrt_cpu_client_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2dcebcc7-6c69-44ba-9f86-9cae5f936ef9 | cpp | google/tensorstore | deep_copy_transform_rep_ptr | tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h | tensorstore/index_space/deep_copy_transform_rep_ptr_test.cc | #ifndef TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#define TENSORSTORE_INDEX_SPACE_INTERNAL_DEEP_COPY_TRANSFORM_REP_PTR_H_
#include <utility>
#include "tensorstore/index_space/internal/transform_rep.h"
namespace tensorstore {
namespace internal_index_space {
class DeepCopyTransformRepPtr {
public:
DeepCopyTransformRepPtr(std::nullptr_t = nullptr) : ptr_(nullptr) {}
explicit DeepCopyTransformRepPtr(TransformRep* ptr,
internal::adopt_object_ref_t)
: ptr_(ptr) {
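    // Adopting requires exclusive ownership of the representation (the
    // zero-capacity singleton is exempt from the reference-count check).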
assert(ptr == nullptr ||
(ptr->input_rank_capacity == 0 && ptr->output_rank_capacity == 0) ||
ptr->reference_count == 1);
}
explicit DeepCopyTransformRepPtr(TransformRep* ptr,
internal::acquire_object_ref_t) {
if (ptr) {
ptr_ =
TransformRep::Allocate(ptr->input_rank, ptr->output_rank).release();
CopyTransformRep(ptr, ptr_);
} else {
ptr_ = nullptr;
}
}
DeepCopyTransformRepPtr(DeepCopyTransformRepPtr&& other)
: ptr_(std::exchange(other.ptr_, nullptr)) {}
DeepCopyTransformRepPtr(const DeepCopyTransformRepPtr& other)
: DeepCopyTransformRepPtr(other.ptr_, internal::acquire_object_ref) {}
DeepCopyTransformRepPtr& operator=(DeepCopyTransformRepPtr&& other) {
if (ptr_) Free();
ptr_ = std::exchange(other.ptr_, nullptr);
return *this;
}
DeepCopyTransformRepPtr& operator=(const DeepCopyTransformRepPtr& other) {
return *this = DeepCopyTransformRepPtr(other.ptr_,
internal::acquire_object_ref);
}
DeepCopyTransformRepPtr& operator=(std::nullptr_t) {
if (ptr_) Free();
ptr_ = nullptr;
return *this;
}
~DeepCopyTransformRepPtr() {
if (ptr_) Free();
}
explicit operator bool() const { return static_cast<bool>(ptr_); }
TransformRep* get() const { return ptr_; }
TransformRep* operator->() const { return ptr_; }
TransformRep& operator*() const { return *ptr_; }
TransformRep* release() { return std::exchange(ptr_, nullptr); }
private:
void Free() {
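    // Wrap the pointer in a temporary intrusive Ptr that adopts (rather than
    // acquires) the reference, so it is released when the temporary dies.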
TransformRep::Ptr<>(ptr_, internal::adopt_object_ref);
}
TransformRep* ptr_;
};
}
}
#endif | #include "tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::acquire_object_ref;
using ::tensorstore::internal::adopt_object_ref;
using ::tensorstore::internal_index_space::DeepCopyTransformRepPtr;
using ::tensorstore::internal_index_space::TransformRep;
TEST(DeepCopyTransformRepPtr, DefaultConstruct) {
DeepCopyTransformRepPtr ptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, Nullptr) {
DeepCopyTransformRepPtr ptr = nullptr;
EXPECT_FALSE(ptr);
EXPECT_EQ(nullptr, ptr.get());
EXPECT_EQ(nullptr, ptr.operator->());
EXPECT_EQ(nullptr, ptr.release());
}
TEST(DeepCopyTransformRepPtr, AdoptAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AdoptAllocateZero) {
auto ptr1 = TransformRep::Allocate(0, 0);
ptr1->input_rank = ptr1->output_rank = 0;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_EQ(ptr, ptr2.operator->());
EXPECT_EQ(ptr, &*ptr2);
}
TEST(DeepCopyTransformRepPtr, AcquireAllocate) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.get(), acquire_object_ref);
EXPECT_NE(ptr1.get(), ptr2.get());
EXPECT_EQ(7, ptr2->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, Release) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
auto ptr3 = ptr2.release();
EXPECT_EQ(ptr, ptr3);
TransformRep::Ptr<>(ptr3, adopt_object_ref);
}
TEST(DeepCopyTransformRepPtr, MoveConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, CopyConstruct) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
auto ptr3 = ptr2;
EXPECT_NE(ptr, ptr3.get());
EXPECT_TRUE(ptr2);
EXPECT_TRUE(ptr3);
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, AssignNullptr) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
ptr2 = nullptr;
EXPECT_EQ(nullptr, ptr2.get());
}
TEST(DeepCopyTransformRepPtr, MoveAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = std::move(ptr2);
EXPECT_EQ(ptr, ptr3.get());
EXPECT_FALSE(ptr2);
}
TEST(DeepCopyTransformRepPtr, MoveAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = std::move(ptr3);
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNonNullToNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr3 = ptr2;
EXPECT_TRUE(ptr2);
EXPECT_EQ(ptr, ptr2.get());
EXPECT_NE(ptr, ptr3.get());
EXPECT_EQ(7, ptr3->input_origin()[0]);
}
TEST(DeepCopyTransformRepPtr, CopyAssignNullToNonNull) {
auto ptr1 = TransformRep::Allocate(1, 1);
ptr1->input_rank = ptr1->output_rank = 1;
auto ptr = ptr1.get();
ptr1->input_origin()[0] = 7;
DeepCopyTransformRepPtr ptr2(ptr1.release(), adopt_object_ref);
EXPECT_EQ(ptr, ptr2.get());
DeepCopyTransformRepPtr ptr3;
ptr2 = ptr3;
EXPECT_FALSE(ptr2);
EXPECT_FALSE(ptr3);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/deep_copy_transform_rep_ptr.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/deep_copy_transform_rep_ptr_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
94107fc2-df0d-4834-b084-1a8bd6d86636 | cpp | tensorflow/tensorflow | resize_nearest_neighbor_op | tensorflow/core/kernels/image/resize_nearest_neighbor_op.cc | tensorflow/core/kernels/image/resize_nearest_neighbor_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/image/resize_nearest_neighbor_op.h"
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/image_resizer_state.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
template <typename Device, typename T>
class ResizeNearestNeighborOp : public OpKernel {
public:
explicit ResizeNearestNeighborOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
ImageResizerState st(align_corners_, half_pixel_centers_);
st.ValidateAndCreateOutput(context);
if (!context->status().ok()) return;
OP_REQUIRES(context, st.in_height < (1 << 24) && st.in_width < (1 << 24),
errors::InvalidArgument("nearest neighbor requires max height "
"& width of 2^24"));
if (st.output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(
context->input(0).tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>());
bool status;
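    // Dispatch on (half_pixel_centers, align_corners) so each functor
    // instantiation is specialized at compile time.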
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
true,
true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
true,
false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
} else {
if (align_corners_) {
status = functor::ResizeNearestNeighbor<Device, T,
false,
true>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
} else {
status = functor::ResizeNearestNeighbor<Device, T,
false,
false>()(
context->eigen_device<Device>(), input_data, st.height_scale,
st.width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighbor"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
template <bool half_pixel_centers>
struct BoolToScaler {};
struct HalfPixelScalerForNN {
inline float operator()(const int x, const float scale) const {
return (static_cast<float>(x) + 0.5f) * scale;
}
};
template <>
struct BoolToScaler<true> {
typedef HalfPixelScalerForNN Scaler;
};
template <>
struct BoolToScaler<false> {
typedef LegacyScaler Scaler;
};
namespace functor {
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighbor<CPUDevice, T, half_pixel_centers, align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
#ifdef PLATFORM_GOOGLE
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index y = 0; y < out_height; ++y) {
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
for (Eigen::Index x = 0; x < out_width; ++x) {
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(b, in_y, in_x, 0), channels, &output(b, y, x, 0));
}
}
}
#else
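    // Portable path: flatten (batch, y, x) into a single index space and let
    // Eigen's parallelFor shard it across threads using the cost model below.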
auto ParallelResize = [&](Eigen::Index start, Eigen::Index end) {
for (Eigen::Index b = start; b < end; ++b) {
Eigen::Index x = b % out_width;
Eigen::Index y = (b / out_width) % out_height;
Eigen::Index bs = (b / out_width) / out_height;
Eigen::Index in_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
in_height - 1);
if (half_pixel_centers) {
in_y = std::max(static_cast<Eigen::Index>(0), in_y);
}
Eigen::Index in_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
in_width - 1);
if (half_pixel_centers) {
in_x = std::max(static_cast<Eigen::Index>(0), in_x);
}
std::copy_n(&input(bs, in_y, in_x, 0), channels, &output(bs, y, x, 0));
}
};
Eigen::Index N = batch_size * out_height * out_width;
const int input_bytes = channels * sizeof(T);
const int output_bytes = channels * sizeof(T);
const int compute_cycles = (Eigen::TensorOpCost::ModCost<T>() * 2 +
Eigen::TensorOpCost::DivCost<T>() * 3 +
Eigen::TensorOpCost::AddCost<T>() * 2 +
Eigen::TensorOpCost::MulCost<T>() * 2);
const Eigen::TensorOpCost cost(input_bytes, output_bytes, compute_cycles);
d.parallelFor(N, cost, ParallelResize);
#endif
return true;
}
};
}
template <typename Device, typename T>
class ResizeNearestNeighborOpGrad : public OpKernel {
public:
explicit ResizeNearestNeighborOpGrad(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("align_corners", &align_corners_));
OP_REQUIRES_OK(
context, context->GetAttr("half_pixel_centers", &half_pixel_centers_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 4,
errors::InvalidArgument("input must be 4-dimensional",
input.shape().DebugString()));
const Tensor& shape_t = context->input(1);
OP_REQUIRES(context, shape_t.dims() == 1,
errors::InvalidArgument("shape_t must be 1-dimensional",
shape_t.shape().DebugString()));
OP_REQUIRES(context, shape_t.NumElements() == 2,
errors::InvalidArgument("shape_t must have two elements",
shape_t.shape().DebugString()));
auto sizes = shape_t.vec<int32>();
OP_REQUIRES(context, sizes(0) > 0 && sizes(1) > 0,
errors::InvalidArgument("shape_t's elements must be positive"));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
context, !OpDeterminismRequired(),
errors::Unimplemented(
"A deterministic GPU implementation of ResizeNearestNeighborGrad"
" is not currently available."));
}
const int64_t batch_size = input.dim_size(0);
const int64_t in_height = input.dim_size(1);
const int64_t in_width = input.dim_size(2);
const int64_t channels = input.dim_size(3);
const int64_t out_height = sizes(0);
const int64_t out_width = sizes(1);
Tensor* output = nullptr;
TensorShape shape;
OP_REQUIRES_OK(context,
TensorShape::BuildTensorShape(
{batch_size, out_height, out_width, channels}, &shape));
OP_REQUIRES_OK(context, context->allocate_output(0, shape, &output));
if (output->NumElements() == 0) return;
typename TTypes<T, 4>::ConstTensor input_data(input.tensor<T, 4>());
typename TTypes<T, 4>::Tensor output_data(output->tensor<T, 4>());
const float height_scale =
CalculateResizeScale(out_height, in_height, align_corners_);
const float width_scale =
CalculateResizeScale(out_width, in_width, align_corners_);
bool status;
if (half_pixel_centers_) {
if (align_corners_) {
status = functor::ResizeNearestNeighborGrad<Device, T,
true,
true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status = functor::ResizeNearestNeighborGrad<Device, T,
true,
false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
} else {
if (align_corners_) {
status =
functor::ResizeNearestNeighborGrad<Device, T,
false,
true>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
} else {
status =
functor::ResizeNearestNeighborGrad<Device, T,
false,
false>()(
context->eigen_device<Device>(), input_data, height_scale,
width_scale, output_data);
}
}
if (!status) {
context->SetStatus(
errors::Internal("Failed launching ResizeNearestNeighborGrad"));
}
}
private:
bool align_corners_;
bool half_pixel_centers_;
};
namespace functor {
template <typename T, bool half_pixel_centers, bool align_corners>
struct ResizeNearestNeighborGrad<CPUDevice, T, half_pixel_centers,
align_corners> {
bool operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input,
const float height_scale, const float width_scale,
typename TTypes<T, 4>::Tensor output) {
typename BoolToScaler<half_pixel_centers>::Scaler scaler;
const Eigen::Index batch_size = input.dimension(0);
const Eigen::Index in_height = input.dimension(1);
const Eigen::Index in_width = input.dimension(2);
const Eigen::Index channels = input.dimension(3);
const Eigen::Index out_height = output.dimension(1);
const Eigen::Index out_width = output.dimension(2);
output.setZero();
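    // The gradient scatters each input element onto its nearest output
    // location, accumulating where multiple inputs map to the same cell.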
for (Eigen::Index y = 0; y < in_height; ++y) {
const Eigen::Index out_y = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(y, height_scale)))
: static_cast<Eigen::Index>(floorf(scaler(y, height_scale))),
out_height - 1);
for (Eigen::Index x = 0; x < in_width; ++x) {
const Eigen::Index out_x = std::min(
(align_corners)
? static_cast<Eigen::Index>(roundf(scaler(x, width_scale)))
: static_cast<Eigen::Index>(floorf(scaler(x, width_scale))),
out_width - 1);
for (Eigen::Index b = 0; b < batch_size; ++b) {
for (Eigen::Index c = 0; c < channels; ++c) {
output(b, out_y, out_x, c) += input(b, y, x, c);
}
}
}
}
return true;
}
};
}
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<CPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<CPUDevice, T>);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_KERNEL(T) \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighbor") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOp<GPUDevice, T>); \
REGISTER_KERNEL_BUILDER(Name("ResizeNearestNeighborGrad") \
.Device(DEVICE_GPU) \
.TypeConstraint<T>("T") \
.HostMemory("size"), \
ResizeNearestNeighborOpGrad<GPUDevice, T>);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_KERNEL);
#undef REGISTER_KERNEL
#endif
} | #include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
enum class TestDevice { kCPU, kGPU };
class ResizeNearestNeighborOpTestBase
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
explicit ResizeNearestNeighborOpTestBase(bool half_pixel_centers)
: align_corners_(false), half_pixel_centers_(half_pixel_centers) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {},
"/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Attr("half_pixel_centers", half_pixel_centers_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
bool half_pixel_centers_;
};
class ResizeNearestNeighborOpTest : public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborOpTest() : ResizeNearestNeighborOpTestBase(false) {}
};
class ResizeNearestNeighborHalfPixelCentersOpTest
: public ResizeNearestNeighborOpTestBase {
protected:
ResizeNearestNeighborHalfPixelCentersOpTest()
: ResizeNearestNeighborOpTestBase(true) {}
};
class ResizeNearestNeighborOpAlignCornersTest
: public OpsTestBase,
public ::testing::WithParamInterface<TestDevice> {
protected:
ResizeNearestNeighborOpAlignCornersTest() : align_corners_(true) {}
void SetUp() override {
if (GetParam() == TestDevice::kGPU) {
std::unique_ptr<Device> device_gpu(
DeviceFactory::NewDevice("GPU", {},
"/job:a/replica:0/task:0"));
SetDevice(DEVICE_GPU, std::move(device_gpu));
}
TF_EXPECT_OK(NodeDefBuilder("resize_nn_op", "ResizeNearestNeighbor")
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("align_corners", align_corners_)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
}
bool align_corners_;
};
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearest2x2AlignCornersTo1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {1});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 1, 2,
1, 1, 2,
3, 3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
4, 5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestAlignCorners3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 1, 2, 2,
3, 3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 3,
5, 6, 7,
9, 10, 11});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpAlignCornersTest,
TestNearestNeighborAlignCorners4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{ 1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
1, 2,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborOpTest, TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 1,
1, 2, 2,
1, 1, 1,
1, 2, 2,
3, 3, 3,
3, 4, 4,
5, 5, 5,
5, 6, 6,
5, 5, 5,
5, 6, 6,
7, 7, 7,
7, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest5x2To2x2) {
AddInputFromArray<float>(TensorShape({1, 2, 5, 1}),
{1, 2, 3, 4, 5, 1, 2, 3, 4, 5});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected, {2, 4, 2, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To1x1) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {1, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 1, 1, 1}));
test::FillValues<float>(&expected, {4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To3x3) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 2, 2,
3, 4, 4,
3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest3x3To2x2) {
AddInputFromArray<float>(TensorShape({1, 3, 3, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9});
AddInputFromArray<int32>(TensorShape({2}), {2, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 2, 1}));
test::FillValues<float>(&expected,
{1, 3,
7, 9});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To2x5) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {2, 5});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 2, 5, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2,
3, 3, 4, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearestNeighbor4x4To3x3) {
AddInputFromArray<float>(
TensorShape({1, 4, 4, 1}),
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 3, 3, 1}));
test::FillValues<float>(&expected,
{1, 3, 4,
9, 11, 12,
13, 15, 16});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To5x2) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {5, 2});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 5, 2, 1}));
test::FillValues<float>(&expected,
{1, 2,
1, 2,
3, 4,
3, 4,
3, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest, TestNearest2x2To4x4) {
AddInputFromArray<float>(TensorShape({1, 2, 2, 1}), {1, 2, 3, 4});
AddInputFromArray<int32>(TensorShape({2}), {4, 4});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({1, 4, 4, 1}));
test::FillValues<float>(&expected,
{1, 1, 2, 2,
1, 1, 2, 2,
3, 3, 4, 4,
3, 3, 4, 4});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_P(ResizeNearestNeighborHalfPixelCentersOpTest,
TestNearest2x2x2x2To2x3x3x2) {
AddInputFromArray<float>(TensorShape({2, 2, 2, 2}),
{1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8});
AddInputFromArray<int32>(TensorShape({2}), {3, 3});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3, 3, 2}));
test::FillValues<float>(&expected,
{1, 1, 2, 2, 2, 2,
3, 3, 4, 4, 4, 4,
3, 3, 4, 4, 4, 4,
5, 5, 6, 6, 6, 6,
7, 7, 8, 8, 8, 8,
7, 7, 8, 8, 8, 8});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestCpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestCpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kCPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestCpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kCPU));
#if GOOGLE_CUDA
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpTestGpu,
ResizeNearestNeighborOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborHalfPixelCentersOpTestGpu,
ResizeNearestNeighborHalfPixelCentersOpTest,
::testing::Values(TestDevice::kGPU));
INSTANTIATE_TEST_SUITE_P(ResizeNearestNeighborOpAlignCornersTestGpu,
ResizeNearestNeighborOpAlignCornersTest,
::testing::Values(TestDevice::kGPU));
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_nearest_neighbor_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/image/resize_nearest_neighbor_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
972db8a3-1645-43d4-94a7-46408a04c7bc | cpp | tensorflow/tensorflow | tflite_tensor_view | tensorflow/lite/kernels/shim/tflite_tensor_view.cc | tensorflow/lite/kernels/shim/tflite_tensor_view_test.cc | #include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/variant.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/shim/tensor_view.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/type_to_tflitetype.h"
#define CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(TFLITE_DTYPE, CPP_DTYPE) \
case TFLITE_DTYPE: { \
using DType = typename CPP_DTYPE; \
return TfLiteTensorView(wrapped_tensor, DType()); \
}
#define CASE_FOR_DTYPE(TFLITE_DTYPE) \
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE( \
TFLITE_DTYPE, ::tflite::TfLiteTypeToType<TFLITE_DTYPE>::Type)
namespace tflite {
namespace shim {
TfLiteTensorView::TfLiteTensorView(::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
wrapped_tensor_(wrapped_tensor),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(const ::TfLiteTensor *wrapped_tensor,
const ::tensorflow::tstring &dtype)
: TensorView(absl::Span<int>(wrapped_tensor->dims->data,
wrapped_tensor->dims->size),
nullptr, 0, dtype),
const_wrapped_tensor_(wrapped_tensor) {
InitForStringDType();
}
TfLiteTensorView::TfLiteTensorView(TfLiteTensorView &&o) noexcept
: TensorView(std::move(o)),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(std::move(o.str_vec_)) {
}
TfLiteTensorView::TfLiteTensorView(const TfLiteTensorView &o)
: TensorView(o),
wrapped_tensor_(o.wrapped_tensor_),
const_wrapped_tensor_(o.const_wrapped_tensor_),
str_vec_(o.str_vec_) {
}
TfLiteTensorView &TfLiteTensorView::operator=(TfLiteTensorView &&o) noexcept {
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = std::move(o.str_vec_);
TensorView::operator=(std::move(o));
return *this;
}
TfLiteTensorView &TfLiteTensorView::operator=(const TfLiteTensorView &o) {
if (&o == this) return *this;
TensorView::operator=(o);
wrapped_tensor_ = o.wrapped_tensor_;
const_wrapped_tensor_ = o.const_wrapped_tensor_;
str_vec_ = o.str_vec_;
return *this;
}
void TfLiteTensorView::InitForStringDType() {
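  // String tensors are exposed through a shared buffer of tstrings that is
  // lazily populated from the wrapped tensor and written back when the last
  // view referencing it is destroyed.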
if (str_vec_ == nullptr) {
str_vec_ = std::make_shared<StringBuffer>(this);
}
data_ = absl::Span<::tensorflow::tstring>(str_vec_->buffer);
}
TfLiteTensorView::StringBuffer::StringBuffer(TfLiteTensorView *t_view)
: wrapped_tensor(t_view->wrapped_tensor_) {
buffer.resize(NumElements(t_view->shape_));
const auto const_wrapped_tensor = t_view->const_wrapped_tensor_;
  const std::size_t str_count =
      (const_wrapped_tensor->data.raw == nullptr)
          ? 0
          : ::tflite::GetStringCount(const_wrapped_tensor);
  for (std::size_t i = 0; i < str_count; ++i) {
const auto str_ref = ::tflite::GetString(const_wrapped_tensor, i);
buffer[i].assign_as_view(str_ref.str, str_ref.len);
}
}
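// Serialize the accumulated strings back into the wrapped (mutable) tensor.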
TfLiteTensorView::StringBuffer::~StringBuffer() {
if (wrapped_tensor == nullptr) return;
tflite::DynamicBuffer buf;
for (const auto &s : buffer) buf.AddString(s.data(), s.length());
buf.WriteToTensor(wrapped_tensor, nullptr);
}
template <typename TfLiteTensorType>
absl::StatusOr<
typename MatchConstNess<TfLiteTensorType, TfLiteTensorView>::Type>
TfLiteTensorViewTemplatizedNew(TfLiteTensorType *wrapped_tensor) {
switch (wrapped_tensor->type) {
CASE_FOR_DTYPE(kTfLiteBool);
CASE_FOR_DTYPE(kTfLiteUInt8);
CASE_FOR_DTYPE(kTfLiteUInt64);
CASE_FOR_DTYPE(kTfLiteInt8);
CASE_FOR_DTYPE(kTfLiteInt16);
CASE_FOR_DTYPE(kTfLiteInt32);
CASE_FOR_DTYPE(kTfLiteInt64);
CASE_FOR_DTYPE(kTfLiteFloat32);
CASE_FOR_DTYPE(kTfLiteFloat64);
CASE_FOR_DTYPE_GIVEN_CPP_DTYPE(kTfLiteString, ::tensorflow::tstring);
default: {
return absl::UnimplementedError(
absl::StrCat("Unsupported dtype: ", wrapped_tensor->type));
}
}
}
template <>
absl::StatusOr<TfLiteTensorView> TensorView::New<::TfLiteTensor>(
::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
template <>
absl::StatusOr<const TfLiteTensorView> TensorView::New<const ::TfLiteTensor>(
const ::TfLiteTensor *wrapped_tensor) {
return TfLiteTensorViewTemplatizedNew(wrapped_tensor);
}
}
} | #include "tensorflow/lite/kernels/shim/tflite_tensor_view.h"
#include <cstdint>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/shim/test_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace shim {
namespace {
using ::testing::Eq;
TEST(TfLiteTensorW, Bool) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<bool>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_bool";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<bool>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = (i % 5 == 0);
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[1, 0], [0, 0], [0, 1]]"));
}
template <typename IntType>
void IntTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<IntType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_int";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_premove_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_premove_or.ok()) << t_premove_or.status();
auto t = std::move(t_premove_or.value());
auto data = t.Data<IntType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = i;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 1], [2, 3], [4, 5]]"));
}
TEST(TfLiteTensorW, Int8) { IntTest<int8_t>(); }
TEST(TfLiteTensorW, UInt8) { IntTest<uint8_t>(); }
TEST(TfLiteTensorW, Int16) { IntTest<int16_t>(); }
TEST(TfLiteTensorW, Int32) { IntTest<int32_t>(); }
TEST(TfLiteTensorW, Int64) { IntTest<int64_t>(); }
template <typename FloatType>
void FloatTest() {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<FloatType>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_float";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto data = t.Data<FloatType>();
for (int32_t i = 0; i < 3 * 2; ++i) data[i] = static_cast<FloatType>(i) / 2.;
ASSERT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[0, 0.5], [1, 1.5], [2, 2.5]]"));
}
TEST(TfLiteTensorW, Float) { FloatTest<float>(); }
TEST(TfLiteTensorW, Double) { FloatTest<double>(); }
TEST(TfLiteTensorW, Str) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({3, 2}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
auto t_mat = t.As<::tensorflow::tstring, 2>();
t.Data<::tensorflow::tstring>()[0] = "a";
t.Data<::tensorflow::tstring>()[1] = "bc";
t_mat(1, 0) = "def";
t.Data<::tensorflow::tstring>()[3] = "g";
t.Data<::tensorflow::tstring>()[4] = "";
t_mat(2, 1) = "hi";
}
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
const auto const_tflite_tensor = tflite_tensor;
{
const auto t_or = TensorView::New(const_tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
const auto& t = t_or.value();
EXPECT_THAT(t.Data<::tensorflow::tstring>(),
::testing::ElementsAre("a", "bc", "def", "g", "", "hi"));
}
EXPECT_THAT(TfliteTensorDebugString(tflite_tensor),
Eq("[[a, bc], [def, g], [, hi]]"));
}
TEST(TfLiteTensorW, EmptyStr) {
::tflite::Interpreter interpreter;
interpreter.AddTensors(1);
interpreter.AllocateTensors();
auto* tflite_tensor = interpreter.tensor(0);
ReallocDynamicTensor<std::string>({0}, tflite_tensor);
tflite_tensor->name = "test_str";
auto owned_tflite_tensor = UniqueTfLiteTensor(tflite_tensor);
{
auto t_or = TensorView::New(tflite_tensor);
ASSERT_TRUE(t_or.ok()) << t_or.status();
}
EXPECT_THAT(GetStringCount(tflite_tensor), Eq(0));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_tensor_view.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_tensor_view_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7e021945-fa0a-45dd-bdad-66d1e9d09bc0 | cpp | google/quiche | quic_data_writer | quiche/quic/core/quic_data_writer.cc | quiche/quic/core/quic_data_writer_test.cc | #include "quiche/quic/core/quic_data_writer.h"
#include <algorithm>
#include <limits>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
QuicDataWriter::QuicDataWriter(size_t size, char* buffer)
: quiche::QuicheDataWriter(size, buffer) {}
QuicDataWriter::QuicDataWriter(size_t size, char* buffer,
quiche::Endianness endianness)
: quiche::QuicheDataWriter(size, buffer, endianness) {}
QuicDataWriter::~QuicDataWriter() {}
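// WriteUFloat16 encodes a 64-bit value into a custom 16-bit unsigned
// floating-point format: an 11-bit explicit mantissa (12 effective bits
// via a hidden leading bit once the exponent is nonzero) and a 5-bit
// exponent capped at 30. Values below 2^12 are stored verbatim; anything
// at or above kUFloat16MaxValue (0x3FFC0000000) saturates to 0xFFFF. The
// loop below finds the exponent by binary search over offsets 16..1.
// For example, 8193 encodes to 6144, which decodes back to 8192.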
bool QuicDataWriter::WriteUFloat16(uint64_t value) {
uint16_t result;
if (value < (UINT64_C(1) << kUFloat16MantissaEffectiveBits)) {
result = static_cast<uint16_t>(value);
} else if (value >= kUFloat16MaxValue) {
result = std::numeric_limits<uint16_t>::max();
} else {
uint16_t exponent = 0;
for (uint16_t offset = 16; offset > 0; offset /= 2) {
if (value >= (UINT64_C(1) << (kUFloat16MantissaBits + offset))) {
exponent += offset;
value >>= offset;
}
}
QUICHE_DCHECK_GE(exponent, 1);
QUICHE_DCHECK_LE(exponent, kUFloat16MaxExponent);
QUICHE_DCHECK_GE(value, UINT64_C(1) << kUFloat16MantissaBits);
QUICHE_DCHECK_LT(value, UINT64_C(1) << kUFloat16MantissaEffectiveBits);
result = static_cast<uint16_t>(value + (exponent << kUFloat16MantissaBits));
}
if (endianness() == quiche::NETWORK_BYTE_ORDER) {
result = quiche::QuicheEndian::HostToNet16(result);
}
return WriteBytes(&result, sizeof(result));
}
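// Connection IDs are serialized as an opaque byte sequence with no
// framing; an empty connection ID contributes zero bytes and trivially
// succeeds.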
bool QuicDataWriter::WriteConnectionId(QuicConnectionId connection_id) {
if (connection_id.IsEmpty()) {
return true;
}
return WriteBytes(connection_id.data(), connection_id.length());
}
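// The length-prefixed form used by QUIC long headers: a single length
// octet followed by the raw connection ID bytes.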
bool QuicDataWriter::WriteLengthPrefixedConnectionId(
QuicConnectionId connection_id) {
return WriteUInt8(connection_id.length()) && WriteConnectionId(connection_id);
}
bool QuicDataWriter::WriteRandomBytes(QuicRandom* random, size_t length) {
char* dest = BeginWrite(length);
if (!dest) {
return false;
}
random->RandBytes(dest, length);
IncreaseLength(length);
return true;
}
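// Same shape as WriteRandomBytes, but draws from QuicRandom's insecure
// (non-cryptographic, typically faster) generator; suitable only for
// fields that carry no security guarantees.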
bool QuicDataWriter::WriteInsecureRandomBytes(QuicRandom* random,
size_t length) {
char* dest = BeginWrite(length);
if (!dest) {
return false;
}
random->InsecureRandBytes(dest, length);
IncreaseLength(length);
return true;
}
} | #include "quiche/quic/core/quic_data_writer.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
char* AsChars(unsigned char* data) { return reinterpret_cast<char*>(data); }
struct TestParams {
explicit TestParams(quiche::Endianness endianness) : endianness(endianness) {}
quiche::Endianness endianness;
};
std::string PrintToString(const TestParams& p) {
return absl::StrCat(
(p.endianness == quiche::NETWORK_BYTE_ORDER ? "Network" : "Host"),
"ByteOrder");
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
for (quiche::Endianness endianness :
{quiche::NETWORK_BYTE_ORDER, quiche::HOST_BYTE_ORDER}) {
params.push_back(TestParams(endianness));
}
return params;
}
class QuicDataWriterTest : public QuicTestWithParam<TestParams> {};
INSTANTIATE_TEST_SUITE_P(QuicDataWriterTests, QuicDataWriterTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
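// Every test runs twice, once per endianness, via the parameterized
// fixture above; writer/reader pairs share the same endianness so round
// trips are self-consistent under either byte order.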
TEST_P(QuicDataWriterTest, SanityCheckUFloat16Consts) {
EXPECT_EQ(30, kUFloat16MaxExponent);
EXPECT_EQ(11, kUFloat16MantissaBits);
EXPECT_EQ(12, kUFloat16MantissaEffectiveBits);
EXPECT_EQ(UINT64_C(0x3FFC0000000), kUFloat16MaxValue);
}
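// Exhaustive spot checks of the encoder. Note the loss of precision once
// values exceed 4096: 4097 rounds down to the same code as 4096, and
// 8190..8191 share code 6143, reflecting the format's coarsening
// granularity above 2^12 (step 2 up to 8191, step 4 up to 16383, and so
// on).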
TEST_P(QuicDataWriterTest, WriteUFloat16) {
struct TestCase {
uint64_t decoded;
uint16_t encoded;
};
TestCase test_cases[] = {
{0, 0},
{1, 1},
{2, 2},
{3, 3},
{4, 4},
{5, 5},
{6, 6},
{7, 7},
{15, 15},
{31, 31},
{42, 42},
{123, 123},
{1234, 1234},
{2046, 2046},
{2047, 2047},
{2048, 2048},
{2049, 2049},
{4094, 4094},
{4095, 4095},
{4096, 4096},
{4097, 4096},
{4098, 4097},
{4099, 4097},
{4100, 4098},
{4101, 4098},
{8190, 6143},
{8191, 6143},
{8192, 6144},
{8193, 6144},
{8194, 6144},
{8195, 6144},
{8196, 6145},
{8197, 6145},
{0x7FF8000, 0x87FF},
{0x7FFFFFF, 0x87FF},
{0x8000000, 0x8800},
{0xFFF0000, 0x8FFF},
{0xFFFFFFF, 0x8FFF},
{0x10000000, 0x9000},
{0x1FFFFFFFFFE, 0xF7FF},
{0x1FFFFFFFFFF, 0xF7FF},
{0x20000000000, 0xF800},
{0x20000000001, 0xF800},
{0x2003FFFFFFE, 0xF800},
{0x2003FFFFFFF, 0xF800},
{0x20040000000, 0xF801},
{0x20040000001, 0xF801},
{0x3FF80000000, 0xFFFE},
{0x3FFBFFFFFFF, 0xFFFE},
{0x3FFC0000000, 0xFFFF},
{0x3FFC0000001, 0xFFFF},
{0x3FFFFFFFFFF, 0xFFFF},
{0x40000000000, 0xFFFF},
{0xFFFFFFFFFFFFFFFF, 0xFFFF},
};
int num_test_cases = sizeof(test_cases) / sizeof(test_cases[0]);
for (int i = 0; i < num_test_cases; ++i) {
char buffer[2];
QuicDataWriter writer(2, buffer, GetParam().endianness);
EXPECT_TRUE(writer.WriteUFloat16(test_cases[i].decoded));
uint16_t result = *reinterpret_cast<uint16_t*>(writer.data());
if (GetParam().endianness == quiche::NETWORK_BYTE_ORDER) {
result = quiche::QuicheEndian::HostToNet16(result);
}
EXPECT_EQ(test_cases[i].encoded, result);
}
}
TEST_P(QuicDataWriterTest, ReadUFloat16) {
struct TestCase {
uint64_t decoded;
uint16_t encoded;
};
TestCase test_cases[] = {
{0, 0},
{1, 1},
{2, 2},
{3, 3},
{4, 4},
{5, 5},
{6, 6},
{7, 7},
{15, 15},
{31, 31},
{42, 42},
{123, 123},
{1234, 1234},
{2046, 2046},
{2047, 2047},
{2048, 2048},
{2049, 2049},
{4094, 4094},
{4095, 4095},
{4096, 4096},
{4098, 4097},
{4100, 4098},
{8190, 6143},
{8192, 6144},
{8196, 6145},
{0x7FF8000, 0x87FF},
{0x8000000, 0x8800},
{0xFFF0000, 0x8FFF},
{0x10000000, 0x9000},
{0x1FFE0000000, 0xF7FF},
{0x20000000000, 0xF800},
{0x20040000000, 0xF801},
{0x3FF80000000, 0xFFFE},
{0x3FFC0000000, 0xFFFF},
};
int num_test_cases = sizeof(test_cases) / sizeof(test_cases[0]);
for (int i = 0; i < num_test_cases; ++i) {
uint16_t encoded_ufloat = test_cases[i].encoded;
if (GetParam().endianness == quiche::NETWORK_BYTE_ORDER) {
encoded_ufloat = quiche::QuicheEndian::HostToNet16(encoded_ufloat);
}
QuicDataReader reader(reinterpret_cast<char*>(&encoded_ufloat), 2,
GetParam().endianness);
uint64_t value;
EXPECT_TRUE(reader.ReadUFloat16(&value));
EXPECT_EQ(test_cases[i].decoded, value);
}
}
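// Walks all 2^16 - 1 nonzero codes: decoded values must be strictly
// increasing, exact below 4097, and (above 2000) adjacent codes must
// differ by less than 0.5% (previous * 1.005 > current). Re-encoding
// value - 1, value, and value + 1 must land back on the expected codes.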
TEST_P(QuicDataWriterTest, RoundTripUFloat16) {
uint64_t previous_value = 0;
for (uint16_t i = 1; i < 0xFFFF; ++i) {
uint16_t read_number = i;
if (GetParam().endianness == quiche::NETWORK_BYTE_ORDER) {
read_number = quiche::QuicheEndian::HostToNet16(read_number);
}
QuicDataReader reader(reinterpret_cast<char*>(&read_number), 2,
GetParam().endianness);
uint64_t value;
EXPECT_TRUE(reader.ReadUFloat16(&value));
if (i < 4097) {
EXPECT_EQ(i, value);
}
EXPECT_LT(previous_value, value);
if (i > 2000) {
EXPECT_GT(previous_value * 1005, value * 1000);
}
EXPECT_LT(value, UINT64_C(0x3FFC0000000));
previous_value = value;
char buffer[6];
QuicDataWriter writer(6, buffer, GetParam().endianness);
EXPECT_TRUE(writer.WriteUFloat16(value - 1));
EXPECT_TRUE(writer.WriteUFloat16(value));
EXPECT_TRUE(writer.WriteUFloat16(value + 1));
uint16_t encoded1 = *reinterpret_cast<uint16_t*>(writer.data());
uint16_t encoded2 = *reinterpret_cast<uint16_t*>(writer.data() + 2);
uint16_t encoded3 = *reinterpret_cast<uint16_t*>(writer.data() + 4);
if (GetParam().endianness == quiche::NETWORK_BYTE_ORDER) {
encoded1 = quiche::QuicheEndian::NetToHost16(encoded1);
encoded2 = quiche::QuicheEndian::NetToHost16(encoded2);
encoded3 = quiche::QuicheEndian::NetToHost16(encoded3);
}
EXPECT_EQ(i - 1, encoded1);
EXPECT_EQ(i, encoded2);
EXPECT_EQ(i < 4096 ? i + 1 : i, encoded3);
}
}
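// Connection IDs are an opaque byte sequence: the serialized form is the
// same big-endian byte pattern regardless of the writer's endianness
// parameter.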
TEST_P(QuicDataWriterTest, WriteConnectionId) {
QuicConnectionId connection_id =
TestConnectionId(UINT64_C(0x0011223344556677));
char big_endian[] = {
0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
};
EXPECT_EQ(connection_id.length(), ABSL_ARRAYSIZE(big_endian));
ASSERT_LE(connection_id.length(), 255);
char buffer[255];
QuicDataWriter writer(connection_id.length(), buffer, GetParam().endianness);
EXPECT_TRUE(writer.WriteConnectionId(connection_id));
quiche::test::CompareCharArraysWithHexError(
"connection_id", buffer, connection_id.length(), big_endian,
connection_id.length());
QuicConnectionId read_connection_id;
QuicDataReader reader(buffer, connection_id.length(), GetParam().endianness);
EXPECT_TRUE(
reader.ReadConnectionId(&read_connection_id, ABSL_ARRAYSIZE(big_endian)));
EXPECT_EQ(connection_id, read_connection_id);
}
TEST_P(QuicDataWriterTest, LengthPrefixedConnectionId) {
QuicConnectionId connection_id =
TestConnectionId(UINT64_C(0x0011223344556677));
char length_prefixed_connection_id[] = {
0x08, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
};
EXPECT_EQ(ABSL_ARRAYSIZE(length_prefixed_connection_id),
kConnectionIdLengthSize + connection_id.length());
char buffer[kConnectionIdLengthSize + 255] = {};
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer);
EXPECT_TRUE(writer.WriteLengthPrefixedConnectionId(connection_id));
quiche::test::CompareCharArraysWithHexError(
"WriteLengthPrefixedConnectionId", buffer, writer.length(),
length_prefixed_connection_id,
ABSL_ARRAYSIZE(length_prefixed_connection_id));
memset(buffer, 0, ABSL_ARRAYSIZE(buffer));
QuicDataWriter writer2(ABSL_ARRAYSIZE(buffer), buffer);
EXPECT_TRUE(writer2.WriteUInt8(connection_id.length()));
EXPECT_TRUE(writer2.WriteConnectionId(connection_id));
quiche::test::CompareCharArraysWithHexError(
"Write length then ConnectionId", buffer, writer2.length(),
length_prefixed_connection_id,
ABSL_ARRAYSIZE(length_prefixed_connection_id));
QuicConnectionId read_connection_id;
QuicDataReader reader(buffer, ABSL_ARRAYSIZE(buffer));
EXPECT_TRUE(reader.ReadLengthPrefixedConnectionId(&read_connection_id));
EXPECT_EQ(connection_id, read_connection_id);
uint8_t read_connection_id_length2 = 33;
QuicConnectionId read_connection_id2;
QuicDataReader reader2(buffer, ABSL_ARRAYSIZE(buffer));
ASSERT_TRUE(reader2.ReadUInt8(&read_connection_id_length2));
EXPECT_EQ(connection_id.length(), read_connection_id_length2);
EXPECT_TRUE(reader2.ReadConnectionId(&read_connection_id2,
read_connection_id_length2));
EXPECT_EQ(connection_id, read_connection_id2);
}
TEST_P(QuicDataWriterTest, EmptyConnectionIds) {
QuicConnectionId empty_connection_id = EmptyQuicConnectionId();
char buffer[2];
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer, GetParam().endianness);
EXPECT_TRUE(writer.WriteConnectionId(empty_connection_id));
EXPECT_TRUE(writer.WriteUInt8(1));
EXPECT_TRUE(writer.WriteConnectionId(empty_connection_id));
EXPECT_TRUE(writer.WriteUInt8(2));
EXPECT_TRUE(writer.WriteConnectionId(empty_connection_id));
EXPECT_FALSE(writer.WriteUInt8(3));
EXPECT_EQ(buffer[0], 1);
EXPECT_EQ(buffer[1], 2);
QuicConnectionId read_connection_id = TestConnectionId();
uint8_t read_byte;
QuicDataReader reader(buffer, ABSL_ARRAYSIZE(buffer), GetParam().endianness);
EXPECT_TRUE(reader.ReadConnectionId(&read_connection_id, 0));
EXPECT_EQ(read_connection_id, empty_connection_id);
EXPECT_TRUE(reader.ReadUInt8(&read_byte));
EXPECT_EQ(read_byte, 1);
read_connection_id = TestConnectionId();
EXPECT_TRUE(reader.ReadConnectionId(&read_connection_id, 0));
EXPECT_EQ(read_connection_id, empty_connection_id);
EXPECT_TRUE(reader.ReadUInt8(&read_byte));
EXPECT_EQ(read_byte, 2);
read_connection_id = TestConnectionId();
EXPECT_TRUE(reader.ReadConnectionId(&read_connection_id, 0));
EXPECT_EQ(read_connection_id, empty_connection_id);
EXPECT_FALSE(reader.ReadUInt8(&read_byte));
}
TEST_P(QuicDataWriterTest, WriteTag) {
char CHLO[] = {
'C',
'H',
'L',
'O',
};
const int kBufferLength = sizeof(QuicTag);
char buffer[kBufferLength];
QuicDataWriter writer(kBufferLength, buffer, GetParam().endianness);
writer.WriteTag(kCHLO);
quiche::test::CompareCharArraysWithHexError("CHLO", buffer, kBufferLength,
CHLO, kBufferLength);
QuicTag read_chlo;
QuicDataReader reader(buffer, kBufferLength, GetParam().endianness);
reader.ReadTag(&read_chlo);
EXPECT_EQ(kCHLO, read_chlo);
}
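// The next several tests exercise WriteBytesToUInt64/ReadBytesToUInt64,
// which (de)serialize the low N bytes of a uint64_t for N in 1..8,
// honoring the writer's endianness. Each width is checked against
// hand-written little- and big-endian byte patterns.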
TEST_P(QuicDataWriterTest, Write16BitUnsignedIntegers) {
char little_endian16[] = {0x22, 0x11};
char big_endian16[] = {0x11, 0x22};
char buffer16[2];
{
uint16_t in_memory16 = 0x1122;
QuicDataWriter writer(2, buffer16, GetParam().endianness);
writer.WriteUInt16(in_memory16);
quiche::test::CompareCharArraysWithHexError(
"uint16_t", buffer16, 2,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian16
: little_endian16,
2);
uint16_t read_number16;
QuicDataReader reader(buffer16, 2, GetParam().endianness);
reader.ReadUInt16(&read_number16);
EXPECT_EQ(in_memory16, read_number16);
}
{
uint64_t in_memory16 = 0x0000000000001122;
QuicDataWriter writer(2, buffer16, GetParam().endianness);
writer.WriteBytesToUInt64(2, in_memory16);
quiche::test::CompareCharArraysWithHexError(
"uint16_t", buffer16, 2,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian16
: little_endian16,
2);
uint64_t read_number16;
QuicDataReader reader(buffer16, 2, GetParam().endianness);
reader.ReadBytesToUInt64(2, &read_number16);
EXPECT_EQ(in_memory16, read_number16);
}
}
TEST_P(QuicDataWriterTest, Write24BitUnsignedIntegers) {
char little_endian24[] = {0x33, 0x22, 0x11};
char big_endian24[] = {0x11, 0x22, 0x33};
char buffer24[3];
uint64_t in_memory24 = 0x0000000000112233;
QuicDataWriter writer(3, buffer24, GetParam().endianness);
writer.WriteBytesToUInt64(3, in_memory24);
quiche::test::CompareCharArraysWithHexError(
"uint24", buffer24, 3,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian24
: little_endian24,
3);
uint64_t read_number24;
QuicDataReader reader(buffer24, 3, GetParam().endianness);
reader.ReadBytesToUInt64(3, &read_number24);
EXPECT_EQ(in_memory24, read_number24);
}
TEST_P(QuicDataWriterTest, Write32BitUnsignedIntegers) {
char little_endian32[] = {0x44, 0x33, 0x22, 0x11};
char big_endian32[] = {0x11, 0x22, 0x33, 0x44};
char buffer32[4];
{
uint32_t in_memory32 = 0x11223344;
QuicDataWriter writer(4, buffer32, GetParam().endianness);
writer.WriteUInt32(in_memory32);
quiche::test::CompareCharArraysWithHexError(
"uint32_t", buffer32, 4,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian32
: little_endian32,
4);
uint32_t read_number32;
QuicDataReader reader(buffer32, 4, GetParam().endianness);
reader.ReadUInt32(&read_number32);
EXPECT_EQ(in_memory32, read_number32);
}
{
uint64_t in_memory32 = 0x11223344;
QuicDataWriter writer(4, buffer32, GetParam().endianness);
writer.WriteBytesToUInt64(4, in_memory32);
quiche::test::CompareCharArraysWithHexError(
"uint32_t", buffer32, 4,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian32
: little_endian32,
4);
uint64_t read_number32;
QuicDataReader reader(buffer32, 4, GetParam().endianness);
reader.ReadBytesToUInt64(4, &read_number32);
EXPECT_EQ(in_memory32, read_number32);
}
}
TEST_P(QuicDataWriterTest, Write40BitUnsignedIntegers) {
uint64_t in_memory40 = 0x0000001122334455;
char little_endian40[] = {0x55, 0x44, 0x33, 0x22, 0x11};
char big_endian40[] = {0x11, 0x22, 0x33, 0x44, 0x55};
char buffer40[5];
QuicDataWriter writer(5, buffer40, GetParam().endianness);
writer.WriteBytesToUInt64(5, in_memory40);
quiche::test::CompareCharArraysWithHexError(
"uint40", buffer40, 5,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian40
: little_endian40,
5);
uint64_t read_number40;
QuicDataReader reader(buffer40, 5, GetParam().endianness);
reader.ReadBytesToUInt64(5, &read_number40);
EXPECT_EQ(in_memory40, read_number40);
}
TEST_P(QuicDataWriterTest, Write48BitUnsignedIntegers) {
uint64_t in_memory48 = 0x0000112233445566;
char little_endian48[] = {0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
char big_endian48[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
char buffer48[6];
QuicDataWriter writer(6, buffer48, GetParam().endianness);
writer.WriteBytesToUInt64(6, in_memory48);
quiche::test::CompareCharArraysWithHexError(
"uint48", buffer48, 6,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian48
: little_endian48,
6);
uint64_t read_number48;
QuicDataReader reader(buffer48, 6, GetParam().endianness);
reader.ReadBytesToUInt64(6, &read_number48);
EXPECT_EQ(in_memory48, read_number48);
}
TEST_P(QuicDataWriterTest, Write56BitUnsignedIntegers) {
uint64_t in_memory56 = 0x0011223344556677;
char little_endian56[] = {0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
char big_endian56[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
char buffer56[7];
QuicDataWriter writer(7, buffer56, GetParam().endianness);
writer.WriteBytesToUInt64(7, in_memory56);
quiche::test::CompareCharArraysWithHexError(
"uint56", buffer56, 7,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian56
: little_endian56,
7);
uint64_t read_number56;
QuicDataReader reader(buffer56, 7, GetParam().endianness);
reader.ReadBytesToUInt64(7, &read_number56);
EXPECT_EQ(in_memory56, read_number56);
}
TEST_P(QuicDataWriterTest, Write64BitUnsignedIntegers) {
uint64_t in_memory64 = 0x1122334455667788;
unsigned char little_endian64[] = {0x88, 0x77, 0x66, 0x55,
0x44, 0x33, 0x22, 0x11};
unsigned char big_endian64[] = {0x11, 0x22, 0x33, 0x44,
0x55, 0x66, 0x77, 0x88};
char buffer64[8];
QuicDataWriter writer(8, buffer64, GetParam().endianness);
writer.WriteBytesToUInt64(8, in_memory64);
quiche::test::CompareCharArraysWithHexError(
"uint64_t", buffer64, 8,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER
? AsChars(big_endian64)
: AsChars(little_endian64),
8);
uint64_t read_number64;
QuicDataReader reader(buffer64, 8, GetParam().endianness);
reader.ReadBytesToUInt64(8, &read_number64);
EXPECT_EQ(in_memory64, read_number64);
QuicDataWriter writer2(8, buffer64, GetParam().endianness);
writer2.WriteUInt64(in_memory64);
quiche::test::CompareCharArraysWithHexError(
"uint64_t", buffer64, 8,
GetParam().endianness == quiche::NETWORK_BYTE_ORDER
? AsChars(big_endian64)
: AsChars(little_endian64),
8);
read_number64 = 0u;
QuicDataReader reader2(buffer64, 8, GetParam().endianness);
reader2.ReadUInt64(&read_number64);
EXPECT_EQ(in_memory64, read_number64);
}
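// Interleaves fixed-width writes with WriteBytesToUInt64 calls of every
// width 0..8; the payload totals 0+2+4+3+8+5+6+7+8 = 43 bytes, exactly
// sizeof(buf), so the 9-byte write and read at i == 9 must both fail.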
TEST_P(QuicDataWriterTest, WriteIntegers) {
char buf[43];
uint8_t i8 = 0x01;
uint16_t i16 = 0x0123;
uint32_t i32 = 0x01234567;
uint64_t i64 = 0x0123456789ABCDEF;
QuicDataWriter writer(ABSL_ARRAYSIZE(buf), buf, GetParam().endianness);
for (size_t i = 0; i < 10; ++i) {
switch (i) {
case 0u:
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
case 1u:
EXPECT_TRUE(writer.WriteUInt8(i8));
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
case 2u:
EXPECT_TRUE(writer.WriteUInt16(i16));
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
case 3u:
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
case 4u:
EXPECT_TRUE(writer.WriteUInt32(i32));
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
case 5u:
case 6u:
case 7u:
case 8u:
EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
break;
default:
EXPECT_FALSE(writer.WriteBytesToUInt64(i, i64));
}
}
QuicDataReader reader(buf, ABSL_ARRAYSIZE(buf), GetParam().endianness);
for (size_t i = 0; i < 10; ++i) {
uint8_t read8;
uint16_t read16;
uint32_t read32;
uint64_t read64;
switch (i) {
case 0u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0u, read64);
break;
case 1u:
EXPECT_TRUE(reader.ReadUInt8(&read8));
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(i8, read8);
EXPECT_EQ(0xEFu, read64);
break;
case 2u:
EXPECT_TRUE(reader.ReadUInt16(&read16));
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(i16, read16);
EXPECT_EQ(0xCDEFu, read64);
break;
case 3u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0xABCDEFu, read64);
break;
case 4u:
EXPECT_TRUE(reader.ReadUInt32(&read32));
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(i32, read32);
EXPECT_EQ(0x89ABCDEFu, read64);
break;
case 5u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0x6789ABCDEFu, read64);
break;
case 6u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0x456789ABCDEFu, read64);
break;
case 7u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0x23456789ABCDEFu, read64);
break;
case 8u:
EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
EXPECT_EQ(0x0123456789ABCDEFu, read64);
break;
default:
EXPECT_FALSE(reader.ReadBytesToUInt64(i, &read64));
}
}
}
TEST_P(QuicDataWriterTest, WriteBytes) {
char bytes[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
char buf[ABSL_ARRAYSIZE(bytes)];
QuicDataWriter writer(ABSL_ARRAYSIZE(buf), buf, GetParam().endianness);
EXPECT_TRUE(writer.WriteBytes(bytes, ABSL_ARRAYSIZE(bytes)));
for (unsigned int i = 0; i < ABSL_ARRAYSIZE(bytes); ++i) {
EXPECT_EQ(bytes[i], buf[i]);
}
}
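// The variable-length integers below use the RFC 9000 scheme: the two
// high bits of the first byte give the total length (00 -> 1 byte for
// values < 2^6, 01 -> 2 bytes for < 2^14, 10 -> 4 bytes for < 2^30,
// 11 -> 8 bytes for < 2^62), with the remaining bits holding the value
// in network byte order.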
const int kMultiVarCount = 1000;
void EncodeDecodeStreamId(uint64_t value_in) {
char buffer[1 * kMultiVarCount];
memset(buffer, 0, sizeof(buffer));
QuicDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
quiche::Endianness::NETWORK_BYTE_ORDER);
EXPECT_TRUE(writer.WriteVarInt62(value_in));
QuicDataReader reader(buffer, sizeof(buffer),
quiche::Endianness::NETWORK_BYTE_ORDER);
QuicStreamId received_stream_id;
uint64_t temp;
EXPECT_TRUE(reader.ReadVarInt62(&temp));
received_stream_id = static_cast<QuicStreamId>(temp);
EXPECT_EQ(value_in, received_stream_id);
}
TEST_P(QuicDataWriterTest, StreamId1) {
EncodeDecodeStreamId(UINT64_C(0x15));
EncodeDecodeStreamId(UINT64_C(0x1567));
EncodeDecodeStreamId(UINT64_C(0x34567890));
EncodeDecodeStreamId(UINT64_C(0xf4567890));
}
TEST_P(QuicDataWriterTest, WriteRandomBytes) {
char buffer[20];
char expected[20];
for (size_t i = 0; i < 20; ++i) {
expected[i] = 'r';
}
MockRandom random;
QuicDataWriter writer(20, buffer, GetParam().endianness);
EXPECT_FALSE(writer.WriteRandomBytes(&random, 30));
EXPECT_TRUE(writer.WriteRandomBytes(&random, 20));
quiche::test::CompareCharArraysWithHexError("random", buffer, 20, expected,
20);
}
TEST_P(QuicDataWriterTest, WriteInsecureRandomBytes) {
char buffer[20];
char expected[20];
for (size_t i = 0; i < 20; ++i) {
expected[i] = 'r';
}
MockRandom random;
QuicDataWriter writer(20, buffer, GetParam().endianness);
EXPECT_FALSE(writer.WriteInsecureRandomBytes(&random, 30));
EXPECT_TRUE(writer.WriteInsecureRandomBytes(&random, 20));
quiche::test::CompareCharArraysWithHexError("random", buffer, 20, expected,
20);
}
TEST_P(QuicDataWriterTest, PeekVarInt62Length) {
char buffer[20];
QuicDataWriter writer(20, buffer, quiche::NETWORK_BYTE_ORDER);
EXPECT_TRUE(writer.WriteVarInt62(50));
QuicDataReader reader(buffer, 20, quiche::NETWORK_BYTE_ORDER);
EXPECT_EQ(1, reader.PeekVarInt62Length());
char buffer2[20];
QuicDataWriter writer2(20, buffer2, quiche::NETWORK_BYTE_ORDER);
EXPECT_TRUE(writer2.WriteVarInt62(100));
QuicDataReader reader2(buffer2, 20, quiche::NETWORK_BYTE_ORDER);
EXPECT_EQ(2, reader2.PeekVarInt62Length());
char buffer3[20];
QuicDataWriter writer3(20, buffer3, quiche::NETWORK_BYTE_ORDER);
EXPECT_TRUE(writer3.WriteVarInt62(20000));
QuicDataReader reader3(buffer3, 20, quiche::NETWORK_BYTE_ORDER);
EXPECT_EQ(4, reader3.PeekVarInt62Length());
char buffer4[20];
QuicDataWriter writer4(20, buffer4, quiche::NETWORK_BYTE_ORDER);
EXPECT_TRUE(writer4.WriteVarInt62(2000000000));
QuicDataReader reader4(buffer4, 20, quiche::NETWORK_BYTE_ORDER);
EXPECT_EQ(8, reader4.PeekVarInt62Length());
}
TEST_P(QuicDataWriterTest, ValidStreamCount) {
char buffer[1024];
memset(buffer, 0, sizeof(buffer));
QuicDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
quiche::Endianness::NETWORK_BYTE_ORDER);
QuicDataReader reader(buffer, sizeof(buffer));
const QuicStreamCount write_stream_count = 0xffeeddcc;
EXPECT_TRUE(writer.WriteVarInt62(write_stream_count));
QuicStreamCount read_stream_count;
uint64_t temp;
EXPECT_TRUE(reader.ReadVarInt62(&temp));
read_stream_count = static_cast<QuicStreamCount>(temp);
EXPECT_EQ(write_stream_count, read_stream_count);
}
TEST_P(QuicDataWriterTest, Seek) {
char buffer[3] = {};
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer, GetParam().endianness);
EXPECT_TRUE(writer.WriteUInt8(42));
EXPECT_TRUE(writer.Seek(1));
EXPECT_TRUE(writer.WriteUInt8(3));
char expected[] = {42, 0, 3};
for (size_t i = 0; i < ABSL_ARRAYSIZE(expected); ++i) {
EXPECT_EQ(buffer[i], expected[i]);
}
}
TEST_P(QuicDataWriterTest, SeekTooFarFails) {
char buffer[20];
{
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
GetParam().endianness);
EXPECT_TRUE(writer.Seek(20));
EXPECT_FALSE(writer.Seek(1));
}
{
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
GetParam().endianness);
EXPECT_FALSE(writer.Seek(100));
}
{
QuicDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
GetParam().endianness);
EXPECT_TRUE(writer.Seek(10));
EXPECT_FALSE(writer.Seek(std::numeric_limits<size_t>::max()));
}
}
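// QuicDataReader exposes three views of its buffer: PeekRemainingPayload
// (unread bytes, cursor unchanged), ReadRemainingPayload (unread bytes,
// cursor advanced to the end), and FullPayload (the entire buffer,
// independent of the cursor position).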
TEST_P(QuicDataWriterTest, PayloadReads) {
char buffer[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
char expected_first_read[4] = {1, 2, 3, 4};
char expected_remaining[12] = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
QuicDataReader reader(buffer, sizeof(buffer));
char first_read_buffer[4] = {};
EXPECT_TRUE(reader.ReadBytes(first_read_buffer, sizeof(first_read_buffer)));
quiche::test::CompareCharArraysWithHexError(
"first read", first_read_buffer, sizeof(first_read_buffer),
expected_first_read, sizeof(expected_first_read));
absl::string_view peeked_remaining_payload = reader.PeekRemainingPayload();
quiche::test::CompareCharArraysWithHexError(
"peeked_remaining_payload", peeked_remaining_payload.data(),
peeked_remaining_payload.length(), expected_remaining,
sizeof(expected_remaining));
absl::string_view full_payload = reader.FullPayload();
quiche::test::CompareCharArraysWithHexError(
"full_payload", full_payload.data(), full_payload.length(), buffer,
sizeof(buffer));
absl::string_view read_remaining_payload = reader.ReadRemainingPayload();
quiche::test::CompareCharArraysWithHexError(
"read_remaining_payload", read_remaining_payload.data(),
read_remaining_payload.length(), expected_remaining,
sizeof(expected_remaining));
EXPECT_TRUE(reader.IsDoneReading());
absl::string_view full_payload2 = reader.FullPayload();
quiche::test::CompareCharArraysWithHexError(
"full_payload2", full_payload2.data(), full_payload2.length(), buffer,
sizeof(buffer));
}
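// WriteStringPieceVarInt62 emits a varint-encoded length followed by the
// raw bytes; a 16-byte payload needs a single length octet, hence the
// sizeof(inner_buffer) + sizeof(uint8_t) buffer below.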
TEST_P(QuicDataWriterTest, StringPieceVarInt62) {
char inner_buffer[16] = {1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16};
absl::string_view inner_payload_write(inner_buffer, sizeof(inner_buffer));
char buffer[sizeof(inner_buffer) + sizeof(uint8_t)] = {};
QuicDataWriter writer(sizeof(buffer), buffer);
EXPECT_TRUE(writer.WriteStringPieceVarInt62(inner_payload_write));
EXPECT_EQ(0u, writer.remaining());
QuicDataReader reader(buffer, sizeof(buffer));
absl::string_view inner_payload_read;
EXPECT_TRUE(reader.ReadStringPieceVarInt62(&inner_payload_read));
quiche::test::CompareCharArraysWithHexError(
"inner_payload", inner_payload_write.data(), inner_payload_write.length(),
inner_payload_read.data(), inner_payload_read.length());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_data_writer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_data_writer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |