ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
157ac252-c2fa-4689-9278-684810b59383 | cpp | google/quiche | tun_device | quiche/quic/qbone/bonnet/tun_device.cc | quiche/quic/qbone/bonnet/tun_device_test.cc | #include "quiche/quic/qbone/bonnet/tun_device.h"
#include <fcntl.h>
#include <linux/if_tun.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <cstring>
#include <ios>
#include <string>
#include "absl/cleanup/cleanup.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/qbone/platform/kernel_interface.h"
ABSL_FLAG(std::string, qbone_client_tun_device_path, "/dev/net/tun",
"The path to the QBONE client's TUN device.");
namespace quic {
const int kInvalidFd = -1;
TunTapDevice::TunTapDevice(const std::string& interface_name, int mtu,
bool persist, bool setup_tun, bool is_tap,
KernelInterface* kernel)
: interface_name_(interface_name),
mtu_(mtu),
persist_(persist),
setup_tun_(setup_tun),
is_tap_(is_tap),
file_descriptor_(kInvalidFd),
kernel_(*kernel) {}
TunTapDevice::~TunTapDevice() {
if (!persist_) {
Down();
}
CloseDevice();
}
bool TunTapDevice::Init() {
if (interface_name_.empty() || interface_name_.size() >= IFNAMSIZ) {
QUIC_BUG(quic_bug_10995_1)
<< "interface_name must be nonempty and shorter than " << IFNAMSIZ;
return false;
}
if (!OpenDevice()) {
return false;
}
if (!ConfigureInterface()) {
return false;
}
return true;
}
bool TunTapDevice::Up() {
if (!setup_tun_) {
return true;
}
struct ifreq if_request;
memset(&if_request, 0, sizeof(if_request));
interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
if_request.ifr_flags = IFF_UP;
return NetdeviceIoctl(SIOCSIFFLAGS, reinterpret_cast<void*>(&if_request));
}
bool TunTapDevice::Down() {
if (!setup_tun_) {
return true;
}
struct ifreq if_request;
memset(&if_request, 0, sizeof(if_request));
interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
if_request.ifr_flags = 0;
return NetdeviceIoctl(SIOCSIFFLAGS, reinterpret_cast<void*>(&if_request));
}
int TunTapDevice::GetFileDescriptor() const { return file_descriptor_; }
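// Opens the TUN/TAP clone device, binds it to |interface_name_| via
// TUNSETIFF as a multi-queue interface without packet info (IFF_NO_PI), and
// applies the persistence setting via TUNSETPERSIST. On any failure the
// absl::Cleanup below closes the partially opened descriptor.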
bool TunTapDevice::OpenDevice() {
if (file_descriptor_ != kInvalidFd) {
CloseDevice();
}
struct ifreq if_request;
memset(&if_request, 0, sizeof(if_request));
interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
if_request.ifr_flags = IFF_MULTI_QUEUE | IFF_NO_PI;
if (is_tap_) {
if_request.ifr_flags |= IFF_TAP;
} else {
if_request.ifr_flags |= IFF_TUN;
}
bool successfully_opened = false;
auto cleanup = absl::MakeCleanup([this, &successfully_opened]() {
if (!successfully_opened) {
CloseDevice();
}
});
const std::string tun_device_path =
absl::GetFlag(FLAGS_qbone_client_tun_device_path);
int fd = kernel_.open(tun_device_path.c_str(), O_RDWR);
if (fd < 0) {
QUIC_PLOG(WARNING) << "Failed to open " << tun_device_path;
return successfully_opened;
}
file_descriptor_ = fd;
if (!CheckFeatures(fd)) {
return successfully_opened;
}
if (kernel_.ioctl(fd, TUNSETIFF, reinterpret_cast<void*>(&if_request)) != 0) {
QUIC_PLOG(WARNING) << "Failed to TUNSETIFF on fd(" << fd << ")";
return successfully_opened;
}
if (kernel_.ioctl(
fd, TUNSETPERSIST,
persist_ ? reinterpret_cast<void*>(&if_request) : nullptr) != 0) {
QUIC_PLOG(WARNING) << "Failed to TUNSETPERSIST on fd(" << fd << ")";
return successfully_opened;
}
successfully_opened = true;
return successfully_opened;
}
bool TunTapDevice::ConfigureInterface() {
if (!setup_tun_) {
return true;
}
struct ifreq if_request;
memset(&if_request, 0, sizeof(if_request));
interface_name_.copy(if_request.ifr_name, IFNAMSIZ);
if_request.ifr_mtu = mtu_;
if (!NetdeviceIoctl(SIOCSIFMTU, reinterpret_cast<void*>(&if_request))) {
CloseDevice();
return false;
}
return true;
}
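// Queries TUNGETFEATURES and verifies the kernel supports the flags this
// device requires (IFF_TUN | IFF_NO_PI).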
bool TunTapDevice::CheckFeatures(int tun_device_fd) {
unsigned int actual_features;
if (kernel_.ioctl(tun_device_fd, TUNGETFEATURES, &actual_features) != 0) {
QUIC_PLOG(WARNING) << "Failed to TUNGETFEATURES";
return false;
}
unsigned int required_features = IFF_TUN | IFF_NO_PI;
if ((required_features & actual_features) != required_features) {
QUIC_LOG(WARNING)
<< "Required feature does not exist. required_features: 0x" << std::hex
<< required_features << " vs actual_features: 0x" << std::hex
<< actual_features;
return false;
}
return true;
}
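// Issues a netdevice ioctl (e.g. SIOCSIFFLAGS, SIOCSIFMTU) against a
// short-lived AF_INET6 datagram socket, since these requests operate on a
// socket fd rather than the TUN fd.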
bool TunTapDevice::NetdeviceIoctl(int request, void* argp) {
int fd = kernel_.socket(AF_INET6, SOCK_DGRAM, 0);
if (fd < 0) {
QUIC_PLOG(WARNING) << "Failed to create AF_INET6 socket.";
return false;
}
if (kernel_.ioctl(fd, request, argp) != 0) {
QUIC_PLOG(WARNING) << "Failed ioctl request: " << request;
kernel_.close(fd);
return false;
}
kernel_.close(fd);
return true;
}
void TunTapDevice::CloseDevice() {
if (file_descriptor_ != kInvalidFd) {
kernel_.close(file_descriptor_);
file_descriptor_ = kInvalidFd;
}
}
} | #include "quiche/quic/qbone/bonnet/tun_device.h"
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/platform/mock_kernel.h"
namespace quic::test {
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::Unused;
const char kDeviceName[] = "tun0";
const int kSupportedFeatures =
IFF_TUN | IFF_TAP | IFF_MULTI_QUEUE | IFF_ONE_QUEUE | IFF_NO_PI;
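// Fixture that routes kernel calls through MockKernel: every socket()/open()
// hands out a fresh fd starting at 100 and registers a matching close()
// expectation, so a leaked descriptor fails the test during mock verification.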
class TunDeviceTest : public QuicTest {
protected:
void SetUp() override {
EXPECT_CALL(mock_kernel_, socket(AF_INET6, _, _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([this](Unused, Unused, Unused) {
EXPECT_CALL(mock_kernel_, close(next_fd_)).WillOnce(Return(0));
return next_fd_++;
}));
}
void SetInitExpectations(int mtu, bool persist) {
EXPECT_CALL(mock_kernel_, open(StrEq("/dev/net/tun"), _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([this](Unused, Unused) {
EXPECT_CALL(mock_kernel_, close(next_fd_)).WillOnce(Return(0));
return next_fd_++;
}));
EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([](Unused, Unused, void* argp) {
auto* actual_flags = reinterpret_cast<int*>(argp);
*actual_flags = kSupportedFeatures;
return 0;
}));
EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETIFF, _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([](Unused, Unused, void* argp) {
auto* ifr = reinterpret_cast<struct ifreq*>(argp);
EXPECT_EQ(IFF_TUN | IFF_MULTI_QUEUE | IFF_NO_PI, ifr->ifr_flags);
EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
return 0;
}));
EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETPERSIST, _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([persist](Unused, Unused, void* argp) {
auto* ifr = reinterpret_cast<struct ifreq*>(argp);
if (persist) {
EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
} else {
EXPECT_EQ(nullptr, ifr);
}
return 0;
}));
EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFMTU, _))
.Times(AnyNumber())
.WillRepeatedly(Invoke([mtu](Unused, Unused, void* argp) {
auto* ifr = reinterpret_cast<struct ifreq*>(argp);
EXPECT_EQ(mtu, ifr->ifr_mtu);
EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
return 0;
}));
}
void ExpectUp(bool fail) {
EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFFLAGS, _))
.WillOnce(Invoke([fail](Unused, Unused, void* argp) {
auto* ifr = reinterpret_cast<struct ifreq*>(argp);
EXPECT_TRUE(ifr->ifr_flags & IFF_UP);
EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
if (fail) {
return -1;
} else {
return 0;
}
}));
}
void ExpectDown(bool fail) {
EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFFLAGS, _))
.WillOnce(Invoke([fail](Unused, Unused, void* argp) {
auto* ifr = reinterpret_cast<struct ifreq*>(argp);
EXPECT_FALSE(ifr->ifr_flags & IFF_UP);
EXPECT_THAT(ifr->ifr_name, StrEq(kDeviceName));
if (fail) {
return -1;
} else {
return 0;
}
}));
}
MockKernel mock_kernel_;
int next_fd_ = 100;
};
TEST_F(TunDeviceTest, BasicWorkFlow) {
SetInitExpectations(1500, false);
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_TRUE(tun_device.Init());
EXPECT_GT(tun_device.GetFileDescriptor(), -1);
ExpectUp(false);
EXPECT_TRUE(tun_device.Up());
ExpectDown(false);
}
TEST_F(TunDeviceTest, FailToOpenTunDevice) {
SetInitExpectations(1500, false);
EXPECT_CALL(mock_kernel_, open(StrEq("/dev/net/tun"), _))
.WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
ExpectDown(false);
}
TEST_F(TunDeviceTest, FailToCheckFeature) {
SetInitExpectations(1500, false);
EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _)).WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
ExpectDown(false);
}
TEST_F(TunDeviceTest, TooFewFeature) {
SetInitExpectations(1500, false);
EXPECT_CALL(mock_kernel_, ioctl(_, TUNGETFEATURES, _))
.WillOnce(Invoke([](Unused, Unused, void* argp) {
int* actual_features = reinterpret_cast<int*>(argp);
*actual_features = IFF_TUN | IFF_ONE_QUEUE;
return 0;
}));
TunTapDevice tun_device(kDeviceName, 1500, false, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
ExpectDown(false);
}
TEST_F(TunDeviceTest, FailToSetFlag) {
SetInitExpectations(1500, true);
EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETIFF, _)).WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, true, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
}
TEST_F(TunDeviceTest, FailToPersistDevice) {
SetInitExpectations(1500, true);
EXPECT_CALL(mock_kernel_, ioctl(_, TUNSETPERSIST, _)).WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, true, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
}
TEST_F(TunDeviceTest, FailToOpenSocket) {
SetInitExpectations(1500, true);
EXPECT_CALL(mock_kernel_, socket(AF_INET6, _, _)).WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, true, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
}
TEST_F(TunDeviceTest, FailToSetMtu) {
SetInitExpectations(1500, true);
EXPECT_CALL(mock_kernel_, ioctl(_, SIOCSIFMTU, _)).WillOnce(Return(-1));
TunTapDevice tun_device(kDeviceName, 1500, true, true, false, &mock_kernel_);
EXPECT_FALSE(tun_device.Init());
EXPECT_EQ(tun_device.GetFileDescriptor(), -1);
}
TEST_F(TunDeviceTest, FailToUp) {
SetInitExpectations(1500, true);
TunTapDevice tun_device(kDeviceName, 1500, true, true, false, &mock_kernel_);
EXPECT_TRUE(tun_device.Init());
EXPECT_GT(tun_device.GetFileDescriptor(), -1);
ExpectUp(true);
EXPECT_FALSE(tun_device.Up());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
a60fb0ae-01f5-4908-b4fc-335e70e7759f | cpp | tensorflow/tensorflow | unified_api | tensorflow/python/framework/experimental/unified_api.cc | tensorflow/c/eager/unified_api_test.cc | #include <pybind11/stl.h>
#include <memory>
#include "pybind11/pybind11.h"
#include "tensorflow/c/eager/abstract_context.h"
#include "tensorflow/c/eager/abstract_function.h"
#include "tensorflow/c/eager/abstract_operation.h"
#include "tensorflow/c/eager/abstract_tensor_handle.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/c_api_internal.h"
#include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/immediate_execution_context.h"
#include "tensorflow/c/eager/immediate_execution_tensor_handle.h"
#include "tensorflow/c/eager/tfe_context_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/safe_ptr.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/python/eager/pywrap_tensor.h"
#include "tensorflow/python/lib/core/pybind11_lib.h"
#include "tensorflow/python/lib/core/pybind11_status.h"
#include "tensorflow/python/lib/core/safe_pyobject_ptr.h"
namespace py = pybind11;
using tensorflow::AbstractContext;
using tensorflow::AbstractContextPtr;
using tensorflow::AbstractFunction;
using tensorflow::AbstractOperation;
using tensorflow::AbstractOperationPtr;
using tensorflow::AbstractTensorHandle;
using tensorflow::AbstractTensorHandlePtr;
using tensorflow::OutputList;
using tensorflow::tracing::TracingContext;
using tensorflow::tracing::TracingOperation;
using tensorflow::tracing::TracingTensorHandle;
using tensorflow::ImmediateContextPtr;
using tensorflow::ImmediateExecutionContext;
using tensorflow::ImmediateExecutionTensorHandle;
using tensorflow::dyn_cast;
using tensorflow::isa;
using tensorflow::unwrap;
using tensorflow::wrap;
using tensorflow::DataType;
using tensorflow::make_safe;
using tensorflow::MaybeRaiseRegisteredFromStatus;
using tensorflow::MaybeRaiseRegisteredFromTFStatus;
using tensorflow::Pyo;
using tensorflow::Safe_TF_StatusPtr;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::TFE_TensorHandleToNumpy;
using tensorflow::core::RefCountPtr;
using tensorflow::errors::Internal;
using tensorflow::errors::InvalidArgument;
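// Python bindings for the experimental unified eager/tracing C++ API:
// exposes AbstractContext/TracingContext, AbstractOperation,
// AbstractTensorHandle and AbstractFunction wrappers, plus converters
// between eager Python objects and their C++ counterparts.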
PYBIND11_MODULE(_unified_api, m) {
m.def("SetTracingImplementation", [](const char* impl) {
Safe_TF_StatusPtr status = make_safe(TF_NewStatus());
TF_SetTracingImplementation(impl, status.get());
MaybeRaiseRegisteredFromStatus(status->status);
});
m.def("NewTracingContext", [](const char* fn_name) {
Safe_TF_StatusPtr status = make_safe(TF_NewStatus());
auto* ctx = unwrap(TF_CreateFunction(fn_name, status.get()));
MaybeRaiseRegisteredFromTFStatus(status.get());
if (!ctx) {
MaybeRaiseRegisteredFromStatus(
Internal("TF_CreateFunction returned nullptr"));
}
if (!isa<TracingContext>(ctx)) {
MaybeRaiseRegisteredFromStatus(
Internal("TF_CreateFunction must return a TracingContext, found ",
ctx->getKind()));
}
return dyn_cast<TracingContext>(ctx);
});
m.def("EagerContextToImmediateExecutionContext", [](py::handle& obj) {
TFE_Context* ctx =
static_cast<TFE_Context*>(PyCapsule_GetPointer(obj.ptr(), nullptr));
if (!ctx) {
MaybeRaiseRegisteredFromStatus(InvalidArgument("TFE_Context is nullptr"));
}
return unwrap(ctx);
});
py::class_<AbstractContext, AbstractContextPtr>(m, "AbstractContext")
.def("CreateOperation",
[](AbstractContext* self, const char* op,
const char* raw_device_name) {
auto operation = self->CreateOperation();
(void)operation->Reset(op, raw_device_name);
return operation;
})
.def("RegisterFunction",
[](AbstractContext* self, AbstractFunction* f) {
Status s = self->RegisterFunction(f);
MaybeRaiseRegisteredFromStatus(s);
})
.def("RemoveFunction", [](AbstractContext* self, const string& func) {
Status s = self->RemoveFunction(func);
MaybeRaiseRegisteredFromStatus(s);
});
py::class_<TracingContext, AbstractContext>(m, "TracingContext")
.def("AddParameter",
[](TracingContext* self, DataType dtype) {
TracingTensorHandle* handle = nullptr;
tensorflow::PartialTensorShape shape;
Status s = self->AddParameter(dtype, shape, &handle);
MaybeRaiseRegisteredFromStatus(s);
return static_cast<AbstractTensorHandle*>(handle);
})
.def("Finalize", [](TracingContext* self, py::handle& outputs) {
OutputList output_list;
if (outputs.ptr() != Py_None) {
if (!PyList_Check(outputs.ptr())) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("must provide a list of Tensors as inputs"));
}
Py_ssize_t len = PyList_Size(outputs.ptr());
output_list.outputs.resize(len);
for (Py_ssize_t i = 0; i < len; ++i) {
PyObject* elem = PyList_GetItem(outputs.ptr(), i);
if (!elem) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("Tensor at index ", i, " is None."));
}
py::handle elem_h = elem;
AbstractTensorHandle* handle = elem_h.cast<AbstractTensorHandle*>();
if (!isa<TracingTensorHandle>(handle)) {
MaybeRaiseRegisteredFromStatus(InvalidArgument(
"Tensor at index ", i, " is not a graph tensor."));
}
output_list.outputs[i] = handle;
}
}
AbstractFunction* f = nullptr;
Status s = self->Finalize(&output_list, &f);
MaybeRaiseRegisteredFromStatus(s);
return f;
});
py::class_<ImmediateExecutionContext, AbstractContext,
std::unique_ptr<ImmediateExecutionContext, py::nodelete>>
ImmediateExecutionContext(m, "ImmediateExecutionContext");
py::class_<AbstractOperation, AbstractOperationPtr>(m, "AbstractOperation")
.def("Reset",
[](AbstractOperation* self, const char* op,
const char* raw_device_name) {
Status s = self->Reset(op, raw_device_name);
MaybeRaiseRegisteredFromStatus(s);
})
.def("SetOpName",
[](AbstractOperation* self, const char* op_name) {
if (isa<TracingOperation>(self)) {
auto tracing_op = reinterpret_cast<TracingOperation*>(self);
Status s = tracing_op->SetOpName(op_name);
MaybeRaiseRegisteredFromStatus(s);
}
})
.def("Name", &AbstractOperation::Name)
.def("DeviceName", &AbstractOperation::DeviceName)
.def("SetDeviceName",
[](AbstractOperation* self, const char* name) {
Status s = self->SetDeviceName(name);
MaybeRaiseRegisteredFromStatus(s);
})
.def("AddInput",
[](AbstractOperation* self, AbstractTensorHandle* input) {
Status s = self->AddInput(input);
MaybeRaiseRegisteredFromStatus(s);
})
.def("SetAttrType",
[](AbstractOperation* self, const char* attr_name, DataType value) {
Status s = self->SetAttrType(attr_name, value);
MaybeRaiseRegisteredFromStatus(s);
})
.def("Execute", [](AbstractOperation* self, int num_outputs) {
std::vector<AbstractTensorHandle*> outputs(num_outputs);
MaybeRaiseRegisteredFromStatus(
self->Execute(absl::MakeSpan(outputs), &num_outputs));
return outputs;
});
py::class_<AbstractTensorHandle, AbstractTensorHandlePtr>(
m, "AbstractTensorHandle")
.def("DataType", &AbstractTensorHandle::DataType)
.def("numpy", [](AbstractTensorHandle* self) {
if (!isa<ImmediateExecutionTensorHandle>(self)) {
MaybeRaiseRegisteredFromStatus(Internal(
"AbstractTensorHandle.numpy() must be called with an ",
"ImmediateExecutionTensorHandle found type: ", self->getKind()));
}
TF_Status s;
TFE_TensorHandle* handle =
wrap(dyn_cast<ImmediateExecutionTensorHandle>(self));
auto result = TFE_TensorHandleToNumpy(handle, &s);
MaybeRaiseRegisteredFromStatus(s.status);
return Pyo(result);
});
m.def("EagerTensorToImmediateExecutionTensorHandle", [](py::object handle) {
if (!EagerTensor_CheckExact(handle.ptr())) {
MaybeRaiseRegisteredFromStatus(
InvalidArgument("EagerTensorToImmediateExecutionTensorHandle called "
"with non-EagerTensor."));
}
TFE_TensorHandle* eager_tensor = EagerTensor_Handle(handle.ptr());
auto t = static_cast<AbstractTensorHandle*>(unwrap(eager_tensor));
t->Ref();
return t;
});
py::class_<AbstractFunction,
std::unique_ptr<AbstractFunction, tsl::core::RefCountDeleter>>
AbstractFunction(m, "AbstractFunction");
} | #include "tensorflow/c/eager/c_api_unified_experimental.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/llvm_rtti/llvm_rtti.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class UnifiedAPI
: public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
protected:
void SetUp() override {
TF_StatusPtr status(TF_NewStatus());
TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
Status s = StatusFromTF_Status(status.get());
CHECK_EQ(errors::OK, s.code()) << s.message();
}
public:
bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
bool UseFunction() const { return std::get<2>(GetParam()); }
};
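// Model function for RunModel: checks that its single input reports a
// scalar (rank-0) shape.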
Status TestScalarShape(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(inputs[0]->Shape(&shape));
if (shape.dims() != 0) {
return errors::InvalidArgument(
"Tensor expected to have scalar shape found rank: ", shape.dims());
}
return absl::OkStatus();
}
TEST_P(UnifiedAPI, TestTensorShapeScalar) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
Status s = TestScalarTensorHandle<float, TF_FLOAT>(ctx.get(), 2.0f, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
Status s = RunModel(TestScalarShape, ctx.get(), {x.get()}, {}, UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
Status TestTensorShape2x4(AbstractContext* ctx,
absl::Span<AbstractTensorHandle* const> inputs,
absl::Span<AbstractTensorHandle*> outputs) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(inputs[0]->Shape(&shape));
if (shape.dims() != 2) {
return errors::InvalidArgument(
"Tensor expected to have rank 2 found rank: ", shape.dims());
}
int64_t dim_sizes[] = {2, 4};
for (int i = 0; i < shape.dims(); i++) {
if (shape.dim_size(i) != dim_sizes[i]) {
return errors::InvalidArgument("Dim ", i, " expected to be of size ",
dim_sizes[i],
" found: ", shape.dim_size(i));
}
}
return absl::OkStatus();
}
TEST_P(UnifiedAPI, TestTensorShape2x4) {
if (UseFunction() && UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx;
{
AbstractContext* ctx_raw = nullptr;
Status s =
BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ctx.reset(ctx_raw);
}
AbstractTensorHandlePtr x;
{
AbstractTensorHandle* x_raw = nullptr;
float data[] = {0., 0., 0., 0., 0., 0., 0., 0};
int64_t dim_sizes[] = {2, 4};
Status s = TestTensorHandleWithDims<float, TF_FLOAT>(ctx.get(), data,
dim_sizes, 2, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
Status s =
RunModel(TestTensorShape2x4, ctx.get(), {x.get()}, {}, UseFunction());
ASSERT_EQ(errors::OK, s.code()) << s.message();
}
TEST_P(UnifiedAPI, TestUnknownShapeTracing) {
if (!UseFunction()) {
GTEST_SKIP() << "Tracing only test.";
}
if (UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx(BuildFunction("test_fn"));
AbstractTensorHandlePtr x;
{
tracing::TracingTensorHandle* x_raw = nullptr;
PartialTensorShape shape;
Status s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_TRUE(shape.unknown_rank());
}
TEST_P(UnifiedAPI, TestPartialShapeTracing) {
if (!UseFunction()) {
GTEST_SKIP() << "Tracing only test.";
}
if (UseMlir()) {
GTEST_SKIP() << "MlirTensor::Shape is not implemented yet.";
}
AbstractContextPtr ctx(BuildFunction("test_fn"));
AbstractTensorHandlePtr x;
{
tracing::TracingTensorHandle* x_raw = nullptr;
PartialTensorShape shape;
int64_t dim_sizes[] = {2, -1};
Status s = PartialTensorShape::MakePartialShape(dim_sizes, 2, &shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
s = dyn_cast<tracing::TracingContext>(ctx.get())->AddParameter(
DT_FLOAT, shape, &x_raw);
ASSERT_EQ(errors::OK, s.code()) << s.message();
x.reset(x_raw);
}
PartialTensorShape shape;
Status s = x->Shape(&shape);
ASSERT_EQ(errors::OK, s.code()) << s.message();
ASSERT_FALSE(shape.unknown_rank());
ASSERT_EQ(2, shape.dim_size(0));
ASSERT_EQ(-1, shape.dim_size(1));
}
INSTANTIATE_TEST_SUITE_P(
UnifiedCppAPI, UnifiedAPI,
::testing::Combine(::testing::Values("graphdef", "mlir"),
::testing::Values(false),
::testing::Values(true, false)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/experimental/unified_api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/unified_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75b1a335-02f3-4b27-ade6-73c68ae3eab2 | cpp | abseil/abseil-cpp | path_util | absl/flags/internal/path_util.h | absl/flags/internal/path_util_test.cc | #ifndef ABSL_FLAGS_INTERNAL_PATH_UTIL_H_
#define ABSL_FLAGS_INTERNAL_PATH_UTIL_H_
#include "absl/base/config.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
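// Returns the file name portion of the path: everything after the final
// '/' or '\\' separator, or the whole path if no separator is present.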
inline absl::string_view Basename(absl::string_view filename) {
auto last_slash_pos = filename.find_last_of("/\\");
return last_slash_pos == absl::string_view::npos
? filename
: filename.substr(last_slash_pos + 1);
}
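// Returns the directory portion of the path, including the trailing
// separator; empty if the path has no directory component.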
inline absl::string_view Package(absl::string_view filename) {
auto last_slash_pos = filename.find_last_of("/\\");
return last_slash_pos == absl::string_view::npos
? absl::string_view()
: filename.substr(0, last_slash_pos + 1);
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/flags/internal/path_util.h"
#include "gtest/gtest.h"
namespace {
namespace flags = absl::flags_internal;
TEST(FlagsPathUtilTest, TestBasename) {
EXPECT_EQ(flags::Basename(""), "");
EXPECT_EQ(flags::Basename("a.cc"), "a.cc");
EXPECT_EQ(flags::Basename("dir/a.cc"), "a.cc");
EXPECT_EQ(flags::Basename("dir1/dir2/a.cc"), "a.cc");
EXPECT_EQ(flags::Basename("../dir1/dir2/a.cc"), "a.cc");
EXPECT_EQ(flags::Basename("/dir1/dir2/a.cc"), "a.cc");
EXPECT_EQ(flags::Basename("/dir1/dir2/../dir3/a.cc"), "a.cc");
}
TEST(FlagsPathUtilTest, TestPackage) {
EXPECT_EQ(flags::Package(""), "");
EXPECT_EQ(flags::Package("a.cc"), "");
EXPECT_EQ(flags::Package("dir/a.cc"), "dir/");
EXPECT_EQ(flags::Package("dir1/dir2/a.cc"), "dir1/dir2/");
EXPECT_EQ(flags::Package("../dir1/dir2/a.cc"), "../dir1/dir2/");
EXPECT_EQ(flags::Package("/dir1/dir2/a.cc"), "/dir1/dir2/");
EXPECT_EQ(flags::Package("/dir1/dir2/../dir3/a.cc"), "/dir1/dir2/../dir3/");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/path_util.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/internal/path_util_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ea855415-dd79-4602-bd93-5e2fe3e53737 | cpp | tensorflow/tensorflow | modular_filesystem | tensorflow/c/experimental/filesystem/modular_filesystem.cc | tensorflow/c/experimental/filesystem/modular_filesystem_test.cc | #include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <algorithm>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "tensorflow/c/experimental/filesystem/filesystem_interface.h"
#include "tensorflow/c/experimental/filesystem/modular_filesystem_registration.h"
#include "tensorflow/c/tf_file_statistics.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_statistics.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
namespace tensorflow {
using UniquePtrTo_TF_Status =
::std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
Status ModularFileSystem::NewRandomAccessFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<RandomAccessFile>* result) {
if (ops_->new_random_access_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewRandomAccessFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_RandomAccessFile>();
std::string translated_name = TranslateName(fname);
ops_->new_random_access_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularRandomAccessFile>(
translated_name, std::move(file), random_access_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewWritableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_writable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewWritableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_writable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewAppendableFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<WritableFile>* result) {
if (ops_->new_appendable_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support NewAppendableFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto file = std::make_unique<TF_WritableFile>();
std::string translated_name = TranslateName(fname);
ops_->new_appendable_file(filesystem_.get(), translated_name.c_str(),
file.get(), plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularWritableFile>(
translated_name, std::move(file), writable_file_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::NewReadOnlyMemoryRegionFromFile(
const std::string& fname, TransactionToken* token,
std::unique_ptr<ReadOnlyMemoryRegion>* result) {
if (ops_->new_read_only_memory_region_from_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname,
" does not support NewReadOnlyMemoryRegionFromFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
auto region = std::make_unique<TF_ReadOnlyMemoryRegion>();
std::string translated_name = TranslateName(fname);
ops_->new_read_only_memory_region_from_file(
filesystem_.get(), translated_name.c_str(), region.get(),
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK)
*result = std::make_unique<ModularReadOnlyMemoryRegion>(
std::move(region), read_only_memory_region_ops_.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::FileExists(const std::string& fname,
TransactionToken* token) {
if (ops_->path_exists == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support FileExists()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
const std::string translated_name = TranslateName(fname);
ops_->path_exists(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
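// Batched existence check. Falls back to the generic FileSystem
// implementation when the plugin does not provide paths_exist; otherwise
// passes strdup'ed translated names and per-path TF_Status objects to the
// plugin, freeing both afterwards.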
bool ModularFileSystem::FilesExist(const std::vector<std::string>& files,
TransactionToken* token,
std::vector<Status>* status) {
if (ops_->paths_exist == nullptr)
return FileSystem::FilesExist(files, token, status);
std::vector<char*> translated_names;
translated_names.reserve(files.size());
for (int i = 0; i < files.size(); i++)
translated_names.push_back(strdup(TranslateName(files[i]).c_str()));
bool result;
if (status == nullptr) {
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), nullptr);
} else {
std::vector<TF_Status*> plugin_status;
plugin_status.reserve(files.size());
for (int i = 0; i < files.size(); i++)
plugin_status.push_back(TF_NewStatus());
result = ops_->paths_exist(filesystem_.get(), translated_names.data(),
files.size(), plugin_status.data());
for (int i = 0; i < files.size(); i++) {
status->push_back(StatusFromTF_Status(plugin_status[i]));
TF_DeleteStatus(plugin_status[i]);
}
}
for (int i = 0; i < files.size(); i++) free(translated_names[i]);
return result;
}
Status ModularFileSystem::GetChildren(const std::string& dir,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_children == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dir, " does not support GetChildren()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dir);
char** children = nullptr;
const int num_children =
ops_->get_children(filesystem_.get(), translated_name.c_str(), &children,
plugin_status.get());
if (num_children >= 0) {
for (int i = 0; i < num_children; i++) {
result->push_back(std::string(children[i]));
plugin_memory_free_(children[i]);
}
plugin_memory_free_(children);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::GetMatchingPaths(const std::string& pattern,
TransactionToken* token,
std::vector<std::string>* result) {
if (ops_->get_matching_paths == nullptr)
return internal::GetMatchingPaths(this, Env::Default(), pattern, result);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
char** matches = nullptr;
const int num_matches = ops_->get_matching_paths(
filesystem_.get(), pattern.c_str(), &matches, plugin_status.get());
if (num_matches >= 0) {
for (int i = 0; i < num_matches; i++) {
result->push_back(std::string(matches[i]));
plugin_memory_free_(matches[i]);
}
plugin_memory_free_(matches);
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteFile(const std::string& fname,
TransactionToken* token) {
if (ops_->delete_file == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support DeleteFile()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
ops_->delete_file(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteRecursively(const std::string& dirname,
TransactionToken* token,
int64_t* undeleted_files,
int64_t* undeleted_dirs) {
if (undeleted_files == nullptr || undeleted_dirs == nullptr)
return errors::FailedPrecondition(
"DeleteRecursively must not be called with `undeleted_files` or "
"`undeleted_dirs` set to NULL");
if (ops_->delete_recursively == nullptr)
return FileSystem::DeleteRecursively(dirname, token, undeleted_files,
undeleted_dirs);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
uint64_t plugin_undeleted_files, plugin_undeleted_dirs;
ops_->delete_recursively(filesystem_.get(), translated_name.c_str(),
&plugin_undeleted_files, &plugin_undeleted_dirs,
plugin_status.get());
*undeleted_files = plugin_undeleted_files;
*undeleted_dirs = plugin_undeleted_dirs;
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::DeleteDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->delete_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support DeleteDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->delete_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::RecursivelyCreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->recursively_create_dir == nullptr)
return FileSystem::RecursivelyCreateDir(dirname, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->recursively_create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CreateDir(const std::string& dirname,
TransactionToken* token) {
if (ops_->create_dir == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", dirname, " does not support CreateDir()"));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(dirname);
ops_->create_dir(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::Stat(const std::string& fname,
TransactionToken* token, FileStatistics* stat) {
if (ops_->stat == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Filesystem for ", fname, " does not support Stat()"));
if (stat == nullptr)
return errors::InvalidArgument("FileStatistics pointer must not be NULL");
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
TF_FileStatistics stats;
ops_->stat(filesystem_.get(), translated_name.c_str(), &stats,
plugin_status.get());
if (TF_GetCode(plugin_status.get()) == TF_OK) {
stat->length = stats.length;
stat->mtime_nsec = stats.mtime_nsec;
stat->is_directory = stats.is_directory;
}
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::IsDirectory(const std::string& name,
TransactionToken* token) {
if (ops_->is_directory == nullptr)
return FileSystem::IsDirectory(name, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(name);
ops_->is_directory(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::GetFileSize(const std::string& fname,
TransactionToken* token,
uint64* file_size) {
if (ops_->get_file_size == nullptr) {
FileStatistics stat;
Status status = Stat(fname, &stat);
if (!status.ok()) return status;
if (stat.is_directory)
return errors::FailedPrecondition("Called GetFileSize on a directory");
*file_size = stat.length;
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_name = TranslateName(fname);
*file_size = ops_->get_file_size(filesystem_.get(), translated_name.c_str(),
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::RenameFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->rename_file == nullptr) {
Status status = CopyFile(src, target);
if (status.ok()) status = DeleteFile(src);
return status;
}
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->rename_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::CopyFile(const std::string& src,
const std::string& target,
TransactionToken* token) {
if (ops_->copy_file == nullptr)
return FileSystem::CopyFile(src, target, token);
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
std::string translated_src = TranslateName(src);
std::string translated_target = TranslateName(target);
ops_->copy_file(filesystem_.get(), translated_src.c_str(),
translated_target.c_str(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
std::string ModularFileSystem::TranslateName(const std::string& name) const {
if (ops_->translate_name == nullptr) return FileSystem::TranslateName(name);
char* p = ops_->translate_name(filesystem_.get(), name.c_str());
CHECK(p != nullptr) << "TranslateName(" << name << ") returned nullptr";
std::string ret(p);
plugin_memory_free_(p);
return ret;
}
void ModularFileSystem::FlushCaches(TransactionToken* token) {
if (ops_->flush_caches != nullptr) ops_->flush_caches(filesystem_.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<string>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Buffer;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].buffer_val.buf = const_cast<char*>(values[i].c_str());
option_values[i].buffer_val.buf_length = values[i].size();
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<int64_t>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Int;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].int_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularFileSystem::SetOption(const std::string& name,
const std::vector<double>& values) {
if (ops_->set_filesystem_configuration == nullptr) {
return errors::Unimplemented(
"Filesystem does not support SetConfiguration()");
}
if (values.empty()) {
return errors::InvalidArgument(
"SetConfiguration() needs number of values > 0");
}
TF_Filesystem_Option option;
memset(&option, 0, sizeof(option));
option.name = const_cast<char*>(name.c_str());
TF_Filesystem_Option_Value option_value;
memset(&option_value, 0, sizeof(option_value));
option_value.type_tag = TF_Filesystem_Option_Type_Real;
option_value.num_values = values.size();
std::vector<TF_Filesystem_Option_Value_Union> option_values(values.size());
for (size_t i = 0; i < values.size(); i++) {
memset(&option_values[i], 0, sizeof(option_values[i]));
option_values[i].real_val = values[i];
}
option_value.values = &option_values[0];
option.value = &option_value;
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->set_filesystem_configuration(filesystem_.get(), &option, 1,
plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularRandomAccessFile::Read(uint64 offset, size_t n,
StringPiece* result, char* scratch) const {
if (ops_->read == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Read() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
int64_t read =
ops_->read(file_.get(), offset, n, scratch, plugin_status.get());
if (read > 0) *result = StringPiece(scratch, read);
return StatusFromTF_Status(plugin_status.get());
}
Status ModularRandomAccessFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
Status ModularWritableFile::Append(StringPiece data) {
if (ops_->append == nullptr)
return errors::Unimplemented(tensorflow::strings::StrCat(
"Append() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->append(file_.get(), data.data(), data.size(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Close() {
if (ops_->close == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Close() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->close(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Flush() {
if (ops_->flush == nullptr) return OkStatus();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->flush(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Sync() {
if (ops_->sync == nullptr) return Flush();
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
ops_->sync(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
Status ModularWritableFile::Name(StringPiece* result) const {
*result = filename_;
return OkStatus();
}
Status ModularWritableFile::Tell(int64_t* position) {
if (ops_->tell == nullptr)
return errors::Unimplemented(
tensorflow::strings::StrCat("Tell() not implemented for ", filename_));
UniquePtrTo_TF_Status plugin_status(TF_NewStatus(), TF_DeleteStatus);
*position = ops_->tell(file_.get(), plugin_status.get());
return StatusFromTF_Status(plugin_status.get());
}
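// Dynamically loads a filesystem plugin DSO, resolves its TF_InitPlugin
// entry point, and registers the filesystems the plugin provides.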
Status RegisterFilesystemPlugin(const std::string& dso_path) {
Env* env = Env::Default();
void* dso_handle;
TF_RETURN_IF_ERROR(env->LoadDynamicLibrary(dso_path.c_str(), &dso_handle));
void* dso_symbol;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
env->GetSymbolFromLibrary(dso_handle, "TF_InitPlugin", &dso_symbol),
"Failed to load TF_InitPlugin symbol for DSO: ", dso_path);
TF_FilesystemPluginInfo info;
memset(&info, 0, sizeof(info));
auto TF_InitPlugin =
reinterpret_cast<int (*)(TF_FilesystemPluginInfo*)>(dso_symbol);
TF_InitPlugin(&info);
return filesystem_registration::RegisterFilesystemPluginImpl(&info);
}
} | #include "tensorflow/c/experimental/filesystem/modular_filesystem.h"
#include <memory>
#include <random>
#include <string>
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/command_line_flags.h"
#if defined(PLATFORM_WINDOWS)
#include <direct.h>
#define mkdir(name, mode) _mkdir(name)
#undef CopyFile
#undef DeleteFile
#undef TranslateName
#endif
namespace tensorflow {
namespace {
using ::tensorflow::error::Code;
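// Parameterized over the URI scheme under test; an empty parameter targets
// the local filesystem. Each test derives a unique root directory from an
// RNG value and the test name.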
class ModularFileSystemTest : public ::testing::TestWithParam<std::string> {
public:
ModularFileSystemTest() {
const std::string test_name = tensorflow::str_util::StringReplace(
::testing::UnitTest::GetInstance()->current_test_info()->name(), "/",
"_", true);
if (!cloud_path_.empty()) {
root_dir_ = tensorflow::strings::StrCat(
"/", tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name), "/");
} else {
root_dir_ = tensorflow::io::JoinPath(
tmp_dir_,
tensorflow::strings::StrCat("tf_fs_", rng_val_, "_", test_name));
}
if (!GetParam().empty()) {
root_dir_ = tensorflow::strings::StrCat(GetParam(), ":
root_dir_);
}
env_ = Env::Default();
}
void SetUp() override {
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(root_dir_, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
s = fs->CreateDir(root_dir_);
if (!s.ok()) {
GTEST_SKIP() << "Cannot create working directory: " << s;
}
}
std::string GetURIForPath(StringPiece path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
StringPiece GetRelativePath(StringPiece absolute_path) {
return tensorflow::str_util::StripPrefix(absolute_path, root_dir_);
}
static void InitializeTestRNG() {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
rng_val_ = distribution(gen);
}
static void SetCloudPath(const std::string& cloud_path) {
cloud_path_ = cloud_path;
if (cloud_path_.back() == '/') cloud_path_.pop_back();
}
static void SetTmpDir(const std::string& tmp_dir) {
tmp_dir_ = tmp_dir.empty() ? ::testing::TempDir() : tmp_dir;
}
protected:
Env* env_;
private:
std::string root_dir_;
static int rng_val_;
static std::string cloud_path_;
static std::string tmp_dir_;
};
int ModularFileSystemTest::rng_val_;
std::string ModularFileSystemTest::cloud_path_;
std::string ModularFileSystemTest::tmp_dir_;
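// Plugins may legitimately leave any operation unimplemented, so tests
// accept either the expected status code or UNIMPLEMENTED.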
bool UnimplementedOrReturnsCode(Status actual_status, Code expected_code) {
Code actual_code = actual_status.code();
return (actual_code == Code::UNIMPLEMENTED) || (actual_code == expected_code);
}
TEST_P(ModularFileSystemTest, TestTranslateName) {
const std::string generic_path = GetURIForPath("some_path");
FileSystem* fs = nullptr;
Status s = env_->GetFileSystemForFile(generic_path, &fs);
if (fs == nullptr || !s.ok())
GTEST_SKIP() << "No filesystem registered: " << s;
if (GetParam().empty()) {
EXPECT_EQ(fs->TranslateName(""), "");
EXPECT_EQ(fs->TranslateName("/"), "/");
EXPECT_EQ(fs->TranslateName("
EXPECT_EQ(fs->TranslateName("a_file"), "a_file");
EXPECT_EQ(fs->TranslateName("a_dir/.."), ".");
} else {
EXPECT_EQ(fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":
"/");
EXPECT_EQ(
fs->TranslateName(tensorflow::strings::StrCat(GetParam(), ":///")),
"/");
EXPECT_EQ(
fs->TranslateName(tensorflow::strings::StrCat(GetParam(), "://.")),
"/");
}
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("a_dir/a_file"))),
"/a_dir/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(GetURIForPath("./a_file"))),
"/a_file");
EXPECT_EQ(GetRelativePath(fs->TranslateName(
GetURIForPath("a/convoluted/../path/./to/.
"/a/path/to/a/file");
}
TEST_P(ModularFileSystemTest, TestCreateFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(new_path, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestAppendFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> new_file;
status = env_->NewAppendableFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenAppendFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestAppendFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<RandomAccessFile> new_file;
Status status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestReadFileExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<RandomAccessFile> new_file;
status = env_->NewRandomAccessFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateThenReadFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(filepath, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestReadFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<RandomAccessFile> same_file;
status = env_->NewRandomAccessFile(new_path, &same_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegion) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, ®ion);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionNonExisting) {
const std::string filepath = GetURIForPath("dir_not_found/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
Status status = env_->NewReadOnlyMemoryRegionFromFile(filepath, ®ion);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionExistingDir) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> new_file;
status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &new_file);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::INVALID_ARGUMENT);
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = new_file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = new_file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = new_file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<ReadOnlyMemoryRegion> region;
status = env_->NewReadOnlyMemoryRegionFromFile(filepath, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "NewReadOnlyMemoryRegionFromFile() not supported: "
<< status;
EXPECT_EQ(region->length(), test_data.size());
EXPECT_STREQ(reinterpret_cast<const char*>(region->data()),
test_data.c_str());
}
TEST_P(ModularFileSystemTest, TestCreateMemoryRegionFromFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::string new_path = GetURIForPath("a_file/a_file");
std::unique_ptr<ReadOnlyMemoryRegion> region;
status = env_->NewReadOnlyMemoryRegionFromFile(new_path, &region);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCreateDir) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCreateDirNoParent) {
const std::string dirpath = GetURIForPath("dir_not_found/a_dir");
Status status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->CreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirTwice) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->CreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::ALREADY_EXISTS);
}
TEST_P(ModularFileSystemTest, TestCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->CreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDir) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirInATree) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("a/path/to/a/another/dir");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->RecursivelyCreateDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirTwice) {
const std::string dirpath = GetURIForPath("a/path/to/a/dir");
Status status = env_->RecursivelyCreateDir(dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
status = env_->RecursivelyCreateDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->RecursivelyCreateDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedDir) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRecursivelyCreateDirFromNestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_dirpath = GetURIForPath("some/path/to_a_file/error");
status = env_->RecursivelyCreateDir(new_dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteFileDoesNotExist) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->DeleteFile(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteFileWhichIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteFile(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteFilePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_new_file");
status = env_->DeleteFile(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryFromDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string target_path = GetURIForPath("a_dir/another_dir");
EXPECT_EQ(env_->CreateDir(target_path).code(), Code::OK);
status = env_->DeleteDir(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryWhichIsFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->DeleteDir(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteDirectoryPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
status = env_->DeleteDir(new_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyNotEmpty) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string some_path = GetURIForPath("a_dir/another_dir");
status = env_->CreateDir(some_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string another_path = GetURIForPath("a_dir/yet_another_dir");
status = env_->CreateDir(another_path);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("a_dir/a_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyDoesNotExist) {
const std::string dirpath = GetURIForPath("a_dir");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
Status status =
env_->DeleteRecursively(dirpath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 1);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyAFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_path = GetURIForPath("a_file/a_dir");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(new_path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedDir) {
const std::string parent_path = GetURIForPath("parent/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string new_dirpath = GetURIForPath("parent/path/that/is/extended");
status = env_->RecursivelyCreateDir(new_dirpath);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string path = GetURIForPath("parent/path/that");
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(path, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestDeleteRecursivelyANestedFile) {
const std::string parent_path = GetURIForPath("some/path");
Status status = env_->RecursivelyCreateDir(parent_path);
if (!status.ok())
GTEST_SKIP() << "RecursivelyCreateDir() not supported: " << status;
const std::string filepath = GetURIForPath("some/path/to_a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t undeleted_files = 0;
int64_t undeleted_dirs = 0;
status = env_->DeleteRecursively(filepath, &undeleted_files, &undeleted_dirs);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
status = env_->FileExists(parent_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFileOverwrite) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(new_filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourceNotFound) {
const std::string filepath = GetURIForPath("a_file");
const std::string new_filepath = GetURIForPath("a_new_file");
Status status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestRenameFileDestinationParentNotFound) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_dir/a_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourceIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(dirpath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileTargetIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string dirpath = GetURIForPath("a_dir");
status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->RenameFile(filepath, dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileSourcePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string old_filepath = GetURIForPath("a_file/x");
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileTargetPathIsInvalid) {
const std::string old_filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> old_file;
Status status = env_->NewWritableFile(old_filepath, &old_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_file/a_new_file");
status = env_->RenameFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestRenameFileCompareContents) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->RenameFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(new_filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestCopyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCopyFileOverwrite) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
std::unique_ptr<WritableFile> new_file;
status = env_->NewWritableFile(new_filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "CopyFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
status = env_->FileExists(new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourceNotFound) {
const std::string filepath = GetURIForPath("a_file");
const std::string new_filepath = GetURIForPath("a_new_file");
Status status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourceIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(dirpath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileTargetIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> new_file;
Status status = env_->NewWritableFile(filepath, &new_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string dirpath = GetURIForPath("a_dir");
status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->CopyFile(filepath, dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileSourcePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string old_filepath = GetURIForPath("a_file/x");
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileTargetPathIsInvalid) {
const std::string old_filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> old_file;
Status status = env_->NewWritableFile(old_filepath, &old_file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_file/a_new_file");
status = env_->CopyFile(old_filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestCopyFileCompareContents) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
const std::string new_filepath = GetURIForPath("a_new_file");
status = env_->CopyFile(filepath, new_filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "RenameFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
status = env_->GetFileSize(new_filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestFileExists) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFileExistsButIsDirectory) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->CreateDir(filepath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFileExistsNotFound) {
const std::string filepath = GetURIForPath("a_file");
Status status = env_->FileExists(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestFileExistsPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
status = env_->FileExists(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestFilesExist) {
const std::vector<std::string> filenames = {GetURIForPath("a"),
GetURIForPath("b")};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
EXPECT_TRUE(env_->FilesExist(filenames, nullptr));
std::vector<Status> statuses;
EXPECT_TRUE(env_->FilesExist(filenames, &statuses));
EXPECT_EQ(statuses.size(), filenames.size());
for (const auto& status : statuses)
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestFilesExistAllFailureModes) {
const std::vector<std::string> filenames = {
GetURIForPath("a_dir"),
GetURIForPath("a_file"),
GetURIForPath("a_file/a_new_file"),
GetURIForPath("file_not_found"),
};
Status status = env_->CreateDir(filenames[0]);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filenames[1], &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::vector<Status> statuses;
EXPECT_FALSE(env_->FilesExist(filenames, &statuses));
EXPECT_EQ(statuses.size(), filenames.size());
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[0], Code::OK);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[1], Code::OK);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[2],
Code::FAILED_PRECONDITION);
EXPECT_PRED2(UnimplementedOrReturnsCode, statuses[3], Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestFilesExistsNoFiles) {
const std::vector<std::string> filenames = {};
EXPECT_TRUE(env_->FilesExist(filenames, nullptr));
std::vector<Status> statuses;
EXPECT_TRUE(env_->FilesExist(filenames, &statuses));
EXPECT_TRUE(statuses.empty());
}
TEST_P(ModularFileSystemTest, TestStatEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
FileStatistics stat;
status = env_->Stat(filepath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_FALSE(stat.is_directory);
EXPECT_EQ(stat.length, 0);
}
TEST_P(ModularFileSystemTest, TestStatNonEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
FileStatistics stat;
status = env_->Stat(filepath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_FALSE(stat.is_directory);
EXPECT_EQ(stat.length, test_data.size());
}
TEST_P(ModularFileSystemTest, TestStatDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
FileStatistics stat;
status = env_->Stat(dirpath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Stat() not supported: " << status;
EXPECT_TRUE(stat.is_directory);
}
TEST_P(ModularFileSystemTest, TestStatNotFound) {
const std::string dirpath = GetURIForPath("a_dir");
FileStatistics stat;
Status status = env_->Stat(dirpath, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestStatPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
FileStatistics stat;
status = env_->Stat(target_path, &stat);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestIsDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
status = env_->IsDirectory(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = env_->IsDirectory(filepath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryNotFound) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->IsDirectory(dirpath);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestIsDirectoryPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
status = env_->IsDirectory(target_path);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, 0);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeNonEmptyFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
uint64 size;
status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetFileSize() not supported: " << status;
EXPECT_EQ(size, test_data.size());
}
TEST_P(ModularFileSystemTest, TestGetFileSizeDirectory) {
const std::string dirpath = GetURIForPath("a_dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
uint64 size;
status = env_->GetFileSize(dirpath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetFileSizeNotFound) {
const std::string filepath = GetURIForPath("a_dir");
uint64 size;
Status status = env_->GetFileSize(filepath, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestGetFileSizePathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_file");
uint64 size;
status = env_->GetFileSize(target_path, &size);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetChildren) {
const std::string dirpath = GetURIForPath("dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
const std::vector<std::string> filenames = {
GetURIForPath("dir/a_file"),
GetURIForPath("dir/another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
const std::vector<std::string> dirnames = {
GetURIForPath("dir/a_dir"),
GetURIForPath("dir/another_dir"),
};
for (const auto& dirname : dirnames) {
status = env_->CreateDir(dirname);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
}
std::vector<std::string> children;
status = env_->GetChildren(dirpath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "GetChildren() not supported: " << status;
const std::vector<std::string> expected_children = {"a_file", "another_file",
"a_dir", "another_dir"};
EXPECT_EQ(children.size(), filenames.size() + dirnames.size());
for (const auto& child : expected_children)
EXPECT_NE(std::find(children.begin(), children.end(), child),
children.end());
}
TEST_P(ModularFileSystemTest, TestGetChildrenEmpty) {
const std::string dirpath = GetURIForPath("dir");
Status status = env_->CreateDir(dirpath);
if (!status.ok()) GTEST_SKIP() << "CreateDir() not supported: " << status;
std::vector<std::string> children;
status = env_->GetChildren(dirpath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(children.size(), 0);
}
TEST_P(ModularFileSystemTest, TestGetChildrenOfFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
std::vector<std::string> children;
status = env_->GetChildren(filepath, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetChildrenPathNotFound) {
const std::string target_path = GetURIForPath("a_dir");
std::vector<std::string> children;
Status status = env_->GetChildren(target_path, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::NOT_FOUND);
}
TEST_P(ModularFileSystemTest, TestGetChildrenPathIsInvalid) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string target_path = GetURIForPath("a_file/a_new_dir");
std::vector<std::string> children;
status = env_->GetChildren(target_path, &children);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::FAILED_PRECONDITION);
}
TEST_P(ModularFileSystemTest, TestGetMatchingPaths) {
const std::vector<std::string> matching_filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
};
const std::vector<std::string> other_filenames = {
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : matching_filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
for (const auto& filename : other_filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("/a*"), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), matching_filenames.size());
for (const auto& match : matching_filenames)
EXPECT_NE(std::find(results.begin(), results.end(), match), results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyFileSystem) {
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("a*"), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(results.size(), 0);
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsEmptyPattern) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath(""), &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), 1);
EXPECT_NE(std::find(results.begin(), results.end(), GetURIForPath("")),
results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsLiteralMatch) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(filenames[0], &results);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_EQ(results.size(), 1);
EXPECT_NE(std::find(results.begin(), results.end(), filenames[0]),
results.end());
}
TEST_P(ModularFileSystemTest, TestGetMatchingPathsNoMatch) {
const std::vector<std::string> filenames = {
GetURIForPath("a_file"),
GetURIForPath("another_file"),
GetURIForPath("some_file"),
GetURIForPath("yet_another_file"),
};
for (const auto& filename : filenames) {
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
}
std::vector<std::string> results;
Status status = env_->GetMatchingPaths(GetURIForPath("x?y*"), &results);
if (!status.ok())
GTEST_SKIP() << "GetMatchingPaths() not supported: " << status;
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(results.size(), 0);
}
TEST_P(ModularFileSystemTest, TestAppendAndTell) {
const std::string filename = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
int64_t position;
status = file->Tell(&position);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Tell() not supported: " << status;
EXPECT_EQ(position, 0);
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Tell(&position);
EXPECT_EQ(status.code(), Code::OK);
EXPECT_EQ(position, test_data.size());
}
TEST_P(ModularFileSystemTest, TestClose) {
const std::string filename = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filename, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
}
TEST_P(ModularFileSystemTest, TestRoundTrip) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size(), &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(test_data, result);
}
TEST_P(ModularFileSystemTest, TestRoundTripWithAppendableFile) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<WritableFile> same_file;
status = env_->NewAppendableFile(filepath, &same_file);
if (!status.ok())
GTEST_SKIP() << "NewAppendableFile() not supported: " << status;
const std::string more_test_data("qwer");
EXPECT_EQ(same_file->Append(more_test_data).code(), Code::OK);
EXPECT_EQ(same_file->Flush().code(), Code::OK);
EXPECT_EQ(same_file->Close().code(), Code::OK);
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size() + more_test_data.size(), &result,
scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
EXPECT_EQ(test_data + more_test_data, result);
EXPECT_EQ(
read_file->Read(test_data.size(), more_test_data.size(), &result, scratch)
.code(),
Code::OK);
EXPECT_EQ(more_test_data, result);
}
TEST_P(ModularFileSystemTest, TestReadOutOfRange) {
const std::string filepath = GetURIForPath("a_file");
std::unique_ptr<WritableFile> file;
Status status = env_->NewWritableFile(filepath, &file);
if (!status.ok())
GTEST_SKIP() << "NewWritableFile() not supported: " << status;
const std::string test_data("asdf");
status = file->Append(test_data);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Append() not supported: " << status;
status = file->Flush();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Flush() not supported: " << status;
status = file->Close();
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OK);
if (!status.ok()) GTEST_SKIP() << "Close() not supported: " << status;
std::unique_ptr<RandomAccessFile> read_file;
status = env_->NewRandomAccessFile(filepath, &read_file);
if (!status.ok())
GTEST_SKIP() << "NewRandomAccessFile() not supported: " << status;
char scratch[64] = {0};
StringPiece result;
status = read_file->Read(0, test_data.size() + 1, &result, scratch);
EXPECT_PRED2(UnimplementedOrReturnsCode, status, Code::OUT_OF_RANGE);
}
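// Schemes requested on the command line via --scheme. An empty vector means
// every filesystem scheme registered with the default Env gets tested.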
static std::vector<std::string>* SchemeVector() {
static std::vector<std::string>* schemes = new std::vector<std::string>;
return schemes;
}
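// Builds the list of schemes to test: all registered schemes, narrowed down
// to the user-requested ones when at least one --scheme flag was given.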
static std::vector<std::string>* GetSchemesFromUserOrEnv() {
std::vector<std::string>* all_schemes = new std::vector<std::string>;
tensorflow::Status status =
tensorflow::Env::Default()->GetRegisteredFileSystemSchemes(all_schemes);
if (status.ok()) {
std::vector<std::string>* user_schemes = SchemeVector();
if (!user_schemes->empty()) {
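// Note: despite its name, this predicate is true for schemes that were NOT
// requested, so the remove_if below drops everything outside user_schemes.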
auto is_requested_scheme = [user_schemes](const auto& scheme) {
return std::find(user_schemes->begin(), user_schemes->end(), scheme) ==
user_schemes->end();
};
auto end = std::remove_if(all_schemes->begin(), all_schemes->end(),
is_requested_scheme);
all_schemes->erase(end, all_schemes->end());
}
}
return all_schemes;
}
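// Computes the scheme list once and reuses it on every call; the test suite
// below is instantiated from this list.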
static std::vector<std::string> GetSchemes() {
static std::vector<std::string>* schemes = GetSchemesFromUserOrEnv();
return *schemes;
}
INSTANTIATE_TEST_SUITE_P(ModularFileSystem, ModularFileSystemTest,
::testing::ValuesIn(GetSchemes()));
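// Flag handler for --dso: loads a filesystem plugin shared object and
// registers the filesystems it provides.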
static bool LoadDSO(const std::string& dso) {
tensorflow::Status status = RegisterFilesystemPlugin(dso);
if (!status.ok())
VLOG(0) << "Filesystems from '" << dso
<< "' could not be registered: " << status;
return status.ok();
}
static bool GetURIScheme(const std::string& scheme) {
tensorflow::SchemeVector()->push_back(scheme);
return true;
}
static bool SetCloudPath(const std::string& cloud_path_) {
ModularFileSystemTest::SetCloudPath(cloud_path_);
return true;
}
static bool SetTmpDir(const std::string& tmp_dir_) {
ModularFileSystemTest::SetTmpDir(tmp_dir_);
return true;
}
}
}
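// Test driver: parses the plugin/scheme/path flags, then runs the
// parameterized suite once for every selected filesystem scheme.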
GTEST_API_ int main(int argc, char** argv) {
const std::vector<tensorflow::Flag> flag_list = {
tensorflow::Flag("dso", tensorflow::LoadDSO, "",
"Path to shared object to load"),
tensorflow::Flag("scheme", tensorflow::GetURIScheme, "",
"URI scheme to test"),
tensorflow::Flag("cloud_path", tensorflow::SetCloudPath, "",
"Path for cloud filesystem (namenode for hdfs, "
"bucketname for s3/gcs)"),
tensorflow::Flag("tmp_dir", tensorflow::SetTmpDir, "",
"Temporary directory to store test data.")};
if (!tensorflow::Flags::Parse(&argc, argv, flag_list)) {
std::cout << tensorflow::Flags::Usage(argv[0], flag_list);
return -1;
}
tensorflow::testing::InstallStacktraceHandler();
tensorflow::ModularFileSystemTest::InitializeTestRNG();
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/modular_filesystem.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/modular_filesystem_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e37c532-87dd-48a8-b0b5-c412fc21335e | cpp | tensorflow/tensorflow | dispatcher_client | tensorflow/core/data/service/dispatcher_client.cc | tensorflow/core/data/service/dispatcher_client_test.cc | #include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/channel_arguments.h"
#include "grpcpp/support/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/service/common.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/credentials_factory.h"
#include "tensorflow/core/data/service/dispatcher.grpc.pb.h"
#include "tensorflow/core/data/service/dispatcher.pb.h"
#include "tensorflow/core/data/service/grpc_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace data {
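// Creates the gRPC stub if needed and verifies that the dispatcher runs the
// same tf.data service version as this client.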
Status DataServiceDispatcherClient::Initialize() {
mutex_lock l(mu_);
if (stub_) {
return absl::OkStatus();
}
std::shared_ptr<grpc::ChannelCredentials> credentials;
TF_RETURN_IF_ERROR(
CredentialsFactory::CreateClientCredentials(protocol_, &credentials));
grpc::ChannelArguments args;
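// Serialized datasets can be large, so lift the default limit on received
// message size; a local subchannel pool keeps this channel from sharing a
// connection with other channels to the same target.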
args.SetMaxReceiveMessageSize(std::numeric_limits<int32>::max());
args.SetInt(GRPC_ARG_USE_LOCAL_SUBCHANNEL_POOL, true);
auto channel = grpc::CreateCustomChannel(address_, credentials, args);
stub_ = DispatcherService::NewStub(channel);
GetVersionRequest req;
GetVersionResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetVersion(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get dispatcher version from dispatcher "
"running at ",
address_),
s);
}
if (resp.version() != kDataServiceVersion) {
return errors::FailedPrecondition(
"Version mismatch with tf.data service server. The server is running "
"version ",
resp.version(), ", while the client is running version ",
kDataServiceVersion,
". Please ensure that the client and server side are running the "
"same version of TensorFlow. If you're running an MPM binary, make "
"sure the server is running an up-to-date MPM.");
}
return absl::OkStatus();
}
absl::StatusOr<WorkerHeartbeatResponse>
DataServiceDispatcherClient::WorkerHeartbeat(
const WorkerHeartbeatRequest& request) {
WorkerHeartbeatResponse response;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerHeartbeat(&client_ctx, request, &response);
if (!status.ok()) {
return grpc_util::WrapError("Failed to perform worker heartbeat", status);
}
return response;
}
Status DataServiceDispatcherClient::WorkerUpdate(
const std::string& worker_address,
std::vector<TaskProgress>& task_progress) {
WorkerUpdateRequest req;
req.set_worker_address(worker_address);
for (const auto& update : task_progress) {
*(req.add_updates()) = update;
}
WorkerUpdateResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->WorkerUpdate(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to send worker update", status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDatasetDef(const std::string& dataset_id,
DatasetDef& dataset_def) {
GetDatasetDefRequest req;
req.set_dataset_id(dataset_id);
GetDatasetDefResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetDatasetDef(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get dataset def", status);
}
dataset_def = resp.dataset_def();
return absl::OkStatus();
}
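// Requests the next split for the given iteration and split provider. When
// the provider is exhausted, the server sets `end_of_splits` instead of
// returning an error.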
Status DataServiceDispatcherClient::GetSplit(int64_t iteration_id,
int64_t repetition,
int64_t split_provider_index,
Tensor& split,
bool& end_of_splits) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetSplitRequest req;
req.set_iteration_id(iteration_id);
req.set_repetition(repetition);
req.set_split_provider_index(split_provider_index);
GetSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get split", status);
}
end_of_splits = resp.end_of_splits();
if (!end_of_splits) {
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto");
}
}
return absl::OkStatus();
}
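// Asks the dispatcher to start a distributed snapshot of `dataset`, written
// under `path` with the given snapshot metadata.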
Status DataServiceDispatcherClient::Snapshot(
const DatasetDef& dataset, const std::string& path,
const experimental::DistributedSnapshotMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
SnapshotRequest req;
*req.mutable_dataset() = dataset;
req.set_path(path);
*req.mutable_metadata() = metadata;
SnapshotResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->Snapshot(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to snapshot", status);
}
return absl::OkStatus();
}
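// Requests the next snapshot split for a worker's stream/source/repetition.
// On success, `local_split_index` holds the split's index within the stream
// and `end_of_splits` indicates whether the source is exhausted.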
Status DataServiceDispatcherClient::GetSnapshotSplit(
const std::string& worker_address, const std::string& base_path,
int64_t stream_index, int64_t source_index, int64_t repetition_index,
Tensor& split, int64_t& local_split_index, bool& end_of_splits) {
  TF_RETURN_IF_ERROR(EnsureInitialized());
  GetSnapshotSplitRequest req;
req.set_worker_address(worker_address);
req.set_base_path(base_path);
req.set_stream_index(stream_index);
req.set_source_index(source_index);
req.set_repetition_index(repetition_index);
GetSnapshotSplitResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetSnapshotSplit(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to get snapshot split", status);
}
local_split_index = resp.local_split_index();
end_of_splits = resp.end_of_splits();
if (end_of_splits) {
return absl::OkStatus();
}
if (!split.FromProto(resp.split())) {
return errors::Internal("Failed to parse split tensor proto: ",
resp.split().DebugString());
}
return absl::OkStatus();
}
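// Registers `dataset` with the dispatcher, or looks up an existing matching
// registration, and returns its id via `dataset_id`. If
// `requested_dataset_id` is set, the dispatcher is asked to use that id.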
Status DataServiceDispatcherClient::RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id,
std::string& dataset_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrRegisterDatasetRequest req;
*req.mutable_dataset() = dataset;
*req.mutable_metadata() = metadata;
if (requested_dataset_id.has_value()) {
req.set_dataset_id(*requested_dataset_id);
}
GetOrRegisterDatasetResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrRegisterDataset(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to register dataset", status);
}
dataset_id = resp.dataset_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateJob(
const std::string& dataset_id, const ProcessingModeDef& processing_mode,
const std::optional<std::string>& job_name,
std::optional<int64_t> num_consumers, bool use_cross_trainer_cache,
TargetWorkers target_workers, int64_t& job_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateJobRequest req;
req.set_dataset_id(dataset_id);
*req.mutable_processing_mode_def() = processing_mode;
if (job_name.has_value()) {
req.set_job_name(job_name.value());
}
if (num_consumers.has_value()) {
req.set_num_consumers(num_consumers.value());
}
req.set_target_workers(target_workers);
req.set_use_cross_trainer_cache(use_cross_trainer_cache);
GetOrCreateJobResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateJob(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create job for dataset with id ",
dataset_id),
status);
}
job_id = resp.job_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetOrCreateIteration(
int64_t job_id, int64_t repetition, int64_t& iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetOrCreateIterationRequest req;
req.set_job_id(job_id);
req.set_repetition(repetition);
GetOrCreateIterationResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->GetOrCreateIteration(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to get or create iteration for job with id ",
job_id),
status);
}
iteration_client_id = resp.iteration_client_id();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ReleaseIterationClient(
int64_t iteration_client_id) {
TF_RETURN_IF_ERROR(EnsureInitialized());
ReleaseIterationClientRequest req;
req.set_iteration_client_id(iteration_client_id);
ReleaseIterationClientResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->ReleaseIterationClient(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError(
absl::StrCat("Failed to release iteration client with id ",
iteration_client_id),
status);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::MaybeRemoveTask(int64_t task_id,
int64_t consumer_index,
int64_t round,
bool& removed) {
TF_RETURN_IF_ERROR(EnsureInitialized());
MaybeRemoveTaskRequest req;
req.set_task_id(task_id);
req.set_consumer_index(consumer_index);
req.set_round(round);
MaybeRemoveTaskResponse resp;
grpc::ClientContext client_ctx;
grpc::Status status = stub_->MaybeRemoveTask(&client_ctx, req, &resp);
if (!status.ok()) {
return grpc_util::WrapError("Failed to call MaybeRemoveTask", status);
}
removed = resp.removed();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::ClientHeartbeat(
ClientHeartbeatRequest& req, ClientHeartbeatResponse& resp) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
grpc::Status s = stub_->ClientHeartbeat(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get tasks", s);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetWorkers(
std::vector<WorkerInfo>& workers) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetWorkersRequest req;
GetWorkersResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetWorkers(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get workers", s);
}
workers.clear();
for (auto& worker : resp.workers()) {
workers.push_back(worker);
}
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceMetadata(
const std::string& dataset_id, DataServiceMetadata& metadata) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceMetadataRequest req;
req.set_dataset_id(dataset_id);
GetDataServiceMetadataResponse resp;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceMetadata(&ctx, req, &resp);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service metadata", s);
}
metadata = resp.metadata();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::GetDataServiceConfig(
DataServiceConfig& config) {
TF_RETURN_IF_ERROR(EnsureInitialized());
GetDataServiceConfigRequest request;
GetDataServiceConfigResponse response;
grpc::ClientContext ctx;
grpc::Status s = stub_->GetDataServiceConfig(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError("Failed to get data service config", s);
}
config = response.config();
return absl::OkStatus();
}
Status DataServiceDispatcherClient::DisableCompressionAtRuntime(
const std::string& dataset_id, bool disable_compression_at_runtime,
DisableCompressionAtRuntimeResponse& response) {
TF_RETURN_IF_ERROR(EnsureInitialized());
grpc::ClientContext ctx;
DisableCompressionAtRuntimeRequest request;
request.set_dataset_id(dataset_id);
request.set_disable_compression_at_runtime(disable_compression_at_runtime);
grpc::Status s = stub_->DisableCompressionAtRuntime(&ctx, request, &response);
if (!s.ok()) {
return grpc_util::WrapError(
"Failed to get runtime compression disabling decision", s);
}
return absl::OkStatus();
}
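// Retries Initialize() until it succeeds; the kint64max deadline effectively
// retries forever.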
Status DataServiceDispatcherClient::EnsureInitialized() {
return grpc_util::Retry([this] { return Initialize(); },
"Initialize dispatcher client",
                          /*deadline_micros=*/kint64max);
}
}
} | #include "tensorflow/core/data/service/dispatcher_client.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/service/common.pb.h"
#include "tensorflow/core/data/service/data_transfer.h"
#include "tensorflow/core/data/service/dataset_store.h"
#include "tensorflow/core/data/service/snapshot/path_utils.h"
#include "tensorflow/core/data/service/test_cluster.h"
#include "tensorflow/core/data/service/test_util.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/data_service.pb.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/protobuf/snapshot.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::data::experimental::DistributedSnapshotMetadata;
using ::tensorflow::data::testing::CreateDummyDistributedSnapshotMetadata;
using ::tensorflow::data::testing::EqualsProto;
using ::tensorflow::data::testing::InfiniteDataset;
using ::tensorflow::data::testing::LocalTempFilename;
using ::tensorflow::data::testing::RangeDataset;
using ::tensorflow::testing::StatusIs;
using ::testing::AllOf;
using ::testing::ContainsRegex;
using ::testing::HasSubstr;
constexpr const char kProtocol[] = "grpc";
DataServiceMetadata GetDefaultMetadata() {
StructuredValue decoded_spec;
TensorShapeProto::Dim* dim =
decoded_spec.mutable_tensor_shape_value()->add_dim();
dim->set_size(1);
  dim->set_name("dim");
DataServiceMetadata metadata;
metadata.set_element_spec(decoded_spec.SerializeAsString());
metadata.set_compression(DataServiceMetadata::COMPRESSION_SNAPPY);
metadata.set_cardinality(kUnknownCardinality);
return metadata;
}
class DispatcherClientTest : public ::testing::Test {
protected:
absl::Status SetUpTfDataService(int64_t num_workers,
int64_t worker_max_concurrent_snapshots = 0) {
TestCluster::Config config;
config.num_workers = num_workers;
config.work_dir = tsl::io::JoinPath(tsl::testing::TmpDir(), "work_dir");
config.worker_max_concurrent_snapshots = worker_max_concurrent_snapshots;
test_cluster_ = std::make_unique<TestCluster>(config);
TF_RETURN_IF_ERROR(test_cluster_->Initialize());
dispatcher_client_ = std::make_unique<DataServiceDispatcherClient>(
test_cluster_->DispatcherAddress(), kProtocol);
return absl::OkStatus();
}
absl::StatusOr<std::string> RegisterDataset(
const DatasetDef& dataset, const DataServiceMetadata& metadata,
const std::optional<std::string>& requested_dataset_id = std::nullopt) {
std::string dataset_id;
TF_RETURN_IF_ERROR(dispatcher_client_->RegisterDataset(
dataset, metadata, requested_dataset_id, dataset_id));
return dataset_id;
}
absl::StatusOr<absl::flat_hash_set<std::string>> StartDummySnapshots(
int64_t num_snapshots) {
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
absl::flat_hash_set<std::string> directories;
for (int64_t i = 0; i < num_snapshots; ++i) {
directories.insert(LocalTempFilename());
}
for (const auto& directory : directories) {
TF_RETURN_IF_ERROR(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
}
return directories;
}
std::unique_ptr<TestCluster> test_cluster_;
std::unique_ptr<DataServiceDispatcherClient> dispatcher_client_;
};
TEST_F(DispatcherClientTest, GetDataServiceMetadata) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
DataServiceMetadata result;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceMetadata(dataset_id, result));
EXPECT_THAT(result, EqualsProto(metadata));
}
TEST_F(DispatcherClientTest, DatasetDoesNotExist) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
EXPECT_THAT(
dispatcher_client_->GetDataServiceMetadata(
"not-found", metadata),
StatusIs(error::NOT_FOUND, HasSubstr("Dataset id not-found not found")));
}
TEST_F(DispatcherClientTest, SnapshotAlreadyStarted) {
TF_ASSERT_OK(SetUpTfDataService(1));
DistributedSnapshotMetadata metadata =
CreateDummyDistributedSnapshotMetadata();
std::string directory = LocalTempFilename();
TF_ASSERT_OK(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata));
EXPECT_THAT(
dispatcher_client_->Snapshot(RangeDataset(10), directory, metadata),
StatusIs(error::ALREADY_EXISTS, HasSubstr("already started")));
}
TEST_F(DispatcherClientTest, GetDataServiceConfig) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceConfig config;
TF_ASSERT_OK(dispatcher_client_->GetDataServiceConfig(config));
EXPECT_EQ(config.deployment_mode(), DEPLOYMENT_MODE_COLOCATED);
}
TEST_F(DispatcherClientTest, SnapshotSkeletonWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(Env::Default()->FileExists(CommittedChunksDirectory(path)));
TF_ASSERT_OK(Env::Default()->FileExists(StreamsDirectory(path)));
}
}
TEST_F(DispatcherClientTest, SnapshotMetadataAndDatasetDefWritten) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
for (const auto& path : paths) {
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "snapshot.metadata")));
TF_ASSERT_OK(
Env::Default()->FileExists(io::JoinPath(path, "dataset_def.proto")));
}
}
TEST_F(DispatcherClientTest, SnapshotsInHeartbeat) {
  TF_ASSERT_OK(SetUpTfDataService(/*num_workers=*/1,
                                  /*worker_max_concurrent_snapshots=*/3));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
for (int64_t i = 1; i <= 3; ++i) {
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.snapshot_tasks_size(), i);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
ASSERT_TRUE(paths.count(snapshot_task.base_path()));
ASSERT_EQ(snapshot_task.stream_index(), 0);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplit) {
TF_ASSERT_OK(SetUpTfDataService(1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
for (int64_t i = 0; i < 5; ++i) {
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
test_cluster_->WorkerAddress(0), snapshot_task.base_path(),
snapshot_task.stream_index(),
          /*source_index=*/0, /*repetition_index=*/0, split, local_split_index,
end_of_splits));
EXPECT_EQ(local_split_index, i);
EXPECT_FALSE(end_of_splits);
}
}
}
TEST_F(DispatcherClientTest, GetSnapshotSplitMultipleStreams) {
  TF_ASSERT_OK(SetUpTfDataService(/*num_workers=*/3,
                                  /*worker_max_concurrent_snapshots=*/1));
TF_ASSERT_OK_AND_ASSIGN(absl::flat_hash_set<std::string> paths,
StartDummySnapshots(3));
absl::flat_hash_set<std::string> snapshots_in_progress;
for (int64_t i = 0; i < 3; ++i) {
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(
test_cluster_->WorkerAddress(i));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
EXPECT_EQ(worker_heartbeat_response.snapshot_tasks().size(), 1);
for (const auto& snapshot_task :
worker_heartbeat_response.snapshot_tasks()) {
snapshots_in_progress.insert(snapshot_task.base_path());
GetSnapshotSplitRequest get_snapshot_split_request;
Tensor split;
int64_t local_split_index = 0;
bool end_of_splits = false;
TF_ASSERT_OK(dispatcher_client_->GetSnapshotSplit(
test_cluster_->WorkerAddress(i), snapshot_task.base_path(),
snapshot_task.stream_index(),
          /*source_index=*/0, /*repetition_index=*/0, split, local_split_index,
end_of_splits));
EXPECT_EQ(local_split_index, 0);
EXPECT_FALSE(end_of_splits);
}
}
EXPECT_EQ(snapshots_in_progress, paths);
}
TEST_F(DispatcherClientTest, RegisterDatasetWithExplicitId) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id2,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, dataset_id2);
}
TEST_F(DispatcherClientTest, DatasetsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(
const std::string dataset_id1,
RegisterDataset(RangeDataset(10), metadata,
"dataset_id"));
EXPECT_EQ(dataset_id1, "dataset_id");
metadata.set_cardinality(kInfiniteCardinality);
EXPECT_THAT(
RegisterDataset(InfiniteDataset(), metadata,
"dataset_id"),
StatusIs(
error::INVALID_ARGUMENT,
HasSubstr(
"Datasets with the same ID should have the same structure")));
}
TEST_F(DispatcherClientTest, EnableCrossTrainerCache) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(kInfiniteCardinality);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(InfiniteDataset(), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id));
int64_t iteration_client_id;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateIteration(
      job_id, /*repetition=*/0, iteration_client_id));
WorkerHeartbeatRequest worker_heartbeat_request;
worker_heartbeat_request.set_worker_address(test_cluster_->WorkerAddress(0));
TF_ASSERT_OK_AND_ASSIGN(
WorkerHeartbeatResponse worker_heartbeat_response,
dispatcher_client_->WorkerHeartbeat(worker_heartbeat_request));
ASSERT_EQ(worker_heartbeat_response.new_tasks_size(), 1);
EXPECT_TRUE(worker_heartbeat_response.new_tasks(0).use_cross_trainer_cache());
}
TEST_F(DispatcherClientTest, CreateNamedJob) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
int64_t job_id_1 = -1;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_1));
int64_t job_id_2 = -2;
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/true, TARGET_WORKERS_AUTO, job_id_2));
ASSERT_EQ(job_id_1, job_id_2);
}
TEST_F(DispatcherClientTest, NamedJobsDoNotMatch) {
TF_ASSERT_OK(SetUpTfDataService(1));
DataServiceMetadata metadata = GetDefaultMetadata();
metadata.set_cardinality(10);
TF_ASSERT_OK_AND_ASSIGN(const std::string dataset_id,
RegisterDataset(RangeDataset(10), metadata));
int64_t job_id = 0;
ProcessingModeDef processing_mode;
processing_mode.set_sharding_policy(ProcessingModeDef::OFF);
std::string job_name = "job";
TF_ASSERT_OK(dispatcher_client_->GetOrCreateJob(
dataset_id, processing_mode, job_name,
      /*num_consumers=*/std::nullopt,
      /*use_cross_trainer_cache=*/false, TARGET_WORKERS_AUTO, job_id));
processing_mode.set_sharding_policy(ProcessingModeDef::DYNAMIC);
EXPECT_THAT(
dispatcher_client_->GetOrCreateJob(dataset_id, processing_mode, job_name,
                                         /*num_consumers=*/std::nullopt,
                                         /*use_cross_trainer_cache=*/true,
TARGET_WORKERS_AUTO, job_id),
StatusIs(error::INVALID_ARGUMENT,
AllOf(HasSubstr("but found an existing job with different "
"parameters: "),
ContainsRegex("Existing processing mode: <\\w*std::nullopt, dataset_id));
EXPECT_EQ(dataset_id, "1000");
std::string datasets_dir = tsl::io::JoinPath(config.work_dir, "datasets");
FileSystemDatasetStore dataset_store(datasets_dir);
TF_ASSERT_OK(dataset_store.Put("1001", dataset_def));
if (requested_dataset_id.has_value()) {
TF_ASSERT_OK(dataset_store.Put(*requested_dataset_id, dataset_def));
}
TF_ASSERT_OK(dispatcher_client_->RegisterDataset(
dataset_def, GetDefaultMetadata(),
requested_dataset_id, dataset_id));
if (requested_dataset_id.has_value()) {
EXPECT_EQ(dataset_id, *requested_dataset_id);
} else {
EXPECT_EQ(dataset_id, "1001");
}
}
INSTANTIATE_TEST_SUITE_P(DatasetId, DispatcherClientTest_DatasetId,
::testing::Values(std::nullopt, "dataset_id"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/dispatcher_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7a0dd09-19cd-4e14-ba62-dc1cebe0e8e2 | cpp | tensorflow/tensorflow | schema | tensorflow/core/summary/schema.cc | tensorflow/core/summary/schema_test.cc | #include "tensorflow/core/summary/schema.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
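// Prepares a single SQL statement and executes it once.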
Status Run(Sqlite* db, const char* sql) {
SqliteStatement stmt;
TF_RETURN_IF_ERROR(db->Prepare(sql, &stmt));
return stmt.StepAndReset();
}
}
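// Creates TensorBoard's SQLite tables and unique indexes if they do not
// already exist, after stamping the database with the TensorBoard
// application id. Errors from individual statements accumulate into the
// returned Status.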
Status SetupTensorboardSqliteDb(Sqlite* db) {
TF_RETURN_IF_ERROR(
db->PrepareOrDie(strings::StrCat("PRAGMA application_id=",
kTensorboardSqliteApplicationId))
.StepAndReset());
db->PrepareOrDie("PRAGMA user_version=0").StepAndResetOrDie();
Status s;
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Ids (
id INTEGER PRIMARY KEY
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Descriptions (
id INTEGER PRIMARY KEY,
description TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tensors (
rowid INTEGER PRIMARY KEY,
series INTEGER,
step INTEGER,
dtype INTEGER,
computed_time REAL,
shape TEXT,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TensorSeriesStepIndex
ON
Tensors (series, step)
WHERE
series IS NOT NULL
AND step IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS TensorStrings (
rowid INTEGER PRIMARY KEY,
tensor_rowid INTEGER NOT NULL,
idx INTEGER NOT NULL,
data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TensorStringIndex
ON TensorStrings (tensor_rowid, idx)
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Tags (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
tag_id INTEGER NOT NULL,
inserted_time DOUBLE,
tag_name TEXT,
display_name TEXT,
plugin_name TEXT,
plugin_data BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS TagIdIndex
ON Tags (tag_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS
TagRunNameIndex
ON
Tags (run_id, tag_name)
WHERE
run_id IS NOT NULL
AND tag_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Runs (
rowid INTEGER PRIMARY KEY,
experiment_id INTEGER,
run_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
finished_time REAL,
run_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunIdIndex
ON Runs (run_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS RunNameIndex
ON Runs (experiment_id, run_name)
WHERE run_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Experiments (
rowid INTEGER PRIMARY KEY,
user_id INTEGER,
experiment_id INTEGER NOT NULL,
inserted_time REAL,
started_time REAL,
is_watching INTEGER,
experiment_name TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentIdIndex
ON Experiments (experiment_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS ExperimentNameIndex
ON Experiments (user_id, experiment_name)
WHERE experiment_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Users (
rowid INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
inserted_time REAL,
user_name TEXT,
email TEXT
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserIdIndex
ON Users (user_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserNameIndex
ON Users (user_name)
WHERE user_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS UserEmailIndex
ON Users (email)
WHERE email IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Graphs (
rowid INTEGER PRIMARY KEY,
run_id INTEGER,
graph_id INTEGER NOT NULL,
inserted_time REAL,
graph_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphIdIndex
ON Graphs (graph_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS GraphRunIndex
ON Graphs (run_id)
WHERE run_id IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS Nodes (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
node_name TEXT,
op TEXT,
device TEXT,
node_def BLOB
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeIdIndex
ON Nodes (graph_id, node_id)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeNameIndex
ON Nodes (graph_id, node_name)
WHERE node_name IS NOT NULL
)sql"));
s.Update(Run(db, R"sql(
CREATE TABLE IF NOT EXISTS NodeInputs (
rowid INTEGER PRIMARY KEY,
graph_id INTEGER NOT NULL,
node_id INTEGER NOT NULL,
idx INTEGER NOT NULL,
input_node_id INTEGER NOT NULL,
input_node_idx INTEGER,
is_control INTEGER
)
)sql"));
s.Update(Run(db, R"sql(
CREATE UNIQUE INDEX IF NOT EXISTS NodeInputsIndex
ON NodeInputs (graph_id, node_id, idx)
)sql"));
return s;
}
} | #include "tensorflow/core/summary/schema.h"
#include <memory>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(SchemaTest, SmokeTestTensorboardSchema) {
Sqlite* db;
TF_ASSERT_OK(Sqlite::Open(":memory:", SQLITE_OPEN_READWRITE, &db));
core::ScopedUnref unref_db(db);
TF_ASSERT_OK(SetupTensorboardSqliteDb(db));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/schema.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/summary/schema_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
558067bd-0e7a-47f3-aa77-bf649891f270 | cpp | google/arolla | logic_operators | arolla/qexpr/operators/core/logic_operators.h | arolla/qexpr/operators/core/logic_operators_test.cc | #ifndef AROLLA_OPERATORS_CORE_LOGIC_OPERATORS_H_
#define AROLLA_OPERATORS_CORE_LOGIC_OPERATORS_H_
#include <memory>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operator_errors.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/common_qtype.h"
#include "arolla/util/status.h"
#include "arolla/util/unit.h"
namespace arolla {
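// Presence-test functor: returns kPresent iff the optional argument holds a
// value.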
struct HasOp {
using run_on_missing = std::true_type;
template <typename T>
std::enable_if_t<is_optional_v<T>, OptionalUnit> operator()(
const T& arg) const {
return OptionalUnit{arg.present};
}
};
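// Functor behind core.presence_or: returns `lhs` if present, otherwise
// `rhs`. The overloads taking a callable evaluate `rhs` lazily (the callable
// may return absl::StatusOr), so the fallback is only computed when needed.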
struct PresenceOrOp {
using run_on_missing = std::true_type;
template <typename T>
T operator()(const OptionalValue<T>& lhs, const T& rhs) const {
return lhs.present ? lhs.value : rhs;
}
template <typename T>
OptionalValue<T> operator()(const OptionalValue<T>& lhs,
const OptionalValue<T>& rhs) const {
return lhs.present ? lhs : rhs;
}
template <typename T>
T operator()(const T& lhs, const OptionalValue<T>& rhs) const {
return lhs;
}
template <typename T>
T operator()(const T& lhs, const T& rhs) const {
return lhs;
}
template <typename T, class Fn>
auto operator()(const OptionalValue<T>& lhs, const Fn& rhs) const {
using result_t = strip_statusor_t<std::decay_t<decltype(rhs())>>;
if constexpr (std::is_same_v<result_t, T>) {
return lhs.present ? lhs.value : rhs();
} else {
return lhs.present ? lhs : rhs();
}
}
template <typename T, class Fn>
T operator()(const T& lhs, const Fn&) const {
return lhs;
}
};
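// Functor behind core.presence_and: masks `lhs` by the presence of `rhs`,
// yielding missing when `rhs` is kMissing. The callable `lhs` overloads
// defer evaluation until the mask is known to be present.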
struct PresenceAndOp {
using run_on_missing = std::true_type;
template <typename T, std::enable_if_t<!std::is_invocable_v<T>, bool> = true>
const T& operator()(const T& lhs, Unit) const {
return lhs;
}
template <typename T, std::enable_if_t<!std::is_invocable_v<T>, bool> = true>
OptionalValue<T> operator()(const T& lhs, OptionalUnit rhs) const {
return rhs ? lhs : OptionalValue<T>{};
}
template <typename T>
OptionalValue<T> operator()(const OptionalValue<T>& lhs,
OptionalUnit rhs) const {
return rhs ? lhs : OptionalValue<T>{};
}
template <typename Fn, std::enable_if_t<std::is_invocable_v<Fn>, bool> = true>
auto operator()(const Fn& lhs, Unit) const {
return lhs();
}
template <typename Fn, std::enable_if_t<std::is_invocable_v<Fn>, bool> = true>
auto operator()(const Fn& lhs, OptionalUnit rhs) const {
using T = strip_optional_t<strip_statusor_t<std::decay_t<decltype(lhs())>>>;
constexpr bool is_status =
IsStatusOrT<std::decay_t<decltype(lhs())>>::value;
constexpr bool is_optional =
is_optional_v<strip_statusor_t<std::decay_t<decltype(lhs())>>>;
if constexpr (is_status) {
if constexpr (is_optional) {
return rhs ? lhs() : OptionalValue<T>{};
} else {
return rhs ? MakeStatusOrOptionalValue(lhs()) : OptionalValue<T>{};
}
} else {
return rhs ? lhs() : OptionalValue<T>{};
}
}
};
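// Functor behind core.where: selects `a` when the condition is present and
// `b` otherwise. Either branch may be passed as a callable (possibly
// returning absl::StatusOr) to obtain short-circuit evaluation.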
struct WhereOp {
using run_on_missing = std::true_type;
template <typename T, std::enable_if_t<!std::is_invocable_v<T>, bool> = true>
T operator()(OptionalUnit c, const T& a, const T& b) const {
return c.present ? a : b;
}
template <
typename AFn, typename BFn,
std::enable_if_t<std::is_invocable_v<AFn> && std::is_invocable_v<BFn>,
bool> = true>
auto operator()(OptionalUnit c, const AFn& a, const BFn& b) const {
return c.present ? a() : b();
}
template <
typename AFn, typename T,
std::enable_if_t<std::is_invocable_v<AFn> && !std::is_invocable_v<T>,
bool> = true>
auto operator()(OptionalUnit c, const AFn& a, const T& b) const {
return c.present ? a() : b;
}
template <
typename BFn, typename T,
std::enable_if_t<!std::is_invocable_v<T> && std::is_invocable_v<BFn>,
bool> = true>
auto operator()(OptionalUnit c, const T& a, const BFn& b) const {
return c.present ? a : b();
}
};
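// Functor behind core._presence_and_or: computes `(a & b) | c`, i.e. yields
// `a` when `a` is present and the mask `b` is set, otherwise `c`. A callable
// `c` is evaluated lazily.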
struct PresenceAndOrOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalValue<T> operator()(const OptionalValue<T>& a, OptionalUnit b,
const OptionalValue<T>& c) const {
return b && a.present ? a : c;
}
template <typename T>
T operator()(const OptionalValue<T>& a, OptionalUnit b, const T& c) const {
return b && a.present ? a.value : c;
}
template <typename T>
OptionalValue<T> operator()(const T& a, OptionalUnit b,
const OptionalValue<T>& c) const {
return b ? MakeOptionalValue(a) : c;
}
template <typename T>
T operator()(const T& a, OptionalUnit b, const T& c) const {
return b ? a : c;
}
template <typename T, class Fn>
auto operator()(const OptionalValue<T>& a, OptionalUnit b,
const Fn& c) const {
using result_t = strip_statusor_t<std::decay_t<decltype(c())>>;
if constexpr (std::is_same_v<result_t, T>) {
return b && a.present ? a.value : c();
} else {
return b && a.present ? a : c();
}
}
template <typename T, class Fn>
auto operator()(const T& a, OptionalUnit b, const Fn& c) const {
using result_t = strip_statusor_t<std::decay_t<decltype(c())>>;
if constexpr (std::is_same_v<result_t, T>) {
return b ? a : c();
} else {
return b ? MakeOptionalValue(a) : c();
}
}
};
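// Functor behind core.presence_not: returns kPresent iff the argument is
// missing.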
struct PresenceNotOp {
using run_on_missing = std::true_type;
template <class T>
OptionalUnit operator()(const OptionalValue<T>& arg) const {
return OptionalUnit{!arg.present};
}
};
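// Pointwise comparison functors (core.equal, core.not_equal, core.less,
// core.less_equal): each returns a mask that is kPresent iff the relation
// holds.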
struct MaskEqualOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalUnit operator()(const T& lhs, const T& rhs) const {
return OptionalUnit{lhs == rhs};
}
};
struct MaskNotEqualOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalUnit operator()(const T& lhs, const T& rhs) const {
return OptionalUnit{lhs != rhs};
}
};
struct MaskLessOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalUnit operator()(const T& lhs, const T& rhs) const {
return OptionalUnit{lhs < rhs};
}
};
struct MaskLessEqualOp {
using run_on_missing = std::true_type;
template <typename T>
OptionalUnit operator()(const T& lhs, const T& rhs) const {
return OptionalUnit{(lhs < rhs) || (lhs == rhs)};
}
};
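// Placeholder family for core._short_circuit_where: it performs type
// inference (an OPTIONAL_UNIT condition plus a common type for the two
// branches) but its operator cannot be bound, so it must be replaced with
// real short-circuit logic before evaluation.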
class FakeShortCircuitWhereOperatorFamily final : public OperatorFamily {
absl::StatusOr<OperatorPtr> DoGetOperator(
absl::Span<const QTypePtr> input_types,
QTypePtr output_type) const final {
auto not_defined_error = [&](absl::string_view detail) {
return OperatorNotDefinedError("core._short_circuit_where", input_types,
detail);
};
if (input_types.size() < 3) {
return not_defined_error("expected 3 arguments");
}
if (input_types[0] != GetQType<OptionalUnit>()) {
return not_defined_error("first argument must be OPTIONAL_UNIT");
}
QTypePtr true_type = input_types[1];
QTypePtr false_type = input_types[2];
const QType* common_type =
        CommonQType(true_type, false_type, /*enable_broadcasting=*/false);
if (common_type == nullptr) {
return not_defined_error("no common type between operator branches");
}
return EnsureOutputQTypeMatches(
std::make_unique<FakeShortCircuitWhereOperator>(
QExprOperatorSignature::Get(
{GetQType<OptionalUnit>(), common_type, common_type},
common_type)),
input_types, output_type);
}
class FakeShortCircuitWhereOperator final : public QExprOperator {
public:
explicit FakeShortCircuitWhereOperator(
const QExprOperatorSignature* signature)
: QExprOperator(signature) {}
private:
absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot) const override {
return absl::InternalError(
"FakeShortCircuitWhereOperator is not supposed to be used");
}
};
};
}
#endif | #include "arolla/qexpr/operators/core/logic_operators.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Pointee;
using ::testing::Property;
using Oi64 = OptionalValue<int64_t>;
using DAi64 = DenseArray<int64_t>;
const int64_t one = 1;
const int64_t two = 2;
const Oi64 optional_one = 1;
const Oi64 optional_two = 2;
const Oi64 missing;
TEST(LogicOperatorsTest, PresenceOr) {
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_or", kPresent, kPresent),
IsOkAndHolds(kPresent));
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_or", kPresent, kMissing),
IsOkAndHolds(kPresent));
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_or", kMissing, kMissing),
IsOkAndHolds(kMissing));
EXPECT_THAT(InvokeOperator<int64_t>("core.presence_or", one, one),
IsOkAndHolds(one));
EXPECT_THAT(InvokeOperator<int64_t>("core.presence_or", one, optional_two),
IsOkAndHolds(one));
EXPECT_THAT(InvokeOperator<int64_t>("core.presence_or", missing, one),
IsOkAndHolds(one));
EXPECT_THAT(InvokeOperator<int64_t>("core.presence_or", optional_two, one),
IsOkAndHolds(two));
EXPECT_THAT(
InvokeOperator<Oi64>("core.presence_or", optional_two, optional_one),
IsOkAndHolds(optional_two));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_or", optional_two, missing),
IsOkAndHolds(optional_two));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_or", missing, optional_two),
IsOkAndHolds(optional_two));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_or", missing, missing),
IsOkAndHolds(missing));
}
TEST(LogicOperatorsTest, LazyPresenceOrFunctor) {
auto as_fn = [](auto x) { return [x]() { return x; }; };
auto as_no_call_fn = [](auto x) {
return [x]() {
ADD_FAILURE() << "function shouldn't be called";
return x;
};
};
EXPECT_EQ(PresenceOrOp{}(kPresent, as_no_call_fn(kPresent)), kPresent);
EXPECT_EQ(PresenceOrOp{}(kPresent, as_no_call_fn(kMissing)), kPresent);
EXPECT_EQ(PresenceOrOp{}(kMissing, as_fn(kMissing)), kMissing);
EXPECT_EQ(PresenceOrOp{}(one, as_no_call_fn(one)), one);
EXPECT_EQ(PresenceOrOp{}(one, as_no_call_fn(optional_two)), one);
EXPECT_EQ(PresenceOrOp{}(missing, as_fn(one)), one);
EXPECT_EQ(PresenceOrOp{}(optional_two, as_no_call_fn(one)), two);
EXPECT_EQ(PresenceOrOp{}(optional_two, as_no_call_fn(optional_one)),
optional_two);
EXPECT_EQ(PresenceOrOp{}(optional_two, as_no_call_fn(missing)), optional_two);
EXPECT_EQ(PresenceOrOp{}(missing, as_fn(optional_two)), optional_two);
EXPECT_EQ(PresenceOrOp{}(missing, as_fn(missing)), missing);
}
TEST(LogicOperatorsTest, WhereOperatorFamily) {
EXPECT_THAT(OperatorRegistry::GetInstance()->LookupOperator(
"core.where",
{GetQType<OptionalUnit>(), GetQType<int32_t>(),
GetOptionalQType<int64_t>()},
GetOptionalQType<int64_t>()),
IsOkAndHolds(Pointee(Property(
&QExprOperator::signature,
Eq(QExprOperatorSignature::Get(
{GetQType<OptionalUnit>(), GetOptionalQType<int64_t>(),
GetOptionalQType<int64_t>()},
GetOptionalQType<int64_t>()))))));
EXPECT_THAT(OperatorRegistry::GetInstance()->LookupOperator(
"core.where",
{GetQType<OptionalUnit>(), GetQType<int32_t>(),
GetDenseArrayQType<int64_t>()},
GetDenseArrayQType<int64_t>()),
IsOkAndHolds(Pointee(Property(
&QExprOperator::signature,
Eq(QExprOperatorSignature::Get(
{GetQType<OptionalUnit>(), GetDenseArrayQType<int64_t>(),
GetDenseArrayQType<int64_t>()},
GetDenseArrayQType<int64_t>()))))));
EXPECT_THAT(OperatorRegistry::GetInstance()->LookupOperator(
"core.where",
{GetQType<OptionalUnit>(), GetQType<int32_t>(),
GetArrayQType<int64_t>()},
GetArrayQType<int64_t>()),
IsOkAndHolds(Pointee(Property(
&QExprOperator::signature,
Eq(QExprOperatorSignature::Get(
{GetQType<OptionalUnit>(), GetArrayQType<int64_t>(),
GetArrayQType<int64_t>()},
GetArrayQType<int64_t>()))))));
EXPECT_THAT(
OperatorRegistry::GetInstance()->LookupOperator(
"core.where",
{GetDenseArrayQType<Unit>(), GetQType<int32_t>(),
GetQType<int64_t>()},
GetDenseArrayQType<int64_t>()),
IsOkAndHolds(Pointee(Property(
&QExprOperator::signature,
Eq(QExprOperatorSignature::Get(
{GetDenseArrayQType<Unit>(), GetDenseArrayQType<int64_t>(),
GetDenseArrayQType<int64_t>()},
GetDenseArrayQType<int64_t>()))))));
EXPECT_THAT(
OperatorRegistry::GetInstance()->LookupOperator(
"core.where",
{GetArrayQType<Unit>(), GetQType<int32_t>(), GetQType<int64_t>()},
GetArrayQType<int64_t>()),
IsOkAndHolds(
Pointee(Property(&QExprOperator::signature,
Eq(QExprOperatorSignature::Get(
{GetArrayQType<Unit>(), GetArrayQType<int64_t>(),
GetArrayQType<int64_t>()},
GetArrayQType<int64_t>()))))));
}
TEST(LogicOperatorsTest, LazyWhereFunctor) {
auto as_fn = [](auto x) { return [x]() { return x; }; };
auto as_no_call_fn = [](auto x) {
return [x]() {
ADD_FAILURE() << "function shouldn't be called";
return x;
};
};
EXPECT_EQ(WhereOp{}(kPresent, as_fn(kPresent), as_no_call_fn(kPresent)),
kPresent);
EXPECT_EQ(WhereOp{}(kPresent, as_fn(kMissing), as_no_call_fn(kPresent)),
kMissing);
EXPECT_EQ(WhereOp{}(kMissing, as_no_call_fn(kPresent), as_fn(kPresent)),
kPresent);
EXPECT_EQ(WhereOp{}(kMissing, as_no_call_fn(kPresent), as_fn(kMissing)),
kMissing);
EXPECT_EQ(WhereOp{}(kPresent, kPresent, as_no_call_fn(kPresent)), kPresent);
EXPECT_EQ(WhereOp{}(kPresent, kMissing, as_no_call_fn(kPresent)), kMissing);
EXPECT_EQ(WhereOp{}(kMissing, as_no_call_fn(kPresent), kPresent), kPresent);
EXPECT_EQ(WhereOp{}(kMissing, as_no_call_fn(kPresent), kMissing), kMissing);
auto as_status_fn = [](auto x) {
return [x]() { return absl::StatusOr<OptionalUnit>(x); };
};
EXPECT_THAT(WhereOp{}(kPresent, as_status_fn(kPresent), kPresent),
IsOkAndHolds(kPresent));
EXPECT_THAT(WhereOp{}(kMissing, kPresent, as_status_fn(kPresent)),
IsOkAndHolds(kPresent));
EXPECT_THAT(
WhereOp{}(kPresent, as_status_fn(kPresent), as_no_call_fn(kPresent)),
IsOkAndHolds(kPresent));
EXPECT_THAT(
WhereOp{}(kMissing, as_no_call_fn(kPresent), as_status_fn(kPresent)),
IsOkAndHolds(kPresent));
EXPECT_THAT(
WhereOp{}(kPresent, as_status_fn(kPresent), as_status_fn(kPresent)),
IsOkAndHolds(kPresent));
auto as_error_status_fn = []() {
return []() {
return absl::StatusOr<OptionalUnit>(absl::UnimplementedError(""));
};
};
EXPECT_THAT(WhereOp{}(kPresent, as_status_fn(kPresent), as_error_status_fn()),
IsOkAndHolds(kPresent));
EXPECT_THAT(WhereOp{}(kPresent, as_error_status_fn(), as_status_fn(kPresent))
.status(),
StatusIs(absl::StatusCode::kUnimplemented));
EXPECT_THAT(
WhereOp{}(kPresent, as_error_status_fn(), as_fn(kPresent)).status(),
StatusIs(absl::StatusCode::kUnimplemented));
EXPECT_THAT(WhereOp{}(kPresent, as_error_status_fn(), kPresent).status(),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST(LogicOperatorsTest, LazyPresenceOrWithStatusFunctor) {
auto as_fn = [](auto x) {
return [x]() { return absl::StatusOr<std::decay_t<decltype(x)>>(x); };
};
EXPECT_THAT(PresenceOrOp{}(kPresent, as_fn(kPresent)),
IsOkAndHolds(kPresent));
EXPECT_THAT(PresenceOrOp{}(kPresent, as_fn(kMissing)),
IsOkAndHolds(kPresent));
EXPECT_THAT(PresenceOrOp{}(kMissing, as_fn(kMissing)),
IsOkAndHolds(kMissing));
EXPECT_THAT(PresenceOrOp{}(one, as_fn(one)), Eq(one));
EXPECT_THAT(PresenceOrOp{}(one, as_fn(optional_two)), Eq(one));
EXPECT_THAT(PresenceOrOp{}(missing, as_fn(one)), IsOkAndHolds(one));
EXPECT_THAT(PresenceOrOp{}(optional_two, as_fn(one)), IsOkAndHolds(two));
EXPECT_THAT(PresenceOrOp{}(optional_two, as_fn(optional_one)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceOrOp{}(optional_two, as_fn(missing)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceOrOp{}(missing, as_fn(optional_two)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceOrOp{}(missing, as_fn(missing)), IsOkAndHolds(missing));
auto error_fn = []() {
return absl::StatusOr<OptionalUnit>(absl::InternalError("fake"));
};
EXPECT_THAT(PresenceOrOp{}(kMissing, error_fn),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
EXPECT_THAT(PresenceOrOp{}(kPresent, error_fn), IsOkAndHolds(kPresent));
}
TEST(LogicOperatorsTest, PresenceAndOr) {
EXPECT_THAT(
InvokeOperator<int64_t>("core._presence_and_or", one, kPresent, two),
IsOkAndHolds(one));
EXPECT_THAT(
InvokeOperator<int64_t>("core._presence_and_or", one, kMissing, two),
IsOkAndHolds(two));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", optional_one,
kPresent, optional_two),
IsOkAndHolds(optional_one));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", optional_one,
kMissing, optional_two),
IsOkAndHolds(optional_two));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", optional_one,
kPresent, missing),
IsOkAndHolds(optional_one));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", missing, kMissing,
optional_two),
IsOkAndHolds(optional_two));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", missing, kPresent,
optional_two),
IsOkAndHolds(optional_two));
EXPECT_THAT(
InvokeOperator<int64_t>("core._presence_and_or", missing, kPresent, two),
IsOkAndHolds(two));
EXPECT_THAT(InvokeOperator<Oi64>("core._presence_and_or", optional_one,
kMissing, missing),
IsOkAndHolds(missing));
}
TEST(LogicOperatorsTest, LazyPresenceAndOrFunctor) {
auto as_fn = [](auto x) { return [x]() { return x; }; };
auto as_no_call_fn = [](auto x) {
return [x]() {
ADD_FAILURE() << "function shouldn't be called";
return x;
};
};
EXPECT_EQ(PresenceAndOrOp{}(one, kPresent, as_no_call_fn(two)), one);
EXPECT_EQ(PresenceAndOrOp{}(one, kMissing, as_fn(two)), two);
EXPECT_EQ(
PresenceAndOrOp{}(optional_one, kPresent, as_no_call_fn(optional_two)),
optional_one);
EXPECT_EQ(PresenceAndOrOp{}(optional_one, kMissing, as_fn(optional_two)),
optional_two);
EXPECT_EQ(PresenceAndOrOp{}(optional_one, kPresent, as_no_call_fn(missing)),
optional_one);
EXPECT_EQ(PresenceAndOrOp{}(missing, kMissing, as_fn(optional_two)),
optional_two);
EXPECT_EQ(PresenceAndOrOp{}(missing, kPresent, as_fn(optional_two)),
optional_two);
EXPECT_EQ(PresenceAndOrOp{}(missing, kPresent, as_fn(two)), two);
EXPECT_EQ(PresenceAndOrOp{}(optional_one, kMissing, as_fn(missing)), missing);
}
TEST(LogicOperatorsTest, LazyPresenceAndOrWithStatusFunctor) {
auto as_fn = [](auto x) {
return [x]() { return absl::StatusOr<std::decay_t<decltype(x)>>(x); };
};
EXPECT_THAT(PresenceAndOrOp{}(one, kPresent, as_fn(two)), IsOkAndHolds(one));
EXPECT_THAT(PresenceAndOrOp{}(one, kMissing, as_fn(two)), IsOkAndHolds(two));
EXPECT_THAT(PresenceAndOrOp{}(optional_one, kPresent, as_fn(optional_two)),
IsOkAndHolds(optional_one));
EXPECT_THAT(PresenceAndOrOp{}(optional_one, kMissing, as_fn(optional_two)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceAndOrOp{}(optional_one, kPresent, as_fn(missing)),
IsOkAndHolds(optional_one));
EXPECT_THAT(PresenceAndOrOp{}(missing, kMissing, as_fn(optional_two)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceAndOrOp{}(missing, kPresent, as_fn(optional_two)),
IsOkAndHolds(optional_two));
EXPECT_THAT(PresenceAndOrOp{}(missing, kPresent, as_fn(two)),
IsOkAndHolds(two));
EXPECT_THAT(PresenceAndOrOp{}(optional_one, kMissing, as_fn(missing)),
IsOkAndHolds(missing));
auto error_fn = []() {
return absl::StatusOr<OptionalUnit>(absl::InternalError("fake"));
};
EXPECT_THAT(PresenceAndOrOp{}(kMissing, kMissing, error_fn),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
EXPECT_THAT(PresenceAndOrOp{}(kPresent, kMissing, error_fn),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
EXPECT_THAT(PresenceAndOrOp{}(kPresent, kPresent, error_fn),
IsOkAndHolds(kPresent));
}
TEST(LogicOperatorsTest, PresenceAnd) {
EXPECT_THAT(InvokeOperator<int64_t>("core.presence_and", one, kUnit),
IsOkAndHolds(one));
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_and", kPresent, kPresent),
IsOkAndHolds(kPresent));
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_and", kPresent, kMissing),
IsOkAndHolds(kMissing));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_and", one, kPresent),
IsOkAndHolds(optional_one));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_and", one, kMissing),
IsOkAndHolds(missing));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_and", missing, kPresent),
IsOkAndHolds(missing));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_and", optional_one, kPresent),
IsOkAndHolds(optional_one));
EXPECT_THAT(InvokeOperator<Oi64>("core.presence_and", optional_one, kMissing),
IsOkAndHolds(missing));
}
TEST(LogicOperatorsTest, LazyPresenceAndFunctor) {
auto as_fn = [](auto x) { return [x]() { return x; }; };
auto as_no_call_fn = [](auto x) {
return [x]() {
ADD_FAILURE() << "function shouldn't be called";
return x;
};
};
EXPECT_EQ(PresenceAndOp{}(as_fn(one), kUnit), one);
EXPECT_EQ(PresenceAndOp{}(as_fn(kPresent), kPresent), kPresent);
EXPECT_EQ(PresenceAndOp{}(as_no_call_fn(kPresent), kMissing), kMissing);
EXPECT_EQ(PresenceAndOp{}(as_fn(one), kPresent), optional_one);
EXPECT_EQ(PresenceAndOp{}(as_no_call_fn(one), kMissing), missing);
EXPECT_EQ(PresenceAndOp{}(as_fn(missing), kPresent), missing);
EXPECT_EQ(PresenceAndOp{}(as_fn(optional_one), kPresent), optional_one);
EXPECT_EQ(PresenceAndOp{}(as_no_call_fn(optional_one), kMissing), missing);
}
TEST(LogicOperatorsTest, LazyPresenceAndWithStatusFunctor) {
auto as_fn = [](auto x) {
return [x]() { return absl::StatusOr<std::decay_t<decltype(x)>>(x); };
};
EXPECT_THAT(PresenceAndOp{}(as_fn(one), kUnit), IsOkAndHolds(one));
EXPECT_THAT(PresenceAndOp{}(as_fn(kPresent), kPresent),
IsOkAndHolds(kPresent));
EXPECT_THAT(PresenceAndOp{}(as_fn(kPresent), kMissing),
IsOkAndHolds(kMissing));
EXPECT_THAT(PresenceAndOp{}(as_fn(one), kPresent),
IsOkAndHolds(optional_one));
EXPECT_THAT(PresenceAndOp{}(as_fn(one), kMissing), IsOkAndHolds(missing));
EXPECT_THAT(PresenceAndOp{}(as_fn(missing), kPresent), IsOkAndHolds(missing));
EXPECT_THAT(PresenceAndOp{}(as_fn(optional_one), kPresent),
IsOkAndHolds(optional_one));
EXPECT_THAT(PresenceAndOp{}(as_fn(optional_one), kMissing),
IsOkAndHolds(missing));
auto error_fn = []() {
return absl::StatusOr<OptionalUnit>(absl::InternalError("fake"));
};
EXPECT_THAT(PresenceAndOp{}(error_fn, kPresent),
StatusIs(absl::StatusCode::kInternal, HasSubstr("fake")));
EXPECT_THAT(PresenceAndOp{}(error_fn, kMissing), IsOkAndHolds(kMissing));
}
TEST(LogicOperatorsTest, PresenceNot) {
EXPECT_THAT(
InvokeOperator<OptionalUnit>("core.presence_not._builtin", kPresent),
IsOkAndHolds(kMissing));
EXPECT_THAT(InvokeOperator<OptionalUnit>("core.presence_not._builtin",
OptionalValue<float>{0.0f}),
IsOkAndHolds(kMissing));
EXPECT_THAT(InvokeOperator<OptionalUnit>("core.presence_not._builtin",
OptionalValue<float>{}),
IsOkAndHolds(kPresent));
}
#define EXPECT_LOGIC_OPERATOR(op_name, lhs, rhs, result) \
EXPECT_THAT(InvokeOperator<OptionalUnit>(op_name, lhs, rhs), \
IsOkAndHolds(result));
TEST(LogicOperatorsTest, MaskEqual) {
Text foo("foo");
Text bar("bar");
OptionalValue<Text> optional_foo = Text("foo");
OptionalValue<Text> optional_bar = Text("bar");
OptionalValue<Text> missing_text;
const std::string op_name = "core.equal";
EXPECT_LOGIC_OPERATOR(op_name, one, one, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, one, two, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_one, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_two, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, missing, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, foo, bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, missing_text, kMissing);
}
TEST(LogicOperatorsTest, MaskNotEqual) {
Text foo("foo");
Text bar("bar");
OptionalValue<Text> optional_foo = Text("foo");
OptionalValue<Text> optional_bar = Text("bar");
OptionalValue<Text> missing_text;
const std::string op_name = "core.not_equal";
EXPECT_LOGIC_OPERATOR(op_name, one, one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, one, two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, missing, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, foo, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, bar, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_foo, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_bar, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, missing_text, kMissing);
}
TEST(LogicOperatorsTest, MaskLess) {
Text foo("foo");
Text bar("bar");
OptionalValue<Text> optional_foo = Text("foo");
OptionalValue<Text> optional_bar = Text("bar");
OptionalValue<Text> missing_text;
const std::string op_name = "core.less";
EXPECT_LOGIC_OPERATOR(op_name, one, one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, one, two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, two, one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_two, optional_one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, missing, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, foo, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, bar, foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_foo, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_bar, optional_foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, missing_text, kMissing);
}
TEST(LogicOperatorsTest, MaskLessEqual) {
Text foo("foo");
Text bar("bar");
OptionalValue<Text> optional_foo = Text("foo");
OptionalValue<Text> optional_bar = Text("bar");
OptionalValue<Text> missing_text;
const std::string op_name = "core.less_equal";
EXPECT_LOGIC_OPERATOR(op_name, one, one, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, one, two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, two, one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_two, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, optional_one, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_two, optional_one, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_one, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, missing, missing, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, foo, foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, foo, bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, bar, foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, optional_bar, kMissing);
EXPECT_LOGIC_OPERATOR(op_name, optional_bar, optional_foo, kPresent);
EXPECT_LOGIC_OPERATOR(op_name, optional_foo, missing_text, kMissing);
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/core/logic_operators.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/core/logic_operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
56e50346-9b28-42eb-bd12-b78a59d61181 | cpp | tensorflow/tensorflow | ef57 | third_party/xla/xla/ef57.cc | third_party/xla/xla/ef57_test.cc | #include "xla/ef57.h"
#include <limits>
#include <tuple>
#include "absl/types/span.h"
#include "xla/compiler_macros.h"
#include "tsl/platform/logging.h"
#ifdef XLA_HAS_SSE2
#include <immintrin.h>
#endif
#if defined(XLA_HAS_ARM_NEON) && defined(XLA_HAS_ARM64)
#include <arm_neon.h>
#endif
namespace xla {
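// Splits every double in `input` into two consecutive floats in `output`: a
// high part f32(x) and a low part f32(x - f64(f32(x))). The low part is
// zeroed whenever the high part is non-finite. The AVX, SSE2, and NEON paths
// process 4, 2, and 2 doubles per iteration; the scalar tail falls back to
// SplitF64ToF32.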
void ConvertF64ToEf57(absl::Span<const double> input,
absl::Span<float> output) {
DCHECK_EQ(input.size() * 2, output.size());
#ifdef __AVX__
constexpr int kDoublesPerAvxIteration = sizeof(__m256d) / sizeof(double);
constexpr int kFloatsPerSseRegister = sizeof(__m128) / sizeof(float);
while (input.size() >= kDoublesPerAvxIteration) {
__m256d x = _mm256_loadu_pd(input.data());
__m128 x_hi_f32 = _mm256_cvtpd_ps(x);
__m256d x_hi_f64 = _mm256_cvtps_pd(x_hi_f32);
__m256d x_lo_f64 = _mm256_sub_pd(x, x_hi_f64);
__m128 x_lo_f32 = _mm256_cvtpd_ps(x_lo_f64);
const __m128 inf = _mm_set1_ps(std::numeric_limits<float>::infinity());
__m128 x_hi_exponent = _mm_and_ps(x_hi_f32, inf);
__m128 x_is_finite = _mm_cmplt_ps(x_hi_exponent, inf);
x_lo_f32 = _mm_and_ps(x_lo_f32, x_is_finite);
_mm_storeu_ps(output.data(), _mm_unpacklo_ps(x_hi_f32, x_lo_f32));
output.remove_prefix(kFloatsPerSseRegister);
_mm_storeu_ps(output.data(), _mm_unpackhi_ps(x_hi_f32, x_lo_f32));
output.remove_prefix(kFloatsPerSseRegister);
input.remove_prefix(kDoublesPerAvxIteration);
}
#endif
#ifdef XLA_HAS_SSE2
constexpr int kDoublesPerSseIteration = sizeof(__m128d) / sizeof(double);
constexpr int kFloatsPerSseIteration = sizeof(__m128) / sizeof(float);
while (input.size() >= kDoublesPerSseIteration) {
__m128d x = _mm_loadu_pd(input.data());
__m128 x_hi_f32 = _mm_cvtpd_ps(x);
__m128d x_hi_f64 = _mm_cvtps_pd(x_hi_f32);
__m128d x_lo_f64 = _mm_sub_pd(x, x_hi_f64);
__m128 x_lo_f32 = _mm_cvtpd_ps(x_lo_f64);
const __m128 inf = _mm_set1_ps(std::numeric_limits<float>::infinity());
__m128 x_hi_exponent = _mm_and_ps(x_hi_f32, inf);
__m128 x_is_finite = _mm_cmplt_ps(x_hi_exponent, inf);
x_lo_f32 = _mm_and_ps(x_lo_f32, x_is_finite);
__m128 to_store = _mm_unpacklo_ps(x_hi_f32, x_lo_f32);
_mm_storeu_ps(output.data(), to_store);
input.remove_prefix(kDoublesPerSseIteration);
output.remove_prefix(kFloatsPerSseIteration);
}
#endif
#if defined(XLA_HAS_ARM_NEON) && defined(XLA_HAS_ARM64)
constexpr int kDoublesPerNeonIteration = sizeof(float64x2_t) / sizeof(double);
constexpr int kFloatsPerNeonIteration = sizeof(float32x2x2_t) / sizeof(float);
while (input.size() >= kDoublesPerNeonIteration) {
float64x2_t x = vld1q_f64(input.data());
float32x2_t x_hi_f32 = vcvt_f32_f64(x);
float64x2_t x_hi_f64 = vcvt_f64_f32(x_hi_f32);
float64x2_t x_lo_f64 = vsubq_f64(x, x_hi_f64);
float32x2_t x_lo_f32 = vcvt_f32_f64(x_lo_f64);
uint32x2_t x_is_finite =
vcalt_f32(x_hi_f32, vdup_n_f32(std::numeric_limits<float>::infinity()));
x_lo_f32 = vreinterpret_f32_u32(
vand_u32(vreinterpret_u32_f32(x_lo_f32), x_is_finite));
float32x2x2_t to_store;
to_store.val[0] = x_hi_f32;
to_store.val[1] = x_lo_f32;
vst2_f32(output.data(), to_store);
input.remove_prefix(kDoublesPerNeonIteration);
output.remove_prefix(kFloatsPerNeonIteration);
}
#endif  // defined(XLA_HAS_ARM_NEON) && defined(XLA_HAS_ARM64)
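  // Scalar tail: convert any remaining elements one at a time.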
while (input.size() >= 1) {
std::tie(output[0], output[1]) = SplitF64ToF32(input.front());
input.remove_prefix(1);
output.remove_prefix(2);
}
}
} | #include "xla/ef57.h"
#include <cmath>
#include <limits>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log_streamer.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "xla/test.h"
namespace xla {
namespace {
TEST(Ef57Test, DoubleMax) {
auto [high, low] = SplitF64ToF32(std::numeric_limits<double>::max());
EXPECT_EQ(high, std::numeric_limits<float>::infinity());
EXPECT_EQ(low, 0.0f);
}
TEST(Ef57Test, Overflow) {
auto [high, low] = SplitF64ToF32(0x1.ffffffp+127);
EXPECT_EQ(high, std::numeric_limits<float>::infinity());
EXPECT_EQ(low, 0.0f);
}
TEST(Ef57Test, CheckPrecision) {
auto [high, low] = SplitF64ToF32(2.0 - 0x1p-52);
EXPECT_EQ(high, 2.0f);
EXPECT_EQ(low, -0x1p-52f);
}
TEST(Ef57Test, SimpleArray) {
std::vector<double> inputs(127);
absl::BitGen gen;
for (double& input : inputs) {
input = absl::Uniform<float>(gen, 0.0f, 1.0f);
}
std::vector<float> outputs(inputs.size() * 2);
ConvertF64ToEf57(inputs, absl::MakeSpan(outputs));
for (int i = 0; i < inputs.size(); ++i) {
EXPECT_EQ(outputs[i * 2], inputs[i]);
EXPECT_EQ(outputs[i * 2 + 1], 0.0f);
}
}
TEST(Ef57Test, RelativeSplit) {
const float distance = std::scalbnf(1.0f, std::numeric_limits<float>::digits);
std::vector<double> inputs(127);
absl::BitGen gen;
for (double& input : inputs) {
input = absl::Uniform<double>(gen, 0.0, 1.0);
}
std::vector<float> outputs(inputs.size() * 2);
ConvertF64ToEf57(inputs, absl::MakeSpan(outputs));
for (int i = 0; i < outputs.size(); i += 2) {
auto most_significant = outputs[i];
auto least_significant = outputs[i + 1];
auto most_significant_mag = std::fabs(most_significant);
auto least_significant_mag = std::fabs(least_significant);
EXPECT_FALSE(std::isnan(most_significant_mag));
if (most_significant_mag == 0.0f) {
EXPECT_EQ(least_significant_mag, 0.0f);
} else {
EXPECT_GT(most_significant_mag, least_significant_mag * distance);
}
}
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ef57.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/ef57_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d19e6e02-4edc-4ad0-84de-f8a2f4b058e1 | cpp | tensorflow/tensorflow | autotune_buffer_sizes | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.cc | tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes_test.cc | #include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBufferSizeMin[] = "buffer_size_min";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
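// Dataset ops with internal asynchrony that benefit from a downstream
// prefetch.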
constexpr std::array<const char*, 8> kAsyncDatasetOps = {
"ExperimentalMapAndBatchDataset",
"MapAndBatchDataset",
"ParallelBatchDataset",
"ParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDataset",
"ParallelMapDatasetV2",
};
}  // namespace
Status AutotuneBufferSizes::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization autotune_buffer_sizes is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
NodeDef* autotune_value =
graph_utils::AddScalarConstNode(data::model::kAutotune, &graph);
absl::flat_hash_set<string> already_prefetched;
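  // First pass: rewrite the constant buffer size of every existing
  // PrefetchDataset node to the autotune sentinel, preserving the original
  // value as the `buffer_size_min` attribute, and remember which inputs are
  // already prefetched.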
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
NodeDef* buffer_size_node = graph.GetNode(node.input(1));
if (buffer_size_node->op() == "Const") {
int64_t initial_buffer_size =
buffer_size_node->attr().at("value").tensor().int64_val(0);
if (initial_buffer_size != data::model::kAutotune) {
TF_RETURN_IF_ERROR(graph.UpdateFanin(node.name(),
{buffer_size_node->name(), 0},
{autotune_value->name(), 0}));
node.mutable_attr()->at(kBufferSizeMin).set_i(initial_buffer_size);
stats->num_changes++;
}
} else {
return absl::FailedPreconditionError(
"The autotune_buffer_sizes rewrite does not currently support "
"non-constant buffer_size input.");
}
NodeDef* prefetched_node = graph_utils::GetInputNode(node, graph);
if (prefetched_node) {
already_prefetched.insert(prefetched_node->name());
}
}
}
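  // Second pass: collect asynchronous datasets that are not already followed
  // by a prefetch.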
std::vector<const NodeDef*> async_datasets;
for (const NodeDef& node : item.graph.node()) {
if (already_prefetched.find(node.name()) != already_prefetched.end()) {
continue;
}
for (const auto& async_dataset_op : kAsyncDatasetOps) {
if (node.op() == async_dataset_op) {
async_datasets.push_back(&node);
stats->num_changes++;
break;
}
}
}
if (async_datasets.empty()) return absl::OkStatus();
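  // Inject a PrefetchDataset with an autotuned buffer size after each
  // collected asynchronous dataset.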
for (const NodeDef* async_dataset_node : async_datasets) {
NodeDef prefetch_node;
graph_utils::SetUniqueGraphNodeName(
strings::StrCat("inject/prefetch_", async_dataset_node->name()),
graph.graph(), &prefetch_node);
prefetch_node.set_op(kPrefetchDataset);
*prefetch_node.mutable_input()->Add() = async_dataset_node->name();
*prefetch_node.mutable_input()->Add() = autotune_value->name();
graph_utils::CopyShapesAndTypesAttrs(*async_dataset_node, &prefetch_node);
auto* added_node = graph.AddNode(std::move(prefetch_node));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(async_dataset_node->name(), added_node->name()));
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(AutotuneBufferSizes, "autotune_buffer_sizes");
}  // namespace grappler
} | #include "tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithAutotuneBufferSizes(const GrapplerItem &item,
GraphDef *output, bool autotune) {
AutotuneBufferSizes optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class SimpleInject : public ::testing::TestWithParam<string> {};
TEST_P(SimpleInject, AutotuneBufferSizesTest) {
const string async_dataset = GetParam();
using test::function::NDef;
GrapplerItem item;
if (async_dataset == "map") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode(
"map", "range", "num_parallel_calls", "XTimesTwo",
false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "interleave") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", false)},
{
test::function::XTimesTwo(),
});
} else if (async_dataset == "map_and_batch") {
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 32}, {"dtype", DT_INT64}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT64}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeMapAndBatchNode(
"map_and_batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "XTimesTwo")},
{
test::function::XTimesTwo(),
});
}
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("PrefetchDataset", output));
int index = graph_utils::FindGraphNodeWithOp("PrefetchDataset", output);
const NodeDef prefetch_node = output.node(index);
EXPECT_TRUE(prefetch_node.attr().find("legacy_autotune") ==
prefetch_node.attr().end());
EXPECT_EQ(prefetch_node.input_size(), 2);
NodeDef async_node = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(0), output));
EXPECT_EQ(async_node.name(), async_dataset);
NodeDef buffer_size_val = output.node(
graph_utils::FindGraphNodeWithName(prefetch_node.input(1), output));
EXPECT_EQ(buffer_size_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, SimpleInject,
::testing::Values("map", "interleave",
"map_and_batch"));
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, AutotuneBufferSizesTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode("map", "range",
"num_parallel_calls", "XTimesTwo",
false)},
{
test::function::XTimesTwo(),
});
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("PrefetchDataset", output),
autotune);
}
class MultipleNodes
: public ::testing::TestWithParam<std::tuple<bool, int64_t>> {};
TEST_P(MultipleNodes, AutotuneBufferSizesTest) {
const bool legacy_autotune = std::get<0>(GetParam());
const int64_t initial_buffer_size = std::get<1>(GetParam());
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> range_inputs(3);
range_inputs[0] = start_val->name();
range_inputs[1] = stop_val->name();
range_inputs[2] = step_val->name();
std::vector<std::pair<string, AttrValue>> range_attrs;
NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
range_inputs, range_attrs, &graph);
NodeDef *parallelism_val =
graph_utils::AddScalarConstNode<int64_t>(1, &graph);
std::vector<string> map_inputs1(2);
map_inputs1[0] = range_node->name();
map_inputs1[1] = parallelism_val->name();
std::vector<std::pair<string, AttrValue>> map_attrs(4);
AttrValue attr_val;
SetAttrValue("value", &attr_val);
map_attrs[0] = std::make_pair("f", attr_val);
map_attrs[1] = std::make_pair("Targuments", attr_val);
map_attrs[2] = std::make_pair("output_types", attr_val);
map_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *map_node1 = graph_utils::AddNode("map1", "ParallelMapDatasetV2",
map_inputs1, map_attrs, &graph);
NodeDef *buffer_size_val =
graph_utils::AddScalarConstNode<int64_t>(initial_buffer_size, &graph);
std::vector<string> prefetch_inputs(2);
prefetch_inputs[0] = map_node1->name();
prefetch_inputs[1] = buffer_size_val->name();
std::vector<std::pair<string, AttrValue>> prefetch_attrs(4);
AttrValue legacy_autotune_attr;
SetAttrValue(legacy_autotune, &legacy_autotune_attr);
AttrValue buffer_size_min_attr;
SetAttrValue(0, &buffer_size_min_attr);
prefetch_attrs[0] = std::make_pair("legacy_autotune", legacy_autotune_attr);
prefetch_attrs[1] = std::make_pair("buffer_size_min", buffer_size_min_attr);
prefetch_attrs[2] = std::make_pair("output_types", attr_val);
prefetch_attrs[3] = std::make_pair("output_shapes", attr_val);
NodeDef *prefetch_node = graph_utils::AddNode(
"prefetch", "PrefetchDataset", prefetch_inputs, prefetch_attrs, &graph);
std::vector<string> map_inputs2(2);
map_inputs2[0] = prefetch_node->name();
map_inputs2[1] = parallelism_val->name();
NodeDef *map_node2 = graph_utils::AddNode("map2", "ParallelMapDatasetV2",
map_inputs2, map_attrs, &graph);
std::vector<string> map_inputs3(1);
map_inputs3[0] = map_node2->name();
graph_utils::AddNode("map3", "MapDataset", map_inputs3, map_attrs, &graph);
GraphDef output;
TF_ASSERT_OK(OptimizeWithAutotuneBufferSizes(item, &output, true));
std::vector<int> prefetch_indices =
graph_utils::FindAllGraphNodesWithOp("PrefetchDataset", output);
EXPECT_EQ(prefetch_indices.size(), 2);
NodeDef new_map_node3 =
output.node(graph_utils::FindGraphNodeWithName("map3", output));
NodeDef new_prefetch_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node3.input(0), output));
EXPECT_EQ(new_prefetch_node2.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node2.input_size(), 2);
EXPECT_TRUE(new_prefetch_node2.attr().find("legacy_autotune") ==
new_prefetch_node2.attr().end());
EXPECT_TRUE(new_prefetch_node2.attr().find("buffer_size_min") ==
new_prefetch_node2.attr().end());
NodeDef new_buffer_size_val2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(1), output));
EXPECT_EQ(new_buffer_size_val2.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node2 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node2.input(0), output));
EXPECT_EQ(new_map_node2.name(), "map2");
NodeDef new_prefetch_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_map_node2.input(0), output));
EXPECT_EQ(new_prefetch_node1.op(), "PrefetchDataset");
EXPECT_EQ(new_prefetch_node1.input_size(), 2);
EXPECT_EQ(new_prefetch_node1.attr().at("legacy_autotune").b(),
legacy_autotune);
EXPECT_EQ(new_prefetch_node1.attr().at("buffer_size_min").i(),
(initial_buffer_size == -1 ? 0 : initial_buffer_size));
NodeDef new_buffer_size_val1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(1), output));
EXPECT_EQ(new_buffer_size_val1.attr().at("value").tensor().int64_val(0), -1);
NodeDef new_map_node1 = output.node(
graph_utils::FindGraphNodeWithName(new_prefetch_node1.input(0), output));
EXPECT_EQ(new_map_node1.name(), "map1");
}
INSTANTIATE_TEST_SUITE_P(Test, MultipleNodes,
::testing::Combine(::testing::Values(true, false),
::testing::Values(-1, 3)));
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
}  // namespace
}  // namespace grappler
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/autotune_buffer_sizes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ceb9b732-936e-47e9-ba8d-ae0b7dd21e7e | cpp | tensorflow/tensorflow | compactptrset | tensorflow/core/lib/gtl/compactptrset.h | third_party/xla/xla/tsl/lib/gtl/compactptrset_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_COMPACTPTRSET_H_
#define TENSORFLOW_CORE_LIB_GTL_COMPACTPTRSET_H_
#include "xla/tsl/lib/gtl/compactptrset.h"
namespace tensorflow {
namespace gtl {
using ::tsl::gtl::CompactPointerSet;
}  // namespace gtl
}  // namespace tensorflow
#endif | #include "xla/tsl/lib/gtl/compactptrset.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace gtl {
namespace {
typedef CompactPointerSet<const char*> StringSet;
static std::vector<const char*> SortedContents(const StringSet& set) {
std::vector<const char*> contents(set.begin(), set.end());
std::sort(contents.begin(), contents.end());
return contents;
}
TEST(CompactPointerSetTest, Simple) {
string data = "ABCDEFG";
const char* a = &data[0];
const char* b = &data[1];
const char* c = &data[2];
const char* d = &data[3];
const char* e = &data[4];
const char* f = &data[5];
const char* g = &data[6];
for (const auto& list : std::vector<std::vector<const char*>>({{
{},
{a},
{b},
{nullptr},
{a, b, c, d, e, f, g},
}})) {
LOG(INFO) << list.size();
StringSet set;
ASSERT_TRUE(set.empty());
for (auto p : list) {
ASSERT_EQ(set.count(p), 0);
ASSERT_TRUE(set.insert(p).second);
ASSERT_EQ(set.count(p), 1);
ASSERT_TRUE(set.find(p) != set.end());
}
ASSERT_EQ(set.size(), list.size());
ASSERT_EQ(SortedContents(set), list);
{
StringSet set2(set);
ASSERT_EQ(SortedContents(set2), list);
}
for (const auto& initial : std::vector<std::vector<const char*>>({{
{},
{a},
{b},
{nullptr},
{a, b, c, d},
}})) {
StringSet dst;
for (auto p : initial) {
dst.insert(p);
}
ASSERT_EQ(dst.size(), initial.size());
dst = set;
ASSERT_EQ(SortedContents(dst), list);
dst.clear();
ASSERT_EQ(dst.size(), 0);
}
for (auto p : list) {
ASSERT_EQ(set.erase(p), 1);
ASSERT_EQ(set.erase(p), 0);
}
ASSERT_TRUE(set.empty());
ASSERT_EQ(set.size(), 0);
}
}
}  // namespace
}  // namespace gtl
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/compactptrset.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/gtl/compactptrset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5bd54841-8a92-4042-9218-e8e29fc3ef54 | cpp | tensorflow/tensorflow | generate_testspec | tensorflow/lite/testing/generate_testspec.cc | tensorflow/lite/testing/generate_testspec_test.cc | #include "tensorflow/lite/testing/generate_testspec.h"
#include <iostream>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/testing/join.h"
#include "tensorflow/lite/testing/split.h"
#include "tensorflow/lite/testing/test_runner.h"
#include "tensorflow/lite/testing/tf_driver.h"
#include "tensorflow/lite/testing/tflite_driver.h"
namespace tflite {
namespace testing {
namespace {
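// Generates random tensor data of the given shape using `distribution` and
// emits a (tensor name, comma-separated values) pair.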
template <typename T, typename RandomEngine, typename RandomDistribution>
void GenerateCsv(const string& name, const std::vector<int>& shape,
RandomEngine* engine, RandomDistribution distribution,
std::pair<string, string>* out) {
std::vector<T> data =
GenerateRandomTensor<T>(shape, [&]() { return distribution(*engine); });
*out = std::make_pair(name, Join(data.data(), data.size(), ","));
}
template <typename RandomEngine>
std::vector<std::pair<string, string>> GenerateInputValues(
RandomEngine* engine, const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape) {
std::vector<std::pair<string, string>> input_values;
input_values.resize(input_layer.size());
for (int i = 0; i < input_layer.size(); i++) {
tensorflow::DataType type;
CHECK(DataTypeFromString(input_layer_type[i], &type));
auto shape = Split<int>(input_layer_shape[i], ",");
const auto& name = input_layer[i];
switch (type) {
case tensorflow::DT_FLOAT:
GenerateCsv<float>(name, shape, engine,
std::uniform_real_distribution<float>(-0.5, 0.5),
&input_values[i]);
break;
case tensorflow::DT_UINT8:
GenerateCsv<uint8_t>(name, shape, engine,
std::uniform_int_distribution<uint32_t>(0, 255),
&input_values[i]);
break;
case tensorflow::DT_INT32:
GenerateCsv<int32_t>(name, shape, engine,
std::uniform_int_distribution<int32_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_INT64:
GenerateCsv<int64_t>(name, shape, engine,
std::uniform_int_distribution<int64_t>(-100, 100),
&input_values[i]);
break;
case tensorflow::DT_BOOL:
GenerateCsv<int>(name, shape, engine,
std::uniform_int_distribution<int>(0, 1),
&input_values[i]);
break;
default:
fprintf(stderr, "Unsupported type %d (%s) when generating testspec.\n",
type, input_layer_type[i].c_str());
input_values.clear();
return input_values;
}
}
return input_values;
}
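// Writes a test spec to `stream`: a `reshape` block describing the input
// shapes, followed by `num_invocations` `invoke` blocks that record randomly
// generated inputs and the outputs produced by `runner`.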
bool GenerateTestSpecFromRunner(std::iostream& stream, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer,
TestRunner* runner) {
auto input_size = input_layer.size();
if (input_layer_shape.size() != input_size ||
input_layer_type.size() != input_size) {
fprintf(stderr,
"Input size not match. Expected %lu, got %lu input types, %lu "
"input shapes.\n",
input_size, input_layer_type.size(), input_layer_shape.size());
return false;
}
stream << "reshape {\n";
for (int i = 0; i < input_size; i++) {
const auto& name = input_layer[i];
const auto& shape = input_layer_shape[i];
stream << " input { key: \"" << name << "\" value: \"" << shape
<< "\" }\n";
}
stream << "}\n";
std::mt19937 random_engine;
for (int i = 0; i < num_invocations; ++i) {
auto input_values = GenerateInputValues(
&random_engine, input_layer, input_layer_type, input_layer_shape);
if (input_values.empty()) {
std::cerr << "Unable to generate input values for the TensorFlow model. "
"Make sure the correct values are defined for "
"input_layer, input_layer_type, and input_layer_shape."
<< std::endl;
return false;
}
runner->Invoke(input_values);
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
stream << "invoke {\n";
for (const auto& entry : input_values) {
stream << " input { key: \"" << entry.first << "\" value: \""
<< entry.second << "\" }\n";
}
for (const auto& name : output_layer) {
stream << " output { key: \"" << name << "\" value: \""
<< runner->ReadOutput(name) << "\" }\n";
if (!runner->IsValid()) {
std::cerr << runner->GetErrorMessage() << std::endl;
return false;
}
}
stream << "}\n";
}
return true;
}
}  // namespace
bool GenerateTestSpecFromTensorflowModel(
std::iostream& stream, const string& tensorflow_model_path,
const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
CHECK_EQ(input_layer.size(), input_layer_type.size());
CHECK_EQ(input_layer.size(), input_layer_shape.size());
TfDriver runner(input_layer, input_layer_type, input_layer_shape,
output_layer);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.LoadModel(tensorflow_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
stream << "load_model: " << tflite_model_path << "\n";
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
bool GenerateTestSpecFromTFLiteModel(
std::iostream& stream, const string& tflite_model_path, int num_invocations,
const std::vector<string>& input_layer,
const std::vector<string>& input_layer_type,
const std::vector<string>& input_layer_shape,
const std::vector<string>& output_layer) {
TfLiteDriver runner;
runner.LoadModel(tflite_model_path);
if (!runner.IsValid()) {
std::cerr << runner.GetErrorMessage() << std::endl;
return false;
}
runner.AllocateTensors();
return GenerateTestSpecFromRunner(stream, num_invocations, input_layer,
input_layer_type, input_layer_shape,
output_layer, &runner);
}
}  // namespace testing
} | #include "tensorflow/lite/testing/generate_testspec.h"
#include <random>
#include <set>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
TEST(GenerateRandomTensor, FloatValue) {
std::mt19937 random_engine;
auto random_func = [&]() {
return std::uniform_real_distribution<float>(-0.5, 0.5)(random_engine);
};
std::set<float> values;
float sum_x_square = 0.0f;
float sum_x = 0.0f;
for (int i = 0; i < 100; i++) {
const auto& data = GenerateRandomTensor<float>({1, 3, 4}, random_func);
for (float value : data) {
values.insert(value);
sum_x_square += value * value;
sum_x += value;
}
}
EXPECT_GT(values.size(), 200);
int num = 1 * 3 * 4 * 100;
  float variance = sum_x_square / num - (sum_x / num) * (sum_x / num);
  float min_variance = 1.0f / 12 / 2;
  EXPECT_GT(variance, min_variance);
}
}  // namespace
}  // namespace testing
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/generate_testspec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/generate_testspec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7337069a-bced-4adb-9f27-e26dd49f1d5e | cpp | google/arolla | typed_value | arolla/qtype/typed_value.cc | arolla/qtype/typed_value_test.cc | #include "arolla/qtype/typed_value.h"
#include <cstddef>
#include <memory>
#include <new>
#include <utility>
#include "absl/base/call_once.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/memory.h"
namespace arolla {
namespace {
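// Helpers shared by the TypedValue::FromFields overloads: validate that the
// field values match the compound type's field slots, then initialize the
// compound value field by field.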
template <typename TypedRef>
absl::Status CheckPreconditionsForInitCompound(
QTypePtr compound_qtype, absl::Span<const TypedRef> field_refs) {
const auto& field_slots = compound_qtype->type_fields();
if (field_slots.size() != field_refs.size()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected %d values, got %d; compound_qtype=%s", field_slots.size(),
field_refs.size(), compound_qtype->name()));
}
for (size_t i = 0; i < field_refs.size(); ++i) {
if (field_refs[i].GetType() != field_slots[i].GetType()) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected fields[%d]: %s, got %s; compound_qtype=%s", i,
field_slots[i].GetType()->name(), field_refs[i].GetType()->name(),
compound_qtype->name()));
}
}
return absl::OkStatus();
}
template <typename TypedRef>
void InitCompound(QTypePtr compound_qtype,
absl::Span<const TypedRef> field_refs, void* destination) {
compound_qtype->type_layout().InitializeAlignedAlloc(destination);
const auto& field_slots = compound_qtype->type_fields();
FramePtr frame(destination, &compound_qtype->type_layout());
for (size_t i = 0; i < field_refs.size(); ++i) {
const auto& field_ref = field_refs[i];
field_ref.GetType()->UnsafeCopy(
field_ref.GetRawPointer(),
frame.GetRawPointer(field_slots[i].byte_offset()));
}
}
}  // namespace
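// Allocates the Impl header and the value storage in a single heap block,
// padding by the type's alignment so `impl->data` can be aligned in place.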
TypedValue::Impl* TypedValue::AllocRawImpl(QTypePtr qtype) {
const auto& type_layout = qtype->type_layout();
const size_t alignment = type_layout.AllocAlignment().value;
size_t extra_space = type_layout.AllocSize() + alignment;
void* buffer = ::operator new(sizeof(Impl) + extra_space);
Impl* impl = new (buffer) Impl;
impl->qtype = qtype;
impl->data = static_cast<char*>(buffer) + sizeof(Impl);
void* tmp = std::align(alignment, extra_space, impl->data, extra_space);
DCHECK_NE(tmp, nullptr);
return impl;
}
TypedValue::Impl* TypedValue::AllocImpl(QTypePtr qtype, const void* value) {
auto* impl = AllocRawImpl(qtype);
qtype->type_layout().InitializeAlignedAlloc(impl->data);
qtype->UnsafeCopy(value, impl->data);
return impl;
}
TypedValue TypedValue::UnsafeFromTypeDefaultConstructed(QTypePtr qtype) {
auto* impl = AllocRawImpl(qtype);
qtype->type_layout().InitializeAlignedAlloc(impl->data);
return TypedValue(impl);
}
absl::StatusOr<TypedValue> TypedValue::FromFields(
QTypePtr compound_qtype, absl::Span<const TypedRef> fields) {
if (auto status = CheckPreconditionsForInitCompound(compound_qtype, fields);
!status.ok()) {
return status;
}
auto* impl = AllocRawImpl(compound_qtype);
InitCompound(compound_qtype, fields, impl->data);
return TypedValue(impl);
}
absl::StatusOr<TypedValue> TypedValue::FromFields(
QTypePtr compound_qtype, absl::Span<const TypedValue> fields) {
if (auto status = CheckPreconditionsForInitCompound(compound_qtype, fields);
!status.ok()) {
return status;
}
auto* impl = AllocRawImpl(compound_qtype);
InitCompound(compound_qtype, fields, impl->data);
return TypedValue(impl);
}
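// The fingerprint is computed lazily on first use and cached in the shared
// Impl, guarded by absl::call_once.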
const Fingerprint& TypedValue::GetFingerprint() const {
absl::call_once(impl_->fingerprint_once, [impl = impl_] {
FingerprintHasher hasher("TypedValue");
hasher.Combine(impl->qtype);
impl->qtype->UnsafeCombineToFingerprintHasher(impl->data, &hasher);
impl->fingerprint = std::move(hasher).Finish();
});
return impl_->fingerprint;
}
} | #include "arolla/qtype/typed_value.h"
#include <cstdint>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fingerprint.h"
struct WithoutQTypeTraits {};
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
TEST(TypedValueTest, ReprBasic) {
EXPECT_EQ(TypedValue::FromValue<bool>(true).Repr(), "true");
EXPECT_EQ(TypedValue::FromValue<int32_t>(5).Repr(), "5");
EXPECT_EQ(TypedValue::FromValue<int64_t>(5).Repr(), "int64{5}");
EXPECT_EQ(TypedValue::FromValue<uint64_t>(5).Repr(), "uint64{5}");
EXPECT_EQ(TypedValue::FromValue<float>(5.0f).Repr(), "5.");
EXPECT_EQ(TypedValue::FromValue<double>(5.0).Repr(), "float64{5}");
}
TEST(TypedValueTest, FromValue) {
auto tval = TypedValue::FromValue<int64_t>(1);
EXPECT_THAT(tval.GetType(), Eq(GetQType<int64_t>()));
EXPECT_THAT(tval.As<int64_t>(), IsOkAndHolds(int64_t{1}));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(TypedValueTest, As) {
auto int_value = TypedValue::FromValue<double>(1.0);
EXPECT_THAT(int_value.As<double>(), IsOkAndHolds(1.0));
EXPECT_THAT(int_value.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `float`")));
EXPECT_THAT(int_value.As<WithoutQTypeTraits>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `WithoutQTypeTraits`")));
}
TEST(TypedValueTest, FromValueWithQType) {
auto f64 = GetQType<double>();
absl::StatusOr<TypedValue> tmp = TypedValue::FromValueWithQType(1.0, f64);
auto tval = std::move(tmp).value();
EXPECT_THAT(tval.GetType(), Eq(f64));
EXPECT_THAT(tval.As<double>(), IsOkAndHolds(1.0));
EXPECT_THAT(tval.As<float>(),
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("type mismatch: expected C++ type `double` "
"(FLOAT64), got `float`")));
EXPECT_THAT(TypedValue::FromValueWithQType(1.0f, f64).status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST(TypedValueTest, UnsafeFromTypeDefaultConstructed) {
{
auto f64 = GetQType<double>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(f64);
EXPECT_THAT(tval.GetType(), Eq(GetQType<double>()));
EXPECT_THAT(tval.As<double>(), IsOkAndHolds(double{0.}));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
{
auto bytes = GetQType<Bytes>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(bytes);
EXPECT_THAT(tval.GetType(), Eq(GetQType<Bytes>()));
ASSERT_OK_AND_ASSIGN(auto val_ref, tval.As<Bytes>());
EXPECT_THAT(val_ref.get(), Eq(Bytes()));
EXPECT_THAT(tval.As<float>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
{
auto of64 = GetQType<OptionalValue<double>>();
auto tval = TypedValue::UnsafeFromTypeDefaultConstructed(of64);
EXPECT_THAT(tval.GetType(), Eq(GetQType<OptionalValue<double>>()));
ASSERT_OK_AND_ASSIGN(auto val_ref, tval.As<OptionalValue<double>>());
EXPECT_THAT(val_ref.get(), Eq(OptionalValue<double>()));
EXPECT_THAT(tval.As<OptionalValue<float>>().status(),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
TEST(TypedValueTest, FromSlot) {
FrameLayout::Builder builder;
QTypePtr f32 = GetQType<float>();
TypedSlot tslot = AddSlot(f32, &builder);
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr ptr = alloc.frame();
EXPECT_OK(TypedValue::FromValue(7.5f).CopyToSlot(tslot, ptr));
auto tval = TypedValue::FromSlot(tslot, ptr);
EXPECT_THAT(tval.GetType(), Eq(f32));
EXPECT_THAT(tval.As<float>(), IsOkAndHolds(7.5f));
}
TEST(TypedValueTest, ToSlot) {
FrameLayout::Builder builder;
QTypePtr f64 = GetQType<double>();
TypedSlot tslot = AddSlot(f64, &builder);
auto layout = std::move(builder).Build();
MemoryAllocation alloc(&layout);
FramePtr ptr = alloc.frame();
auto tval = TypedValue::FromValue(double{1.0});
EXPECT_OK(tval.CopyToSlot(tslot, ptr));
auto slot = tslot.ToSlot<double>().value();
EXPECT_THAT(ptr.Get(slot), Eq(1.0));
}
TEST(TypedValueTest, Copy) {
auto tval = TypedValue::FromValue(double{1.0});
auto tval_copy = tval;
EXPECT_THAT(tval_copy.As<double>(), IsOkAndHolds(1.0));
}
TEST(TypedValueTest, FingerprintUniqueness) {
absl::flat_hash_set<Fingerprint> fingerprints;
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(int32_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(int64_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(uint64_t{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(double{0}).GetFingerprint())
.second);
EXPECT_TRUE(
fingerprints.insert(TypedValue::FromValue(float{0}).GetFingerprint())
.second);
}
TEST(TypedValueTest, FingerprintReproducibility) {
EXPECT_EQ(TypedValue::FromValue(int32_t{0}).GetFingerprint(),
TypedValue::FromValue(int32_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(int64_t{0}).GetFingerprint(),
TypedValue::FromValue(int64_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(uint64_t{0}).GetFingerprint(),
TypedValue::FromValue(uint64_t{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(float{0}).GetFingerprint(),
TypedValue::FromValue(float{0}).GetFingerprint());
EXPECT_EQ(TypedValue::FromValue(double{0}).GetFingerprint(),
TypedValue::FromValue(double{0}).GetFingerprint());
}
TEST(TypedValueTest, UnsafeAs) {
auto tval = TypedValue::FromValue<int64_t>(1);
ASSERT_THAT(tval.GetType(), Eq(GetQType<int64_t>()));
EXPECT_THAT(tval.UnsafeAs<int64_t>(), Eq(int64_t{1}));
}
TEST(TypedValueTest, CopyConstructor) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = x;
EXPECT_EQ(x.GetType(), y.GetType());
EXPECT_EQ(x.GetRawPointer(), y.GetRawPointer());
}
TEST(TypedValueTest, CopyOperator) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = TypedValue::FromValue<int64_t>(2);
y = x;
EXPECT_EQ(x.GetType(), y.GetType());
EXPECT_EQ(x.GetRawPointer(), y.GetRawPointer());
}
TEST(TypedValueTest, MoveConstructor) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
auto* x_type = x.GetType();
auto* x_raw_ptr = x.GetRawPointer();
TypedValue y = std::move(x);
EXPECT_EQ(y.GetType(), x_type);
EXPECT_EQ(y.GetRawPointer(), x_raw_ptr);
}
TEST(TypedValueTest, MoveOperator) {
TypedValue x = TypedValue::FromValue<int64_t>(1);
TypedValue y = TypedValue::FromValue<int64_t>(2);
auto* x_type = x.GetType();
auto* x_raw_ptr = x.GetRawPointer();
y = std::move(x);
EXPECT_EQ(y.GetType(), x_type);
EXPECT_EQ(y.GetRawPointer(), x_raw_ptr);
}
TEST(TypedValueTest, CopyFromValue) {
const Bytes bytes("data");
TypedValue x = TypedValue::FromValue(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, CopyFromValueT) {
const Bytes bytes("data");
TypedValue x = TypedValue::FromValue<Bytes>(bytes);
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, MoveFromValueT) {
Bytes bytes("a long string literal to ensure memory allocation");
auto* data_raw_ptr = bytes.data();
TypedValue x = TypedValue::FromValue<Bytes>(std::move(bytes));
EXPECT_EQ(x.UnsafeAs<Bytes>().data(), data_raw_ptr);
}
TEST(TypedValueTest, CopyFromValueWithQType) {
const Bytes bytes("data");
ASSERT_OK_AND_ASSIGN(
TypedValue x, TypedValue::FromValueWithQType(bytes, GetQType<Bytes>()));
ASSERT_OK_AND_ASSIGN(Bytes x_bytes, x.As<Bytes>());
EXPECT_THAT(x_bytes, Eq(bytes));
}
TEST(TypedValueTest, MoveFromValueWithQType) {
Bytes bytes("a long string literal to ensure memory allocation");
auto* data_raw_ptr = bytes.data();
ASSERT_OK_AND_ASSIGN(TypedValue x, TypedValue::FromValueWithQType(
std::move(bytes), GetQType<Bytes>()));
EXPECT_EQ(x.UnsafeAs<Bytes>().data(), data_raw_ptr);
}
}  // namespace
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_value.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/typed_value_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
550b570a-3d3c-4266-988c-aca50630b989 | cpp | tensorflow/tensorflow | eigen_pooling | tensorflow/core/kernels/eigen_pooling.h | tensorflow/core/kernels/eigen_pooling_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_EIGEN_POOLING_H_
#define TENSORFLOW_CORE_KERNELS_EIGEN_POOLING_H_
#include "unsupported/Eigen/CXX11/Tensor"
namespace Eigen {
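// Applies 2D max pooling over the spatial dimensions of a rank-4 tensor
// (channel-major in ColMajor layout, batch-major in RowMajor) by extracting
// image patches and taking the maximum over the patch dimensions. Padded
// entries are filled with the lowest representable value so they never win
// the reduction. Illustrative use (dimensions chosen arbitrarily):
//   Eigen::Tensor<float, 4> in(depth, rows, cols, batches);
//   Eigen::Tensor<float, 4> out(depth, out_rows, out_cols, batches);
//   out = SpatialMaxPooling(in, 3, 3, 1, 1, Eigen::PADDING_VALID);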
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::MaxReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>,
const TensorImagePatchOp<Dynamic, Dynamic, const Input>>>
SpatialMaxPooling(const Input& input, DenseIndex patchRows,
DenseIndex patchCols, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type,
DenseIndex in_strideRows = 1, DenseIndex in_strideCols = 1) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
YOU_MADE_A_PROGRAMMING_MISTAKE);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
const DenseIndex patchRowsEff =
patchRows + (patchRows - 1) * (in_strideRows - 1);
const DenseIndex patchColsEff =
patchCols + (patchCols - 1) * (in_strideCols - 1);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int idxRows = isColMajor ? 1 : 2;
static const int idxCols = isColMajor ? 2 : 1;
Eigen::DSizes<TensorIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRowsEff + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchColsEff + 1,
strideCols);
} else {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[3] = in.dimension(3);
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>
reduction_dims;
return input
.extract_image_patches(
patchRows, patchCols, strideRows, strideCols, in_strideRows,
in_strideCols, padding_type,
Eigen::NumTraits<std::remove_const_t<
typename internal::traits<Input>::Scalar>>::lowest())
.maximum(reduction_dims)
.reshape(post_reduce_dims);
}
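// 3D analogue of SpatialMaxPooling: extracts volume patches from a rank-5
// tensor, reshapes them to rank 3 so the patch elements lie along one
// dimension, and reduces that dimension with a max.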
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::MaxReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
const Eigen::IndexList<Eigen::type2index<1>>,
const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, 3>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Input>>>>
CuboidMaxPooling(const Input& input, DenseIndex patchPlanes,
DenseIndex patchRows, DenseIndex patchCols,
DenseIndex stridePlanes, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
static const int idxPlanes = isColMajor ? 1 : 3;
static const int idxRows = 2;
static const int idxCols = isColMajor ? 3 : 1;
Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)) - patchPlanes + 1,
stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRows + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchCols + 1,
strideCols);
} else {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)), stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[4] = in.dimension(4);
Eigen::DSizes<DenseIndex, 3> pre_reduce_dims;
pre_reduce_dims[1] = patchRows * patchCols * patchPlanes;
if (isColMajor) {
pre_reduce_dims[0] = post_reduce_dims[0];
pre_reduce_dims[2] = post_reduce_dims[1] * post_reduce_dims[2] *
post_reduce_dims[3] * post_reduce_dims[4];
} else {
pre_reduce_dims[0] = post_reduce_dims[0] * post_reduce_dims[1] *
post_reduce_dims[2] * post_reduce_dims[3];
pre_reduce_dims[2] = post_reduce_dims[4];
}
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
Eigen::IndexList<Eigen::type2index<1> > reduction_dims;
return input
.extract_volume_patches(patchPlanes, patchRows, patchCols, stridePlanes,
strideRows, strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reshape(pre_reduce_dims)
.maximum(reduction_dims)
.reshape(post_reduce_dims);
}
namespace internal {
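// Mean reducer used for average pooling: entries equal to
// -NumTraits<T>::highest() mark padding and are excluded from both the sum
// and the element count, so the mean is taken over valid entries only.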
template <typename T>
struct AvgPoolMeanReducer {
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
static constexpr bool PacketAccess = internal::is_same<T, float>::value;
#else
static const bool PacketAccess = false;
#endif
static constexpr bool IsStateful = true;
EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE AvgPoolMeanReducer() : scalarCount_(0) {
typedef typename packet_traits<T>::type Packet;
#if defined(__HIPCC__)
packetCount_ = 0;
#else
packetCount_ = pset1<Packet>(T(0.0));
#endif
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) {
if (t != -Eigen::NumTraits<T>::highest()) {
(*accum) = (*accum) + t;
scalarCount_++;
}
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T initialize() const {
return static_cast<T>(0);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T finalize(const T accum) const {
eigen_assert(scalarCount_ > 0);
return accum / T(scalarCount_);
}
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
#ifdef EIGEN_VECTORIZE_AVX512
#define pequal(a, b) \
_mm512_castsi512_ps( \
_mm512_maskz_set1_epi32(_mm512_cmp_ps_mask(a, b, _CMP_EQ_UQ), -1))
#define psel(a, b, false_mask) \
_mm512_castsi512_ps(_mm512_ternarylogic_epi32( \
_mm512_castps_si512(a), _mm512_castps_si512(b), \
_mm512_castps_si512(false_mask), 0xd8))
#elif defined EIGEN_VECTORIZE_AVX
#define pequal(a, b) _mm256_cmp_ps(a, b, _CMP_EQ_UQ)
#define psel(a, b, false_mask) _mm256_blendv_ps(a, b, false_mask)
#else
#define pequal(a, b) _mm_cmpeq_ps(a, b)
#define psel(a, b, false_mask) \
_mm_or_ps(_mm_andnot_ps(false_mask, a), _mm_and_ps(false_mask, b))
#endif
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacket(const Packet& p,
Packet* accum) {
reducePacketWithType(static_cast<T>(0), p, accum);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reducePacketWithType(
T, const Packet& p, Packet* accum) {
Packet skip_mask =
pequal(p, pset1<Packet>(-Eigen::NumTraits<T>::highest()));
(*accum) = padd<Packet>(*accum, psel(p, pset1<Packet>(0), skip_mask));
packetCount_ = padd<Packet>(
packetCount_, psel(pset1<Packet>(1), pset1<Packet>(0), skip_mask));
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet initializePacket() const {
return pset1<Packet>(0);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Packet
finalizePacket(const Packet& vaccum) const {
return pdiv(vaccum, packetCount_);
}
template <typename Packet>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T
finalizeBoth(const T saccum, const Packet& vaccum) const {
return (saccum + predux(vaccum)) / (scalarCount_ + predux(packetCount_));
}
#endif  // (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !__CUDACC__ && !__HIPCC__
protected:
typedef typename packet_traits<T>::type Packet;
int scalarCount_;
#if defined(__HIPCC__)
int packetCount_;
#else
Packet packetCount_;
#endif
};
template <typename Device>
struct reducer_traits<AvgPoolMeanReducer<float>, Device> {
enum {
Cost = 1,
#if (EIGEN_ARCH_i386 || EIGEN_ARCH_x86_64) && !defined(__CUDACC__) && \
!defined(__HIPCC__)
PacketAccess = true,
#else
PacketAccess = false,
#endif
IsStateful = true,
IsExactlyAssociative = false
};
};
template <>
struct reducer_traits<AvgPoolMeanReducer<float>, GpuDevice> {
enum {
Cost = 1,
PacketAccess = false,
IsStateful = true,
IsExactlyAssociative = false
};
};
}  // namespace internal
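// Same patch-extraction scheme as SpatialMaxPooling, but reduced with
// AvgPoolMeanReducer so padded entries do not dilute the average.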
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<typename internal::traits<Input>::Index,
internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::AvgPoolMeanReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>,
const TensorImagePatchOp<Dynamic, Dynamic, const Input>>>
SpatialAvgPooling(const Input& input, DenseIndex patchRows,
DenseIndex patchCols, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type,
DenseIndex in_strideRows = 1, DenseIndex in_strideCols = 1) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 4,
YOU_MADE_A_PROGRAMMING_MISTAKE);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
const DenseIndex patchRowsEff =
patchRows + (patchRows - 1) * (in_strideRows - 1);
const DenseIndex patchColsEff =
patchCols + (patchCols - 1) * (in_strideCols - 1);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
static const int idxRows = isColMajor ? 1 : 2;
static const int idxCols = isColMajor ? 2 : 1;
Eigen::DSizes<TensorIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRowsEff + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchColsEff + 1,
strideCols);
} else {
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[3] = in.dimension(3);
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
internal::AvgPoolMeanReducer<CoeffReturnType> mean_with_nan;
std::conditional_t<
internal::traits<Input>::Layout == ColMajor,
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>,
const Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>>>
reduction_dims;
return input
.extract_image_patches(patchRows, patchCols, strideRows, strideCols,
in_strideRows, in_strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reduce(reduction_dims, mean_with_nan)
.reshape(post_reduce_dims);
}
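// 3D analogue of SpatialAvgPooling, built on volume patches like
// CuboidMaxPooling.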
template <typename Input>
EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>,
const TensorReductionOp<
internal::AvgPoolMeanReducer<
std::remove_const_t<typename internal::traits<Input>::Scalar>>,
const Eigen::IndexList<Eigen::type2index<1>>,
const TensorReshapingOp<
const Eigen::DSizes<DenseIndex, 3>,
const TensorVolumePatchOp<Dynamic, Dynamic, Dynamic, const Input>>>>
CuboidAvgPooling(const Input& input, DenseIndex patchPlanes,
DenseIndex patchRows, DenseIndex patchCols,
DenseIndex stridePlanes, DenseIndex strideRows,
DenseIndex strideCols, const PaddingType padding_type) {
EIGEN_STATIC_ASSERT(internal::traits<Input>::NumDimensions == 5,
YOU_MADE_A_PROGRAMMING_MISTAKE);
static const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
typedef typename internal::traits<Input>::Index TensorIndex;
TensorRef<Tensor<typename internal::traits<Input>::Scalar,
internal::traits<Input>::NumDimensions,
internal::traits<Input>::Layout, TensorIndex> >
in(input);
static const int idxPlanes = isColMajor ? 1 : 3;
static const int idxRows = 2;
static const int idxCols = isColMajor ? 3 : 1;
Eigen::DSizes<DenseIndex, internal::traits<Input>::NumDimensions>
post_reduce_dims;
post_reduce_dims[0] = in.dimension(0);
if (padding_type == PADDING_VALID) {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)) - patchPlanes + 1,
stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)) - patchRows + 1,
strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)) - patchCols + 1,
strideCols);
} else {
post_reduce_dims[idxPlanes] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxPlanes)), stridePlanes);
post_reduce_dims[idxRows] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxRows)), strideRows);
post_reduce_dims[idxCols] = Eigen::divup(
static_cast<DenseIndex>(in.dimension(idxCols)), strideCols);
}
post_reduce_dims[4] = in.dimension(4);
Eigen::DSizes<DenseIndex, 3> pre_reduce_dims;
pre_reduce_dims[1] = patchRows * patchCols * patchPlanes;
if (isColMajor) {
pre_reduce_dims[0] = post_reduce_dims[0];
pre_reduce_dims[2] = post_reduce_dims[1] * post_reduce_dims[2] *
post_reduce_dims[3] * post_reduce_dims[4];
} else {
pre_reduce_dims[0] = post_reduce_dims[0] * post_reduce_dims[1] *
post_reduce_dims[2] * post_reduce_dims[3];
pre_reduce_dims[2] = post_reduce_dims[4];
}
typedef std::remove_const_t<typename internal::traits<Input>::Scalar>
CoeffReturnType;
internal::AvgPoolMeanReducer<CoeffReturnType> mean_with_nan;
Eigen::IndexList<Eigen::type2index<1> > reduction_dims;
return input
.extract_volume_patches(patchPlanes, patchRows, patchCols, stridePlanes,
strideRows, strideCols, padding_type,
-Eigen::NumTraits<CoeffReturnType>::highest())
.reshape(pre_reduce_dims)
.reduce(reduction_dims, mean_with_nan)
.reshape(post_reduce_dims);
}
}  // namespace Eigen
#endif | #include "tensorflow/core/kernels/eigen_pooling.h"
#include "tensorflow/core/platform/test.h"
namespace Eigen {
namespace {
void EigenApprox(float a, float b) {
ASSERT_TRUE(std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * 1e-3);
}
}
TEST(EigenPoolingTest, Simple) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(depth, input_rows, input_cols, num_batches);
Tensor<float, 4> result(depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.f);
const int stride = 1;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(0), depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected, input(d, r + i, c + j, b));
}
}
if (result(d, i, j, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(d, i, j, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, b), expected);
}
}
}
}
}
TEST(EigenPoolingTest, SimpleRowMajor) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 4;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
depth);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.f);
const int stride = 1;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(3), depth);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected, input(b, c + j, r + i, d));
}
}
if (result(b, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(b, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, j, i, d), expected);
}
}
}
}
}
TEST(EigenPoolingTest, Cuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected =
(std::max)(expected, input(d, p + i, r + j, c + k, b));
}
}
}
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, CuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected =
(std::max)(expected, input(b, c + k, r + j, p + i, d));
}
}
}
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, ValidCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected_sum += input(d, p + i, r + j, c + k, b);
expected_count++;
}
}
}
const float expected = expected_sum / expected_count;
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, ValidCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = 2;
const int output_cols = 3;
const int output_planes = 4;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected_sum += input(b, c + k, r + j, p + i, d);
expected_count++;
}
}
}
const float expected = expected_sum / expected_count;
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, SameCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_SAME);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
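  // With stride 1 and SAME padding, output extent == input extent, so the
  // total padding per dimension reduces to patch_size - 1 (e.g. patch_rows =
  // 4 gives pad_r = 3, dr = 1); dp/dr/dc are the leading halves.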
const int pad_p = output_planes - input_planes + patch_planes - 1;
const int pad_r = output_rows - input_rows + patch_rows - 1;
const int pad_c = output_cols - input_cols + patch_cols - 1;
const int dp = pad_p / 2;
const int dr = pad_r / 2;
const int dc = pad_c / 2;
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
const int in_p = p + i - dp;
const int in_r = r + j - dr;
const int in_c = c + k - dc;
if (in_p >= 0 && in_p < input_planes && in_r >= 0 &&
in_r < input_rows && in_c >= 0 && in_c < input_cols) {
expected_sum += input(d, in_p, in_r, in_c, b);
expected_count++;
}
}
}
}
const float expected = expected_sum / expected_count;
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, SameCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 4;
const int patch_cols = 3;
const int patch_planes = 2;
const int output_rows = input_rows;
const int output_cols = input_cols;
const int output_planes = input_planes;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
result = result.constant(-1000.0f);
const int stride = 1;
result = CuboidAvgPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_SAME);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
const int pad_p = output_planes - input_planes + patch_planes - 1;
const int pad_r = output_rows - input_rows + patch_rows - 1;
const int pad_c = output_cols - input_cols + patch_cols - 1;
const int dp = pad_p / 2;
const int dr = pad_r / 2;
const int dc = pad_c / 2;
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected_sum = 0.0f;
int expected_count = 0;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
const int in_p = p + i - dp;
const int in_r = r + j - dr;
const int in_c = c + k - dc;
if (in_p >= 0 && in_p < input_planes && in_r >= 0 &&
in_r < input_rows && in_c >= 0 && in_c < input_cols) {
expected_sum += input(b, in_c, in_r, in_p, d);
expected_count++;
}
}
}
}
const float expected = expected_sum / expected_count;
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " k=" << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, Strided) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4> input(depth, input_rows, input_cols, num_batches);
Tensor<float, 4> result(depth, output_rows, output_cols, num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
  const int stride = 2;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(0), depth);
EXPECT_EQ(result.dimension(1), output_rows);
EXPECT_EQ(result.dimension(2), output_cols);
EXPECT_EQ(result.dimension(3), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(
expected, input(d, r + stride * i, c + stride * j, b));
}
}
if (result(d, i, j, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(d, i, j, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, b), expected);
}
}
}
}
}
TEST(EigenPoolingTest, StridedRowMajor) {
const int depth = 10;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 4, RowMajor> input(num_batches, input_cols, input_rows, depth);
Tensor<float, 4, RowMajor> result(num_batches, output_cols, output_rows,
depth);
input = input.constant(11.0f) + input.random();
result.setRandom();
  const int stride = 2;
result = SpatialMaxPooling(input, patch_rows, patch_cols, stride, stride,
PADDING_VALID);
EXPECT_EQ(result.dimension(3), depth);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < depth; ++d) {
for (int i = 0; i < output_rows; ++i) {
for (int j = 0; j < output_cols; ++j) {
float expected = -10000.f;
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(
expected, input(b, c + stride * j, r + stride * i, d));
}
}
if (result(b, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i << " j=" << j
<< " " << result(b, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, j, i, d), expected);
}
}
}
}
}
TEST(EigenPoolingTest, StridedCuboid) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_planes = 2;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 5> input(channels, input_planes, input_rows, input_cols,
num_batches);
Tensor<float, 5> result(channels, output_planes, output_rows, output_cols,
num_batches);
input = input.constant(11.0f) + input.random();
result.setRandom();
  const int stride = 2;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(0), channels);
EXPECT_EQ(result.dimension(1), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(3), output_cols);
EXPECT_EQ(result.dimension(4), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected,
input(d, p + stride * i, r + stride * j,
c + stride * k, b));
}
}
}
if (result(d, i, j, k, b) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " " << k << " "
<< result(d, i, j, k, b) << " vs " << expected
<< std::endl;
}
EigenApprox(result(d, i, j, k, b), expected);
}
}
}
}
}
}
TEST(EigenPoolingTest, StridedCuboidRowMajor) {
const int channels = 10;
const int input_planes = 5;
const int input_rows = 5;
const int input_cols = 5;
const int num_batches = 13;
const int patch_planes = 3;
const int patch_rows = 3;
const int patch_cols = 3;
const int output_planes = 2;
const int output_rows = 2;
const int output_cols = 2;
Tensor<float, 5, RowMajor> input(num_batches, input_cols, input_rows,
input_planes, channels);
Tensor<float, 5, RowMajor> result(num_batches, output_cols, output_rows,
output_planes, channels);
input = input.constant(11.0f) + input.random();
result.setRandom();
  const int stride = 2;
result = CuboidMaxPooling(input, patch_planes, patch_rows, patch_cols, stride,
stride, stride, PADDING_VALID);
EXPECT_EQ(result.dimension(4), channels);
EXPECT_EQ(result.dimension(3), output_planes);
EXPECT_EQ(result.dimension(2), output_rows);
EXPECT_EQ(result.dimension(1), output_cols);
EXPECT_EQ(result.dimension(0), num_batches);
for (int b = 0; b < num_batches; ++b) {
for (int d = 0; d < channels; ++d) {
for (int i = 0; i < output_planes; ++i) {
for (int j = 0; j < output_rows; ++j) {
for (int k = 0; k < output_cols; ++k) {
float expected = -10000.f;
for (int p = 0; p < patch_planes; ++p) {
for (int r = 0; r < patch_rows; ++r) {
for (int c = 0; c < patch_cols; ++c) {
expected = (std::max)(expected,
input(b, c + stride * k, r + stride * j,
p + stride * i, d));
}
}
}
if (result(b, k, j, i, d) != expected) {
std::cout << "at d=" << d << " b=" << b << " i=" << i
<< " j=" << j << " " << k << " "
<< result(b, k, j, i, d) << " vs " << expected
<< std::endl;
}
EigenApprox(result(b, k, j, i, d), expected);
}
}
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_pooling.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/eigen_pooling_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
11c70d79-3753-4076-a1b8-1c481f3ba170 | cpp | tensorflow/tensorflow | common | tensorflow/compiler/mlir/lite/kernels/internal/common.cc | tensorflow/lite/core/c/common_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/common.h"
namespace tflite_migration {
#if TFLITE_SINGLE_ROUNDING
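// Fixed-point requantization with a single rounding step:
//   result = round_half_up(x * quantized_multiplier / 2^(31 - shift)).
// `round` is half of the divisor, added before the arithmetic right shift.
// e.g. x = 100, quantized_multiplier = 1 << 30 (0.5 in Q0.31), shift = 0:
//   (100 * 2^30 + 2^30) >> 31 == 50.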
int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift <= 30);
const int64_t total_shift = 31 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(quantized_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
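// 64-bit variant: the 32-bit multiplier is first rounded down to ~15
// significant bits so that x * reduced_multiplier cannot overflow int64 for
// |x| < 2^47; the shift budget drops from 31 to 15 accordingly.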
int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier,
int shift) {
TFLITE_DCHECK(quantized_multiplier >= 0);
TFLITE_DCHECK(shift >= -31 && shift < 8);
TFLITE_DCHECK(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
const int32_t reduced_multiplier =
(quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
const int64_t total_shift = 15 - shift;
const int64_t round = static_cast<int64_t>(1) << (total_shift - 1);
int64_t result = x * static_cast<int64_t>(reduced_multiplier) + round;
result = result >> total_shift;
TFLITE_DCHECK(result >= std::numeric_limits<int32_t>::min() &&
result <= std::numeric_limits<int32_t>::max());
return static_cast<int32_t>(result);
}
#else
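// Double-rounding path: the shift is split into a pre-multiply left shift
// and a post-multiply rounding right shift; gemmlowp's saturating doubling
// high-mul performs the Q31 multiply, rounding once inside the high-mul and
// once in RoundingDivideByPOT (hence "double rounding").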
int32_t MultiplyByQuantizedMultiplier(int32_t x, int32_t quantized_multiplier,
int shift) {
using gemmlowp::RoundingDivideByPOT;
using gemmlowp::SaturatingRoundingDoublingHighMul;
int left_shift = shift > 0 ? shift : 0;
int right_shift = shift > 0 ? 0 : -shift;
return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
x * (1 << left_shift), quantized_multiplier),
right_shift);
}
int32_t MultiplyByQuantizedMultiplier(int64_t x, int32_t quantized_multiplier,
int shift) {
assert(quantized_multiplier >= 0);
assert(shift >= -31 && shift < 8);
assert(x >= -(static_cast<int64_t>(1) << 47) &&
x < (static_cast<int64_t>(1) << 47));
int32_t reduced_multiplier = (quantized_multiplier < 0x7FFF0000)
? ((quantized_multiplier + (1 << 15)) >> 16)
: 0x7FFF;
int total_shift = 15 - shift;
  x = (x * static_cast<int64_t>(reduced_multiplier)) +
      (static_cast<int64_t>(1) << (total_shift - 1));
  int32_t result = static_cast<int32_t>(x >> total_shift);
return result;
}
#endif
} | #include "tensorflow/lite/core/c/common.h"
#include <cstddef>
#include <cstdlib>
#include <limits>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/util.h"
namespace tflite {
using ::testing::ElementsAreArray;
TEST(IntArray, TestIntArrayCreate) {
TfLiteIntArray* a = TfLiteIntArrayCreate(0);
TfLiteIntArray* b = TfLiteIntArrayCreate(3);
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
}
TEST(IntArray, TestIntArrayCopy) {
TfLiteIntArray* a = TfLiteIntArrayCreate(2);
a->data[0] = 22;
a->data[1] = 24;
TfLiteIntArray* b = TfLiteIntArrayCopy(a);
ASSERT_NE(a, b);
ASSERT_EQ(a->size, b->size);
ASSERT_EQ(a->data[0], b->data[0]);
ASSERT_EQ(a->data[1], b->data[1]);
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
}
TEST(IntArray, TestIntArrayEqual) {
TfLiteIntArray* a = TfLiteIntArrayCreate(1);
a->data[0] = 1;
TfLiteIntArray* b = TfLiteIntArrayCreate(2);
b->data[0] = 5;
b->data[1] = 6;
TfLiteIntArray* c = TfLiteIntArrayCreate(2);
c->data[0] = 5;
c->data[1] = 6;
TfLiteIntArray* d = TfLiteIntArrayCreate(2);
d->data[0] = 6;
d->data[1] = 6;
EXPECT_FALSE(TfLiteIntArrayEqual(a, b));
EXPECT_TRUE(TfLiteIntArrayEqual(b, c));
EXPECT_TRUE(TfLiteIntArrayEqual(b, b));
EXPECT_FALSE(TfLiteIntArrayEqual(c, d));
EXPECT_FALSE(TfLiteIntArrayEqual(nullptr, a));
EXPECT_FALSE(TfLiteIntArrayEqual(a, nullptr));
EXPECT_TRUE(TfLiteIntArrayEqual(nullptr, nullptr));
TfLiteIntArrayFree(a);
TfLiteIntArrayFree(b);
TfLiteIntArrayFree(c);
TfLiteIntArrayFree(d);
}
TEST(FloatArray, TestFloatArrayCreate) {
TfLiteFloatArray* a = TfLiteFloatArrayCreate(0);
TfLiteFloatArray* b = TfLiteFloatArrayCreate(3);
TfLiteFloatArrayFree(a);
TfLiteFloatArrayFree(b);
}
TEST(FloatArray, TestFloatArrayCopy) {
TfLiteFloatArray* a = TfLiteFloatArrayCreate(2);
a->data[0] = 22.0;
a->data[1] = 24.0;
TfLiteFloatArray* b = TfLiteFloatArrayCopy(a);
ASSERT_NE(a, b);
ASSERT_EQ(a->size, b->size);
ASSERT_EQ(a->data[0], b->data[0]);
ASSERT_EQ(a->data[1], b->data[1]);
TfLiteFloatArrayFree(a);
TfLiteFloatArrayFree(b);
}
TEST(Types, TestTypeNames) {
auto type_name = [](TfLiteType t) {
return std::string(TfLiteTypeGetName(t));
};
EXPECT_EQ(type_name(kTfLiteNoType), "NOTYPE");
EXPECT_EQ(type_name(kTfLiteFloat64), "FLOAT64");
EXPECT_EQ(type_name(kTfLiteFloat32), "FLOAT32");
EXPECT_EQ(type_name(kTfLiteFloat16), "FLOAT16");
EXPECT_EQ(type_name(kTfLiteBFloat16), "BFLOAT16");
EXPECT_EQ(type_name(kTfLiteInt16), "INT16");
EXPECT_EQ(type_name(kTfLiteUInt16), "UINT16");
EXPECT_EQ(type_name(kTfLiteInt32), "INT32");
EXPECT_EQ(type_name(kTfLiteUInt32), "UINT32");
EXPECT_EQ(type_name(kTfLiteUInt8), "UINT8");
EXPECT_EQ(type_name(kTfLiteUInt64), "UINT64");
EXPECT_EQ(type_name(kTfLiteInt8), "INT8");
EXPECT_EQ(type_name(kTfLiteInt64), "INT64");
EXPECT_EQ(type_name(kTfLiteBool), "BOOL");
EXPECT_EQ(type_name(kTfLiteComplex64), "COMPLEX64");
EXPECT_EQ(type_name(kTfLiteComplex128), "COMPLEX128");
EXPECT_EQ(type_name(kTfLiteString), "STRING");
EXPECT_EQ(type_name(kTfLiteResource), "RESOURCE");
EXPECT_EQ(type_name(kTfLiteVariant), "VARIANT");
EXPECT_EQ(type_name(kTfLiteInt4), "INT4");
}
TEST(Quantization, TestQuantizationFree) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
t.dims = nullptr;
t.dims_signature = nullptr;
t.quantization.type = kTfLiteAffineQuantization;
t.sparsity = nullptr;
auto* params = reinterpret_cast<TfLiteAffineQuantization*>(
malloc(sizeof(TfLiteAffineQuantization)));
params->scale = TfLiteFloatArrayCreate(3);
params->zero_point = TfLiteIntArrayCreate(3);
t.quantization.params = reinterpret_cast<void*>(params);
TfLiteTensorFree(&t);
}
TEST(Sparsity, TestSparsityFree) {
TfLiteTensor t = {};
t.allocation_type = kTfLiteArenaRw;
t.dims = nullptr;
t.dims_signature = nullptr;
t.sparsity = static_cast<TfLiteSparsity*>(malloc(sizeof(TfLiteSparsity)));
t.sparsity->traversal_order = TfLiteIntArrayCreate(2);
t.sparsity->block_map = nullptr;
t.sparsity->dim_metadata = static_cast<TfLiteDimensionMetadata*>(
malloc(sizeof(TfLiteDimensionMetadata) * 2));
t.sparsity->dim_metadata_size = 2;
t.sparsity->dim_metadata[0].format = kTfLiteDimDense;
t.sparsity->dim_metadata[0].dense_size = 4;
t.sparsity->dim_metadata[1].format = kTfLiteDimSparseCSR;
t.sparsity->dim_metadata[1].array_segments = TfLiteIntArrayCreate(2);
t.sparsity->dim_metadata[1].array_indices = TfLiteIntArrayCreate(3);
TfLiteTensorFree(&t);
}
TEST(TensorCopy, TensorCopy_VALID) {
const int kNumElements = 32;
const int kBytes = sizeof(float) * kNumElements;
TfLiteTensor src;
TfLiteTensor dst;
TfLiteDelegate delegate;
memset(&delegate, 0, sizeof(delegate));
memset(&src, 0, sizeof(TfLiteTensor));
memset(&dst, 0, sizeof(TfLiteTensor));
src.data.raw = static_cast<char*>(malloc(kBytes));
for (int i = 0; i < kNumElements; ++i) {
src.data.f[i] = i;
}
dst.data.raw = static_cast<char*>(malloc(kBytes));
src.bytes = dst.bytes = kBytes;
src.delegate = &delegate;
src.data_is_stale = true;
src.allocation_type = kTfLiteDynamic;
src.type = kTfLiteFloat32;
src.dims = TfLiteIntArrayCreate(1);
src.dims->data[0] = 1;
src.dims_signature = TfLiteIntArrayCopy(src.dims);
src.buffer_handle = 5;
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(&src, &dst));
EXPECT_EQ(dst.bytes, src.bytes);
EXPECT_EQ(dst.delegate, src.delegate);
EXPECT_EQ(dst.data_is_stale, src.data_is_stale);
EXPECT_EQ(dst.type, src.type);
EXPECT_EQ(1, TfLiteIntArrayEqual(dst.dims, src.dims));
EXPECT_EQ(dst.buffer_handle, src.buffer_handle);
for (int i = 0; i < kNumElements; ++i) {
EXPECT_EQ(dst.data.f[i], src.data.f[i]);
}
TfLiteTensorFree(&src);
free(dst.data.raw);
TfLiteTensorFree(&dst);
}
TEST(TensorCopy, TensorCopy_INVALID) {
TfLiteTensor src;
TfLiteTensor dst;
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(&src, nullptr));
EXPECT_EQ(kTfLiteOk, TfLiteTensorCopy(nullptr, &dst));
src.bytes = 10;
dst.bytes = 12;
EXPECT_EQ(kTfLiteError, TfLiteTensorCopy(&src, &dst));
}
TEST(TestTensorRealloc, TensorReallocMoreBytesSucceeds) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 6;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
ASSERT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, new_bytes);
ASSERT_THAT(std::vector<int>(tensor->data.f, tensor->data.f + num_elements),
ElementsAreArray({0, 0, 0, 0}));
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocLessBytesSucceeds) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 2;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
tensor->quantization.type = kTfLiteNoQuantization;
ASSERT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, new_bytes);
ASSERT_THAT(std::vector<int>(tensor->data.f, tensor->data.f + 2),
ElementsAreArray({0, 0}));
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocNonDynamicNoChange) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 6;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteArenaRw;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
EXPECT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, bytes);
EXPECT_THAT(std::vector<int>(tensor->data.i32, tensor->data.i32 + 4),
ElementsAreArray({0, 0, 0, 0}));
free(tensor->data.data);
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocNumByte0) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const int new_num_elements = 0;
const size_t bytes = sizeof(float) * num_elements;
const size_t new_bytes = sizeof(float) * new_num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->quantization.type = kTfLiteNoQuantization;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
EXPECT_EQ(TfLiteTensorRealloc(new_bytes, tensor), kTfLiteOk);
EXPECT_EQ(tensor->bytes, 0);
TfLiteTensorFree(tensor);
free(tensor);
}
TEST(TestTensorRealloc, TensorReallocLargeBytesFails) {
const TfLiteType t = kTfLiteFloat32;
const int num_elements = 4;
const size_t bytes = sizeof(float) * num_elements;
float* data = (float*)malloc(bytes);
memset(data, 0, bytes);
TfLiteIntArray* dims = ConvertVectorToTfLiteIntArray({num_elements});
TfLiteTensor* tensor = (TfLiteTensor*)malloc(sizeof(TfLiteTensor));
tensor->sparsity = nullptr;
tensor->bytes = bytes;
tensor->type = t;
tensor->data.data = data;
tensor->allocation_type = kTfLiteDynamic;
tensor->dims = dims;
tensor->dims_signature = TfLiteIntArrayCopy(dims);
tensor->quantization.type = kTfLiteNoQuantization;
const size_t large_bytes = std::numeric_limits<size_t>::max() - 16;
EXPECT_EQ(TfLiteTensorRealloc(large_bytes, tensor), kTfLiteError);
TfLiteTensorFree(tensor);
free(data);
free(tensor);
}
TEST(TestTfLiteTensorGetAllocationStrategy, MemNoneIsAllocatedWithNone) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyNone);
}
TEST(TestTfLiteTensorGetAllocationStrategy, MmapRoIsAllocatedWithMMap) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyMMap);
}
TEST(TestTfLiteTensorGetAllocationStrategy, ArenaRwIsAllocatedWithArena) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyArena);
}
TEST(TestTfLiteTensorGetAllocationStrategy,
ArenaRwPersistentIsAllocatedWithArena) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyArena);
}
TEST(TestTfLiteTensorGetAllocationStrategy, DynamicIsAllocatedWithMalloc) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyMalloc);
}
TEST(TestTfLiteTensorGetAllocationStrategy,
PersistentRoIsAllocatedWithUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyUnknown);
}
TEST(TestTfLiteTensorGetAllocationStrategy, CustomIsAllocatedWithUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyUnknown);
}
TEST(TestTfLiteTensorGetAllocationStrategy, VariantObjectIsAllocatedWithNew) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetAllocationStrategy(&t),
kTfLiteAllocationStrategyNew);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
MemNoneBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
MmapRoBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetBufferAddressStability, ArenaRwBufferIsStableUnstable) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnstable);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
ArenaRwPersistentBufferIsStableUnstable) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnstable);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
DynamicBufferIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
PersistentRoBufferIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetBufferAddressStability, CustomBufferIsStableUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityUnknown);
}
TEST(TestTfLiteTensorGetBufferAddressStability,
VariantObjectBufferIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetBufferAddressStability(&t),
kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, MemNoneDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, MmapRoDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, ArenaRwDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability,
ArenaRwPersistentDataIsStableAcrossRuns) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityAcrossRuns);
}
TEST(TestTfLiteTensorGetDataStability, DynamicDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability, PersistentRoDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataStability, CustomDataIsStableUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilityUnknown);
}
TEST(TestTfLiteTensorGetDataStability, VariantObjectDataIsStableSingleRun) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetDataStability(&t), kTfLiteRunStabilitySingleRun);
}
TEST(TestTfLiteTensorGetDataKnownStep, MemNoneDataIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetDataKnownStep, MmapRoDataIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetDataKnownStep, ArenaRwDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, ArenaRwPersistentDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, DynamicDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetDataKnownStep, PersistentRoDataIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetDataKnownStep, CustomDataIsKnownAtUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepUnknown);
}
TEST(TestTfLiteTensorGetDataKnownStep, VariantObjectDataIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetDataKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetShapeKnownStep, MemNoneShapeIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMemNone;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetShapeKnownStep, MmapRoShapeIsKnownAtInit) {
TfLiteTensor t;
t.allocation_type = kTfLiteMmapRo;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepInit);
}
TEST(TestTfLiteTensorGetShapeKnownStep, ArenaRwShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRw;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep,
ArenaRwPersistentShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLiteArenaRwPersistent;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep, DynamicShapeIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteDynamic;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepEval);
}
TEST(TestTfLiteTensorGetShapeKnownStep, PersistentRoShapeIsKnownAtPrepare) {
TfLiteTensor t;
t.allocation_type = kTfLitePersistentRo;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepPrepare);
}
TEST(TestTfLiteTensorGetShapeKnownStep, CustomShapeIsKnownAtUnknown) {
TfLiteTensor t;
t.allocation_type = kTfLiteCustom;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepUnknown);
}
TEST(TestTfLiteTensorGetShapeKnownStep, VariantObjectShapeIsKnownAtEval) {
TfLiteTensor t;
t.allocation_type = kTfLiteVariantObject;
EXPECT_EQ(TfLiteTensorGetShapeKnownStep(&t), kTfLiteRunStepEval);
}
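// Test doubles: VariantFoo's copy constructor sets `copied` so the tests
// below can observe whether TfLiteTensorCopy invoked the derived copy
// constructor rather than reusing the source object.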
struct Foo {
int data;
bool copied;
};
class VariantFoo : public AbstractVariantData<VariantFoo> {
public:
explicit VariantFoo(int number) : foo_data_(Foo{number, false}) {}
VariantFoo(const VariantFoo& other) {
foo_data_ = other.foo_data_;
foo_data_.copied = true;
}
int GetFooInt() { return foo_data_.data; }
bool GetFooCopied() { return foo_data_.copied; }
private:
Foo foo_data_;
};
class VariantFoo2 : public AbstractVariantData<VariantFoo2> {
public:
explicit VariantFoo2(int number, float float_number)
: foo_data_(Foo{number, false}), float_data_(float_number) {}
VariantFoo2(const VariantFoo2& other) {
foo_data_ = other.foo_data_;
foo_data_.copied = true;
float_data_ = other.float_data_;
}
int GetFooInt() { return foo_data_.data; }
bool GetFooCopied() { return foo_data_.copied; }
float GetFloatData() { return float_data_; }
private:
Foo foo_data_;
float float_data_;
};
TEST(TestTfLiteReallocWithObject, ConstructSingleParamVariant) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteOk);
ASSERT_EQ(reinterpret_cast<VariantFoo*>(t->data.data)->GetFooInt(), 3);
ASSERT_EQ(t->type, kTfLiteVariant);
ASSERT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject, ConstructMultiParamVariant) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 3, 1.0)),
kTfLiteOk);
VariantFoo2* data = reinterpret_cast<VariantFoo2*>(t->data.data);
ASSERT_EQ(data->GetFooInt(), 3);
ASSERT_EQ(data->GetFloatData(), 1.0);
ASSERT_EQ(t->type, kTfLiteVariant);
ASSERT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject,
ConstructSingleParamVariantWithAlreadyAllocated) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteOk);
void* before_address = t->data.data;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 5)), kTfLiteOk);
EXPECT_EQ(t->data.data, before_address);
EXPECT_EQ(reinterpret_cast<VariantFoo*>(t->data.data)->GetFooInt(), 5);
EXPECT_EQ(t->type, kTfLiteVariant);
EXPECT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject,
     ConstructMultiParamVariantWithAlreadyAllocated) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteVariant;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 3, 1.0)),
kTfLiteOk);
void* before_address = t->data.data;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo2, int, float>(t.get(), 5, 2.0)),
kTfLiteOk);
EXPECT_EQ(t->data.data, before_address);
VariantFoo2* data = reinterpret_cast<VariantFoo2*>(t->data.data);
EXPECT_EQ(data->GetFooInt(), 5);
EXPECT_EQ(data->GetFloatData(), 2.0);
EXPECT_EQ(t->type, kTfLiteVariant);
EXPECT_EQ(t->allocation_type, kTfLiteVariantObject);
}
TEST(TestTfLiteReallocWithObject, NonVariantTypeError) {
TensorUniquePtr t = BuildTfLiteTensor();
t->type = kTfLiteInt32;
ASSERT_EQ((TfLiteTensorVariantRealloc<VariantFoo>(t.get(), 3)), kTfLiteError);
}
TEST(TestVariantData, CopyVariantTensorCallsDerivedCopyCstor) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->allocation_type = kTfLiteVariantObject;
tensor->type = kTfLiteVariant;
}
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
auto* src_variant_data =
reinterpret_cast<VariantFoo*>(src_variant_tensor->data.data);
EXPECT_EQ(src_variant_data->GetFooInt(), 1);
EXPECT_EQ(src_variant_data->GetFooCopied(), false);
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_data->GetFooCopied(), true);
}
TEST(TestVariantData, CopyVariantTensorCallsDerivedCopyCstorWithAllocation) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->allocation_type = kTfLiteVariantObject;
tensor->type = kTfLiteVariant;
}
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(dst_variant_tensor.get(), 2)),
kTfLiteOk);
void* before_address = dst_variant_tensor->data.data;
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_tensor->data.data, before_address);
}
TEST(TestVariantData, CopyTensorToNonVariantObjectSetsAllocationType) {
TensorUniquePtr src_variant_tensor = BuildTfLiteTensor();
TensorUniquePtr dst_variant_tensor = BuildTfLiteTensor();
for (TfLiteTensor* tensor :
{src_variant_tensor.get(), dst_variant_tensor.get()}) {
tensor->dims = ConvertVectorToTfLiteIntArray({0});
tensor->type = kTfLiteVariant;
}
src_variant_tensor->allocation_type = kTfLiteVariantObject;
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(src_variant_tensor.get(), 1)),
kTfLiteOk);
ASSERT_EQ(
(TfLiteTensorVariantRealloc<VariantFoo>(dst_variant_tensor.get(), 2)),
kTfLiteOk);
void* before_address = dst_variant_tensor->data.data;
ASSERT_EQ(
TfLiteTensorCopy(src_variant_tensor.get(), dst_variant_tensor.get()),
kTfLiteOk);
ASSERT_EQ(dst_variant_tensor->allocation_type, kTfLiteVariantObject);
auto* dst_variant_data =
reinterpret_cast<VariantFoo*>(dst_variant_tensor->data.data);
EXPECT_EQ(dst_variant_data->GetFooInt(), 1);
EXPECT_EQ(dst_variant_tensor->data.data, before_address);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/common.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/c/common_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ececb0ff-9d9f-410b-8980-048362fe68f2 | cpp | google/libphonenumber | unicodestring | cpp/src/phonenumbers/unicodestring.cc | cpp/test/phonenumbers/unicodestring_test.cc | #include "phonenumbers/unicodestring.h"
#include <algorithm>
#include <cassert>
#include <iterator>
using std::advance;
using std::equal;
namespace i18n {
namespace phonenumbers {
UnicodeString& UnicodeString::operator=(const UnicodeString& src) {
if (&src != this) {
invalidateCachedIndex();
text_ = src.text_;
}
return *this;
}
bool UnicodeString::operator==(const UnicodeString& rhs) const {
  // Use the four-iterator overload (C++14) so that operands of different
  // lengths compare unequal instead of reading past the shorter range.
  return equal(text_.begin(), text_.end(), rhs.text_.begin(), rhs.text_.end());
}
void UnicodeString::append(const UnicodeString& unicode_string) {
invalidateCachedIndex();
for (UnicodeString::const_iterator it = unicode_string.begin();
it != unicode_string.end(); ++it) {
append(*it);
}
}
int UnicodeString::indexOf(char32 codepoint) const {
int pos = 0;
for (UnicodeText::const_iterator it = text_.begin(); it != text_.end();
++it, ++pos) {
if (*it == codepoint) {
return pos;
}
}
return -1;
}
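// Replaces the range [start, start + length) by rebuilding the underlying
// UnicodeText in three appends (prefix, replacement, suffix) rather than
// splicing in place.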
void UnicodeString::replace(int start, int length, const UnicodeString& src) {
assert(length >= 0 && length <= this->length());
invalidateCachedIndex();
UnicodeText::const_iterator start_it = text_.begin();
advance(start_it, start);
UnicodeText unicode_text;
unicode_text.append(text_.begin(), start_it);
unicode_text.append(src.text_);
advance(start_it, length);
unicode_text.append(start_it, text_.end());
text_ = unicode_text;
}
void UnicodeString::setCharAt(int pos, char32 c) {
assert(pos < length());
invalidateCachedIndex();
UnicodeText::const_iterator pos_it = text_.begin();
advance(pos_it, pos);
UnicodeText unicode_text;
unicode_text.append(text_.begin(), pos_it);
unicode_text.push_back(c);
++pos_it;
unicode_text.append(pos_it, text_.end());
text_ = unicode_text;
}
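// Returns a substring that points into this string's storage via PointTo()
// (no copy), so the result is only valid while this string is alive.
// length == std::numeric_limits<int>::max() means "through the end".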
UnicodeString UnicodeString::tempSubString(int start, int length) const {
const int unicodestring_length = this->length();
if (length == std::numeric_limits<int>::max()) {
length = unicodestring_length - start;
}
if (start > unicodestring_length || length > unicodestring_length) {
return UnicodeString("");
}
UnicodeText::const_iterator start_it = text_.begin();
advance(start_it, start);
UnicodeText::const_iterator end_it = start_it;
advance(end_it, length);
UnicodeString substring;
substring.text_.PointTo(start_it, end_it);
return substring;
}
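// Random access emulated over iterators: the last (index, iterator) pair is
// cached, so monotonically increasing accesses are amortized O(1). Mutating
// methods call invalidateCachedIndex() to reset the cache.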
char32 UnicodeString::operator[](int index) const {
assert(index < length());
if (cached_index_ == -1 || cached_index_ > index) {
cached_it_ = text_.begin();
cached_index_ = 0;
}
for (; cached_index_ < index; ++cached_index_, ++cached_it_) {}
return *cached_it_;
}
}  // namespace phonenumbers
} | #include <iostream>
#include <gtest/gtest.h>
#include "phonenumbers/unicodestring.h"
using std::ostream;
namespace i18n {
namespace phonenumbers {
ostream& operator<<(ostream& out, const UnicodeString& s) {
string utf8;
s.toUTF8String(utf8);
out << utf8;
return out;
}
TEST(UnicodeString, ToUTF8StringWithEmptyString) {
UnicodeString s;
string utf8;
s.toUTF8String(utf8);
EXPECT_EQ("", utf8);
}
TEST(UnicodeString, ToUTF8String) {
UnicodeString s("hello");
string utf8;
s.toUTF8String(utf8);
EXPECT_EQ("hello", utf8);
}
TEST(UnicodeString, ToUTF8StringWithNonAscii) {
  UnicodeString s("\xEF\xBC\x95\xEF\xBC\x93" /* fullwidth "53" (U+FF15 U+FF13) */);
  string utf8;
  s.toUTF8String(utf8);
  EXPECT_EQ("\xEF\xBC\x95\xEF\xBC\x93", utf8);
}
TEST(UnicodeString, AppendCodepoint) {
UnicodeString s;
s.append('h');
ASSERT_EQ(UnicodeString("h"), s);
s.append('e');
EXPECT_EQ(UnicodeString("he"), s);
}
TEST(UnicodeString, AppendCodepointWithNonAscii) {
  UnicodeString s;
  s.append(0xFF15 /* fullwidth '5' */);
  ASSERT_EQ(UnicodeString("\xEF\xBC\x95" /* fullwidth "5" */), s);
  s.append(0xFF13 /* fullwidth '3' */);
  EXPECT_EQ(UnicodeString("\xEF\xBC\x95\xEF\xBC\x93" /* fullwidth "53" */), s);
}
TEST(UnicodeString, AppendUnicodeString) {
UnicodeString s;
s.append(UnicodeString("he"));
ASSERT_EQ(UnicodeString("he"), s);
s.append(UnicodeString("llo"));
EXPECT_EQ(UnicodeString("hello"), s);
}
TEST(UnicodeString, AppendUnicodeStringWithNonAscii) {
  UnicodeString s;
  s.append(UnicodeString("\xEF\xBC\x95" /* fullwidth "5" */));
  ASSERT_EQ(UnicodeString("\xEF\xBC\x95"), s);
  s.append(UnicodeString("\xEF\xBC\x93" /* fullwidth "3" */));
  EXPECT_EQ(UnicodeString("\xEF\xBC\x95\xEF\xBC\x93" /* fullwidth "53" */), s);
}
TEST(UnicodeString, IndexOf) {
UnicodeString s("hello");
EXPECT_EQ(0, s.indexOf('h'));
EXPECT_EQ(2, s.indexOf('l'));
EXPECT_EQ(4, s.indexOf('o'));
}
TEST(UnicodeString, IndexOfWithNonAscii) {
  UnicodeString s("\xEF\xBC\x95\xEF\xBC\x93" /* fullwidth "53" */);
  EXPECT_EQ(1, s.indexOf(0xFF13 /* fullwidth '3' */));
}
TEST(UnicodeString, ReplaceWithEmptyInputs) {
UnicodeString s;
s.replace(0, 0, UnicodeString(""));
EXPECT_EQ(UnicodeString(""), s);
}
TEST(UnicodeString, ReplaceWithEmptyReplacement) {
UnicodeString s("hello");
s.replace(0, 5, UnicodeString(""));
EXPECT_EQ(UnicodeString(""), s);
}
TEST(UnicodeString, ReplaceBeginning) {
UnicodeString s("hello world");
s.replace(0, 5, UnicodeString("HELLO"));
EXPECT_EQ(UnicodeString("HELLO world"), s);
}
TEST(UnicodeString, ReplaceMiddle) {
UnicodeString s("hello world");
s.replace(5, 1, UnicodeString("AB"));
EXPECT_EQ(UnicodeString("helloABworld"), s);
}
TEST(UnicodeString, ReplaceEnd) {
UnicodeString s("hello world");
s.replace(10, 1, UnicodeString("AB"));
EXPECT_EQ(UnicodeString("hello worlAB"), s);
}
TEST(UnicodeString, ReplaceWithNonAscii) {
  UnicodeString s("hello world");
  s.replace(3, 2, UnicodeString("\xEF\xBC\x91\xEF\xBC\x90" /* fullwidth "10" */));
  EXPECT_EQ(UnicodeString("hel\xEF\xBC\x91\xEF\xBC\x90 world"), s);
}
TEST(UnicodeString, SetCharBeginning) {
UnicodeString s("hello");
s.setCharAt(0, 'H');
EXPECT_EQ(UnicodeString("Hello"), s);
}
TEST(UnicodeString, SetCharMiddle) {
UnicodeString s("hello");
s.setCharAt(2, 'L');
EXPECT_EQ(UnicodeString("heLlo"), s);
}
TEST(UnicodeString, SetCharEnd) {
UnicodeString s("hello");
s.setCharAt(4, 'O');
EXPECT_EQ(UnicodeString("hellO"), s);
}
TEST(UnicodeString, SetCharWithNonAscii) {
  UnicodeString s("hello");
  s.setCharAt(4, 0xFF10 /* fullwidth '0' */);
  EXPECT_EQ(UnicodeString("hell\xEF\xBC\x90" /* "hell" + fullwidth '0' */), s);
}
TEST(UnicodeString, TempSubStringWithEmptyString) {
EXPECT_EQ(UnicodeString(""), UnicodeString().tempSubString(0, 0));
}
TEST(UnicodeString, TempSubStringWithInvalidInputs) {
UnicodeString s("hello");
EXPECT_EQ(UnicodeString(""), s.tempSubString(6));
EXPECT_EQ(UnicodeString(""), s.tempSubString(2, 6));
}
TEST(UnicodeString, TempSubString) {
UnicodeString s("hello");
EXPECT_EQ(UnicodeString(""), s.tempSubString(0, 0));
EXPECT_EQ(UnicodeString("h"), s.tempSubString(0, 1));
EXPECT_EQ(UnicodeString("hello"), s.tempSubString(0, 5));
EXPECT_EQ(UnicodeString("llo"), s.tempSubString(2, 3));
}
TEST(UnicodeString, TempSubStringWithNoLength) {
UnicodeString s("hello");
EXPECT_EQ(UnicodeString("hello"), s.tempSubString(0));
EXPECT_EQ(UnicodeString("llo"), s.tempSubString(2));
}
TEST(UnicodeString, TempSubStringWithNonAscii) {
  UnicodeString s("hel\xEF\xBC\x91\xEF\xBC\x90" /* "hel" + fullwidth "10" */);
  EXPECT_EQ(UnicodeString("\xEF\xBC\x91" /* fullwidth "1" */), s.tempSubString(3, 1));
}
TEST(UnicodeString, OperatorEqual) {
UnicodeString s("hello");
s = UnicodeString("Hello");
EXPECT_EQ(UnicodeString("Hello"), s);
}
TEST(UnicodeString, OperatorEqualWithNonAscii) {
  UnicodeString s("hello");
  s = UnicodeString("hel\xEF\xBC\x91\xEF\xBC\x90" /* "hel" + fullwidth "10" */);
  EXPECT_EQ(UnicodeString("hel\xEF\xBC\x91\xEF\xBC\x90"), s);
}
TEST(UnicodeString, OperatorBracket) {
UnicodeString s("hello");
EXPECT_EQ('h', s[0]);
EXPECT_EQ('e', s[1]);
EXPECT_EQ('l', s[2]);
EXPECT_EQ('l', s[3]);
EXPECT_EQ('o', s[4]);
}
TEST(UnicodeString, OperatorBracketWithNonAscii) {
  UnicodeString s("hel\xEF\xBC\x91\xEF\xBC\x90" /* "hel" + fullwidth "10" */);
  EXPECT_EQ('h', s[0]);
  EXPECT_EQ('e', s[1]);
  EXPECT_EQ('l', s[2]);
  EXPECT_EQ(0xFF11 /* fullwidth '1' */, s[3]);
  EXPECT_EQ(0xFF10 /* fullwidth '0' */, s[4]);
}
TEST(UnicodeString, OperatorBracketWithIteratorCacheInvalidation) {
UnicodeString s("hello");
EXPECT_EQ('h', s[0]);
EXPECT_EQ('e', s[1]);
s.setCharAt(1, 'E');
EXPECT_EQ(UnicodeString("hEllo"), s);
EXPECT_EQ('E', s[1]);
EXPECT_EQ('h', s[0]);
EXPECT_EQ('o', s[4]);
}
}  // namespace phonenumbers
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/unicodestring.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/unicodestring_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
be6c354f-375c-4008-a3fc-46f4eaa2f4f4 | cpp | tensorflow/tensorflow | saved_model_export | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/constants.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/unfreeze_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::kTfSavedModelInitializerInitType;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::GetLocalTmpFileName;
using ::tensorflow::AssetFileDef;
using ::tensorflow::FunctionDefLibrary;
using ::tensorflow::FunctionLibraryDefinition;
using ::tensorflow::Graph;
using ::tensorflow::GraphDef;
using ::tensorflow::Node;
using ::tensorflow::NodeDef;
using ::tensorflow::OpRegistry;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::tensorflow::quantization::RunPasses;
using ::tensorflow::quantization::UnfreezeConstantsAndSaveVariables;
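// Returns the first control-ret node whose name contains `contains`, or ""
// if none matches.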
std::string GetNodeName(const std::vector<std::string>& control_ret_node_names,
const absl::string_view contains) {
for (const std::string& node_name : control_ret_node_names) {
if (absl::StrContains(node_name, contains)) {
VLOG(1) << "Node found: " << node_name << ", contains: " << contains;
return node_name;
}
}
VLOG(1) << "Could not find node whose name conatins: " << contains;
return "";
}
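// Scans the GraphDef for the _Arg node whose tf_saved_model index path
// includes the file-prefix key and returns its output tensor name
// ("<node_name>:0"), or "" if there is no such argument.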
std::string FindFilePrefixTensorName(const GraphDef& graph_def) {
for (const NodeDef& node_def : graph_def.node()) {
if (node_def.op() == FunctionLibraryDefinition::kArgOp) {
const auto index_path_attr_itr =
node_def.attr().find(kTfSavedModelIndexPathAttr.str());
if (index_path_attr_itr != node_def.attr().end()) {
const auto& index_paths = index_path_attr_itr->second.list().s();
if (absl::c_find(index_paths, kTfFilePrefix.str()) !=
index_paths.end()) {
return absl::StrCat(node_def.name(), ":0");
}
}
}
}
return "";
}
}  // namespace
absl::StatusOr<ExportedModel> CreateExportedModel(
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
absl::string_view debug_name_prefix,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND, ModuleOp module_op) {
TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName());
  const ExportOptions export_opts = {
      /*duplicate_shape_determining_constants=*/true,
      /*unfreeze_constants=*/false, checkpoint_dir,
      /*debug_name=*/absl::StrCat(debug_name_prefix, kExportStepSuffix)};
TF_ASSIGN_OR_RETURN(const SmallVector<AssetFileDef> asset_file_defs,
RunExportPasses(export_opts, ctx, module_op));
return ConvertMlirModuleToExportedModel(
module_op, checkpoint_dir, function_aliases,
{asset_file_defs.begin(), asset_file_defs.end()});
}
ExportedModel CreateExportedModelFromGraphDef(
GraphDef&& graph_def, const absl::string_view init_node_name,
const absl::string_view checkpoint_dir,
const std::optional<SaverDef> saver_def,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
ExportedModel exported_model{};
*exported_model.mutable_graph_def() = graph_def;
exported_model.set_init_node_name(std::string(init_node_name));
exported_model.set_checkpoint_dir(std::string(checkpoint_dir));
exported_model.mutable_function_aliases()->insert(function_aliases.begin(),
function_aliases.end());
for (const AssetFileDef& asset_file_def : asset_file_defs) {
*exported_model.mutable_asset_file_defs()->Add() = asset_file_def;
}
if (saver_def != std::nullopt) {
*exported_model.mutable_saver_def() = *std::move(saver_def);
}
return exported_model;
}
void AddExportPasses(mlir::PassManager& pm,
const bool duplicate_shape_determining_constants) {
AddCallModuleSerializationPasses(pm);
if (duplicate_shape_determining_constants) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateDuplicateShapeDeterminingConstantsPass());
}
pm.addPass(mlir::quant::CreateInsertMainFunctionPass());
pm.addPass(mlir::quant::CreateLiftHashTableOpsAsArgsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addPass(mlir::CreateBreakUpIslandsPass());
pm.addPass(mlir::quant::CreateMergeInitializerFunctionOpsToMainPass());
pm.addPass(mlir::quant::CreateMergeSaveFunctionOpsToMainPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateMergeDuplicateResourceOpsPass());
pm.addPass(mlir::TF::CreateStripNoinlineAttributePass());
}
absl::StatusOr<std::optional<SaverDef>> CreateSaverDef(
const std::vector<std::string>& control_ret_node_names,
const GraphDef& graph_def) {
const std::string filename_tensor_name = FindFilePrefixTensorName(graph_def);
const std::string restore_op_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerRestoreType);
const std::string save_node_name =
GetNodeName(control_ret_node_names, kTfQuantSaveOpName);
const std::vector<absl::string_view> fields = {
filename_tensor_name, restore_op_name, save_node_name};
const auto is_empty_predicate = [](const absl::string_view s) {
return s.empty();
};
if (absl::c_all_of(fields, is_empty_predicate)) {
return std::nullopt;
} else if (absl::c_none_of(fields, is_empty_predicate)) {
SaverDef saver_def{};
saver_def.set_version(SaverDef::V2);
saver_def.set_filename_tensor_name(filename_tensor_name);
saver_def.set_restore_op_name(restore_op_name);
saver_def.set_save_tensor_name(absl::StrCat(save_node_name, ":0"));
return saver_def;
} else {
return absl::InternalError(
absl::StrCat("Failed to create SaverDef. Fields should be either all "
"empty strings or all non-empty strings. Got fields: ",
absl::StrJoin(fields, ",")));
}
}
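// Worked example of the all-or-nothing contract above (hypothetical graph):
// a GraphDef with an "_Arg" node "foo" whose index path carries kTfFilePrefix,
// combined with control returns {"restore_op_0", "tf_quant__save_op_0"},
// produces a V2 SaverDef with filename_tensor_name = "foo:0",
// restore_op_name = "restore_op_0", and save_tensor_name =
// "tf_quant__save_op_0:0". If none of the three fields resolve, the function
// returns std::nullopt; if only some resolve, it returns an InternalError.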
absl::StatusOr<ExportedModel> ConvertMlirModuleToExportedModel(
const mlir::ModuleOp module_op, const absl::string_view checkpoint_dir,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
const tensorflow::GraphExportConfig config{};
FunctionLibraryDefinition flib_def{OpRegistry::Global(),
FunctionDefLibrary()};
std::unique_ptr<Graph> graph;
absl::flat_hash_set<Node*> control_ret_nodes{};
TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module_op, config, &graph, &flib_def, &control_ret_nodes));
GraphDef graph_def{};
graph->ToGraphDef(&graph_def);
std::vector<std::string> control_ret_node_names{};
for (Node* node : control_ret_nodes) {
control_ret_node_names.push_back(node->name());
}
const std::string init_node_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerInitType);
TF_ASSIGN_OR_RETURN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
return CreateExportedModelFromGraphDef(std::move(graph_def), init_node_name,
checkpoint_dir, std::move(saver_def),
function_aliases, asset_file_defs);
}
absl::StatusOr<SmallVector<AssetFileDef>> RunExportPasses(
const ExportOptions& export_opts, MLIRContext& ctx, ModuleOp module_op) {
if (export_opts.unfreeze_constants) {
TF_RETURN_IF_ERROR(UnfreezeConstantsAndSaveVariables(
export_opts.checkpoint_dir, ctx, module_op));
LOG(INFO) << "Unfrozen constants and saved variables to checkpoint file: "
<< export_opts.checkpoint_dir;
}
TF_RETURN_IF_ERROR(RunPasses(
export_opts.debug_name,
[dup_constants = export_opts.duplicate_shape_determining_constants](
PassManager& pm) { AddExportPasses(pm, dup_constants); },
ctx, module_op));
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
quant::ConvertAssetArgs(module_op);
if (failed(asset_file_defs)) {
return absl::InternalError("Failed to convert asset args.");
}
return *asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::tensorflow::AssetFileDef;
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateExportedModelTest, CreateExportedModelBasicFieldsSet) {
GraphDef graph_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(node { name: "foo" })pb", &graph_def));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
std::move(graph_def), "init_node_name", "checkpoint_dir",
std::nullopt,
{}, {});
ASSERT_THAT(exported_model.graph_def().node(), SizeIs(1));
EXPECT_THAT(exported_model.graph_def().node()[0].name(), StrEq("foo"));
EXPECT_THAT(exported_model.init_node_name(), StrEq("init_node_name"));
EXPECT_THAT(exported_model.checkpoint_dir(), StrEq("checkpoint_dir"));
EXPECT_FALSE(exported_model.has_saver_def());
EXPECT_THAT(exported_model.function_aliases(), IsEmpty());
EXPECT_THAT(exported_model.asset_file_defs(), IsEmpty());
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedFunctionAliases) {
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "",
std::nullopt,
{{"func1", "alias1"}, {"func2", "alias2"}},
{});
ASSERT_THAT(exported_model.function_aliases(), SizeIs(2));
EXPECT_TRUE(exported_model.function_aliases().contains("func1"));
EXPECT_THAT(exported_model.function_aliases().at("func1"), StrEq("alias1"));
EXPECT_TRUE(exported_model.function_aliases().contains("func2"));
EXPECT_THAT(exported_model.function_aliases().at("func2"), StrEq("alias2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedAssetFileDefs) {
AssetFileDef asset1;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname1")pb", &asset1));
AssetFileDef asset2;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname2")pb", &asset2));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "",
std::nullopt, {},
{asset1, asset2});
ASSERT_THAT(exported_model.asset_file_defs(), SizeIs(2));
EXPECT_THAT(exported_model.asset_file_defs()[0].filename(), StrEq("fname1"));
EXPECT_THAT(exported_model.asset_file_defs()[1].filename(), StrEq("fname2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedSaverDef) {
SaverDef saver_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(filename_tensor_name: "my_file")pb", &saver_def));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "", saver_def,
{}, {});
EXPECT_THAT(exported_model.saver_def().filename_tensor_name(), "my_file");
}
TEST(CreateSaverDefTest, CreateValidSaverDef) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node {
name: "foo",
op: "_Arg",
attr {
key: "tf_saved_model.index_path",
value { list { s: "__tf_file_prefix" } }
}
})pb",
&graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
TF_ASSERT_OK_AND_ASSIGN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
ASSERT_NE(saver_def, std::nullopt);
EXPECT_THAT(saver_def->version(), SaverDef::V2);
EXPECT_THAT(saver_def->restore_op_name(), "restore_op_0");
EXPECT_THAT(saver_def->filename_tensor_name(), "foo:0");
EXPECT_THAT(saver_def->save_tensor_name(), "tf_quant__save_op_0:0");
}
TEST(CreateSaverDefTest, ReturnsNulloptIfNoSaverDefRelatedNodesExist) {
TF_ASSERT_OK_AND_ASSIGN(
const std::optional<SaverDef> saver_def,
CreateSaverDef({}, GraphDef()));
EXPECT_EQ(saver_def, std::nullopt);
}
TEST(CreateSaverDefTest, ReturnsErrorStatusIfSaverDefNodesPartiallyExist) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node { name: "foo", op: "_Arg" })pb", &graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
const absl::StatusOr<std::optional<SaverDef>> saver_def =
CreateSaverDef(control_ret_node_names, graph_def);
EXPECT_THAT(
saver_def,
StatusIs(
absl::StatusCode::kInternal,
HasSubstr(
"should be either all empty strings or all non-empty strings")));
}
using ConvertMlirModuleToExportedModelTest =
::mlir::quant::QuantizationTestBase;
TEST_F(ConvertMlirModuleToExportedModelTest, SimpleGraphDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main(%arg: tensor<1x2xf32> {tf_saved_model.index_path = ["input_tensor:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output_tensor:0"]}) attributes {tf.entry_function = {inputs = "input_tensor:0", outputs = "output_tensor:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
tf_executor.fetch %arg : tensor<1x2xf32>
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->graph_def().node(), SizeIs(2));
const auto arg_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Arg"; });
ASSERT_NE(arg_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(arg_node_itr->name(), StrEq("input_tensor"));
ASSERT_TRUE(arg_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(arg_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
arg_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("input_tensor:0"));
const auto retval_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Retval"; });
ASSERT_NE(retval_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(retval_node_itr->name(), StrEq("output_tensor"));
ASSERT_TRUE(retval_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("output_tensor:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, CheckpointDirSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->checkpoint_dir(), StrEq("my_checkpoint_dir"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, FunctionAliasesSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func private @function_1() -> () attributes {tf._original_func_name = "__func_1"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func private @function_2() -> () attributes {tf._original_func_name = "__func_2"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_1}> : () -> ()
%control_1 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_2}> : () -> ()
tf_executor.fetch %control_0, %control_1 : !tf_executor.control, !tf_executor.control
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(
*module_op, "",
{{"alias_1", "function_1"}, {"alias_2", "function_2"}},
{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->function_aliases(), SizeIs(2));
EXPECT_THAT(exported_model->function_aliases().at("alias_1"),
StrEq("function_1"));
EXPECT_THAT(exported_model->function_aliases().at("alias_2"),
StrEq("function_2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, AssetFileDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
AssetFileDef asset_file_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "vocab_file.txt",
tensor_info { name: "arg_0:0" })pb",
&asset_file_def));
const std::vector<AssetFileDef> asset_file_defs = {asset_file_def};
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
asset_file_defs);
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->asset_file_defs(), SizeIs(1));
EXPECT_THAT(exported_model->asset_file_defs()[0].filename(),
StrEq("vocab_file.txt"));
EXPECT_THAT(exported_model->asset_file_defs()[0].tensor_info().name(),
StrEq("arg_0:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest,
InitNodeNameSetToLocOfControlOutput) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%o_0, %c_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%o, %c = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%c_9 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%o, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%c_10 = tf_executor.island(%c_9) wraps "tf.NoOp"() : () -> () loc("init_op_init_all_tables")
tf_executor.fetch %o_0, %c_10 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(),
StrEq("init_op_init_all_tables"));
const auto init_node_itr = llvm::find_if(
exported_model->graph_def().node(), [](const NodeDef& node) {
return node.name() == "init_op_init_all_tables";
});
ASSERT_NE(init_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(init_node_itr->op(), StrEq("NoOp"));
ASSERT_THAT(init_node_itr->input(), SizeIs(1));
EXPECT_THAT(init_node_itr->input()[0],
StrEq("^tf.InitializeTableFromTextFileV2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, InitNodeNotSetIfLocNameMismatch) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%output_0, %control_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%output_1, %control_1 = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%control_2 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%output_1, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%control_3 = tf_executor.island(%control_2) wraps "tf.NoOp"() : () -> () loc("init_ok")
tf_executor.fetch %output_0, %control_3 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(), IsEmpty());
}
TEST_F(ConvertMlirModuleToExportedModelTest,
ConversionFailureWhenNoMainFunction) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @not_main() -> () attributes {tf_saved_model.exported_names = ["not_main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
{},
{});
EXPECT_THAT(exported_model,
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("entry function `main` must be present")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
abd91a2c-ad24-436a-9933-db976e886b9a | cpp | google/arolla | dummy_operator | arolla/expr/operator_loader/dummy_operator.cc | arolla/expr/operator_loader/dummy_operator_test.cc | #include "arolla/expr/operator_loader/dummy_operator.h"
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
using ::arolla::expr::ExprAttributes;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
DummyOperator::DummyOperator(absl::string_view name,
ExprOperatorSignature signature,
absl::string_view doc, QTypePtr result_qtype)
: ExprOperatorWithFixedSignature(
name, signature, doc,
FingerprintHasher("::arolla::operator_loader::DummyOperator")
.Combine(name, signature, doc, result_qtype)
.Finish()),
result_qtype_(std::move(result_qtype)) {}
absl::string_view DummyOperator::py_qvalue_specialization_key() const {
return "::arolla::operator_loader::DummyOperator";
}
absl::StatusOr<ExprAttributes> DummyOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
return ExprAttributes(result_qtype_);
}
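// Behavior sketch (hypothetical operator): the inferred output type is always
// the stored result_qtype_, independent of the input attributes.
//
//   DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
//                    "dummy op docstring", GetArrayQType<int32_t>());
//   // InferAttributes with any two inputs reports ARRAY<INT32>; a wrong
//   // number of inputs yields an InvalidArgument error instead.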
} | #include "arolla/expr/operator_loader/dummy_operator.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/array/qtype/types.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/unit.h"
namespace arolla::operator_loader {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::testing::AllOf;
using ::testing::HasSubstr;
TEST(DummyOperatorTest, GetName) {
DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
ASSERT_THAT(op.display_name(), "my_dummy_op");
}
TEST(DummyOperatorTest, GetDoc) {
DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
ASSERT_THAT(op.doc(), "dummy op docstring");
ASSERT_THAT(op.GetDoc(), IsOkAndHolds("dummy op docstring"));
}
TEST(DummyOperatorTest, GetOutputQType) {
{
DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
EXPECT_EQ(op.GetOutputQType(), GetArrayQType<int32_t>());
}
{
DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetQType<OptionalValue<float>>());
EXPECT_EQ(op.GetOutputQType(), GetQType<OptionalValue<float>>());
}
}
TEST(DummyOperatorTest, QTypeInference) {
{
auto op = std::make_shared<DummyOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
ASSERT_OK_AND_ASSIGN(auto expr,
CallOp(op, {Literal(1.5f), Literal(kUnit)}));
EXPECT_EQ(expr->qtype(), GetArrayQType<int32_t>());
}
{
auto op = std::make_shared<DummyOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Leaf("x"), Leaf("y")}));
EXPECT_EQ(expr->qtype(), GetArrayQType<int32_t>());
}
}
TEST(DummyOperatorTest, InferAttributesIncorrectArity) {
DummyOperator op("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetArrayQType<int32_t>());
EXPECT_THAT(op.InferAttributes({}),
StatusIs(absl::StatusCode::kInvalidArgument,
AllOf(HasSubstr("incorrect number of dependencies"),
HasSubstr("expected 2 but got 0"))));
}
TEST(DummyOperatorTest, Eval) {
auto op = std::make_shared<DummyOperator>(
"my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}}, "dummy op docstring",
GetArrayQType<int32_t>());
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp(op, {Literal(1.5f), Literal(OptionalValue<Unit>())}));
EXPECT_THAT(
Invoke(expr, {}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("my_dummy_op is not a builtin or backend ExprOperator")));
}
TEST(DummyOperatorTest, Fingerprint) {
DummyOperator op1("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetQType<float>());
{
DummyOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetQType<float>());
EXPECT_EQ(op1.fingerprint(), op2.fingerprint());
}
{
DummyOperator op2("another_name", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetQType<float>());
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
DummyOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}},
"dummy op docstring", GetQType<float>());
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
DummyOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"another docstring", GetQType<float>());
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
{
DummyOperator op2("my_dummy_op", ExprOperatorSignature{{"x"}, {"y"}},
"dummy op docstring", GetQType<int32_t>());
EXPECT_NE(op1.fingerprint(), op2.fingerprint());
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/dummy_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/dummy_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
363476b4-86dd-4517-8a90-829165178641 | cpp | tensorflow/tensorflow | array_util | third_party/xla/xla/python/ifrt_proxy/common/array_util.cc | third_party/xla/xla/python/ifrt_proxy/common/array_util_test.cc | #include "xla/python/ifrt_proxy/common/array_util.h"
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
std::string StridesAsStr(const ArrayMemRegion::ByteStrides& strides) {
if (!strides.has_value()) return "strides{nullopt}";
return absl::StrCat("strides{", absl::StrJoin(*strides, ","), "}");
}
}
absl::StatusOr<std::vector<int64_t>> DefaultByteStrides(const DType dtype,
const Shape& shape) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to query byte-strides for: ",
dtype.DebugString()));
}
std::vector<int64_t> result(shape.dims().size());
int64_t stride = *dtype.byte_size();
for (int i = static_cast<int>(shape.dims().size()) - 1; i >= 0; --i) {
result[i] = stride;
stride *= shape.dims()[i];
}
return result;
}
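// Worked example: for a dense, major-to-minor f64 array of shape {4, 3, 5},
// the innermost stride is sizeof(double) = 8 bytes, the middle stride is
// 8 * 5 = 40, and the outermost stride is 40 * 3 = 120:
//
//   DefaultByteStrides(DType(DType::Kind::kF64), Shape({4, 3, 5}));
//   // -> std::vector<int64_t>{120, 40, 8}
//
// Dtypes without a fixed byte size (e.g. kString) are rejected with
// InvalidArgument.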
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromZerothElementPointer(
const void* zeroth_element, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
if (!dtype.byte_size().has_value()) {
return absl::InvalidArgumentError(
absl::StrCat("Unsupported data type to construct ArrayMemRegion: ",
dtype.DebugString()));
}
void* const mem_region_start = const_cast<void*>(zeroth_element);
if (!byte_strides.has_value() ||
(byte_strides->empty() && shape.dims().empty())) {
return ArrayMemRegion(mem_region_start,
dtype.byte_size().value() * shape.num_elements());
}
if (shape.num_elements() == 0) {
return ArrayMemRegion(mem_region_start, 0);
}
if (shape.dims().size() != byte_strides->size()) {
return absl::InvalidArgumentError(
absl::StrCat("Shape has different dimensions from byte_strides: ",
shape.DebugString(), " vs ", StridesAsStr(byte_strides)));
}
uint64_t last_element_byte_offset = 0;
for (int i = 0; i < byte_strides->size(); ++i) {
int stride = (*byte_strides)[i];
if (shape.dims()[i] < 0) {
return absl::InvalidArgumentError(
absl::StrCat("A shape dimension is negative: ", shape.DebugString()));
} else if (shape.dims()[i] == 1) {
continue;
} else if (stride <= 0) {
return absl::UnimplementedError(
absl::StrCat("Negative or zero strides are not fully supported: ",
StridesAsStr(byte_strides)));
} else if (stride % dtype.byte_size().value() != 0) {
return absl::UnimplementedError(absl::StrCat(
"byte_stride[", i, "] is not a multiple of the data-type's size: ",
StridesAsStr(byte_strides), ", dtype=", dtype.DebugString()));
} else {
DCHECK_GT(shape.dims()[i], 0);
last_element_byte_offset += (stride * (shape.dims()[i] - 1));
}
}
return ArrayMemRegion(mem_region_start,
last_element_byte_offset + dtype.byte_size().value());
}
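// Worked size computation for the strided case above: for kS32 with shape
// {4, 3, 5} and byte_strides {120, 40, 8}, the last element starts
// 120*(4-1) + 40*(3-1) + 8*(5-1) = 472 bytes past the zeroth element, so the
// region spans 472 + sizeof(int32_t) = 476 bytes even though only
// 4*3*5*4 = 240 bytes carry payload.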
absl::StatusOr<ArrayMemRegion> ArrayMemRegion::FromMinimalMemRegion(
absl::string_view mem_region, const DType dtype, const Shape& shape,
ByteStrides byte_strides) {
TF_ASSIGN_OR_RETURN(
auto result,
FromZerothElementPointer(mem_region.data(), dtype, shape, byte_strides));
if (result.mem_region().size() != mem_region.size()) {
return absl::InvalidArgumentError(
absl::StrCat("Incorrect size ", result.mem_region().size(), " vs ",
mem_region.size(), "; is provided memory region minimal? ",
dtype.DebugString(), " ", shape.DebugString(), " ",
StridesAsStr(byte_strides)));
}
CHECK_EQ(result.mem_region().data(), mem_region.data());
return result;
}
absl::string_view ArrayMemRegion::mem_region() const {
return absl::string_view(static_cast<char*>(mem_region_start_), nbytes_);
}
void* ArrayMemRegion::zeroth_element() const {
return mem_region_start_;
}
}
}
} | #include "xla/python/ifrt_proxy/common/array_util.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/shape.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace proxy {
namespace {
using ::testing::ElementsAre;
using ::testing::Not;
using ::testing::TestWithParam;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
constexpr DType::Kind kF64 = DType::Kind::kF64;
constexpr DType::Kind kS32 = DType::Kind::kS32;
constexpr DType::Kind kString = DType::Kind::kString;
using Strides = std::vector<int64_t>;
TEST(DefaultByteStrides, ErrorsIfBadDtype) {
EXPECT_THAT(DefaultByteStrides(DType(kString), Shape({1})), Not(IsOk()));
}
TEST(DefaultByteStrides, HappyCase) {
EXPECT_THAT(DefaultByteStrides(DType(kF64), Shape({4, 3, 5})),
IsOkAndHolds(ElementsAre(120, 40, 8)));
}
struct TC {
const std::string test_name;
const DType::Kind dtype_kind;
const std::vector<int64_t> shape;
const std::optional<std::vector<int64_t>> byte_strides;
const std::optional<size_t> expected_size;
};
std::string PrintToString(const TC& tc) { return tc.test_name; }
class ArrayMemRegionSuccess : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionSuccess,
testing::Values(
TC{"DefaultF64", kF64, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesF64", kF64, {4, 3, 5}, Strides({120, 40, 8})},
TC{"NotMajorToMinorF64", kF64, {3, 4, 5}, Strides({40, 120, 8})},
TC{"TransposedF64", kF64, {5, 3, 4}, Strides({8, 40, 120})},
TC{"DefaultS32", kS32, {4, 3, 5}, std::nullopt},
TC{"MajorToMinorStridesS32", kS32, {4, 3, 5}, Strides({60, 20, 4})},
TC{"NotMajorToMinorS32", kS32, {3, 4, 5}, Strides({20, 60, 4})},
TC{"TransposedS32", kS32, {5, 3, 4}, Strides({4, 20, 60})},
TC{"ScalarF64DefaultStrides", kF64, {}, std::nullopt},
TC{"ScalarF64EmptyStrides", kF64, {}, Strides({})},
TC{"NoColsDefaultStrides", kF64, {5, 0}, std::nullopt},
TC{"NoColsStridesNonZero", kF64, {5, 0}, Strides({40, 4})},
TC{"NoColsStridesZero", kF64, {5, 0}, Strides({0, 0})},
TC{"NoRowsDefaultStrides", kF64, {0, 5}, std::nullopt},
TC{"NoRowsStridesNonZero", kF64, {0, 5}, Strides({40, 4})},
TC{"NoRowsStridesZero", kF64, {0, 5}, Strides({0, 0})},
TC{"SingleElementArbitraryStrides", kF64, {1, 1}, Strides({100, 100})},
TC{"OneRowArbitraryColStride", kF64, {1, 5}, Strides({100, 8})},
TC{"OneColArbitraryRowStride", kF64, {5, 1}, Strides({8, 100})},
TC{"OneRowZeroColStride", kF64, {1, 5}, Strides({0, 8})},
TC{"OneColZeroRowStride", kF64, {5, 1}, Strides({8, 0})},
TC{"NonCompactSingleDimension", kS32, {5}, Strides({16}), 68},
TC{"NonCompactDim0", kS32, {4, 3, 5}, Strides({120, 20, 4}), 420},
TC{"PaddedElements", kS32, {4, 3, 5}, Strides({120, 40, 8}), 476}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionSuccess, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
const size_t expected_size = tc.expected_size.value_or(
dtype.byte_size().value() * shape.num_elements());
std::string data(expected_size, 'a');
TF_ASSERT_OK_AND_ASSIGN(auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data.data(), dtype, shape, tc.byte_strides));
EXPECT_EQ(mem_region1.zeroth_element(), data.data());
EXPECT_EQ(mem_region1.mem_region().data(), data.data());
EXPECT_EQ(mem_region1.mem_region().size(), data.size());
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region2, ArrayMemRegion::FromMinimalMemRegion(data, dtype, shape,
tc.byte_strides));
EXPECT_EQ(mem_region2.zeroth_element(), data.data());
EXPECT_EQ(mem_region2.mem_region().data(), data.data());
EXPECT_EQ(mem_region2.mem_region().size(), data.size());
}
class ArrayMemRegionFailure : public TestWithParam<TC> {};
INSTANTIATE_TEST_SUITE_P(
Tests, ArrayMemRegionFailure,
testing::Values(
TC{"OneString", kString, {}, std::nullopt},
TC{"ManyStrings", kString, {5}, std::nullopt},
TC{"NegativeByteStrides", kS32, {4, 3, 5}, Strides({-60, -20, -4})},
TC{"ZeroByteStride", kS32, {5, 5}, Strides({0, 0})},
TC{"SmallerByteStrideThanDataType", kS32, {5, 5}, Strides({1, 1})},
TC{"ByteStrideIndivisibleByDataType", kS32, {5, 5}, Strides({7, 7})},
TC{"NegativeShapeDimension", kS32, {-5, -5}, Strides({20, 4})}),
testing::PrintToStringParamName());
TEST_P(ArrayMemRegionFailure, TestCase) {
const TC tc = GetParam();
const DType dtype(tc.dtype_kind);
const Shape shape(tc.shape);
char const* kSomeAddr = reinterpret_cast<char*>(1UL << 48);
auto mem_region1 = ArrayMemRegion::FromZerothElementPointer(
kSomeAddr, dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region1.status(), Not(IsOk()));
const size_t kSomeSize = 1024;
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
absl::string_view(kSomeAddr, kSomeSize), dtype, shape, tc.byte_strides);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
}
TEST(ArrayMemRegion, FromBadMemRegionSizeFails) {
const DType kDType(kS32);
const Shape kShape({5, 5});
const size_t kDataBytes = kDType.byte_size().value() * kShape.num_elements();
const size_t kExtraSuffixBytes = 10;
std::string data_with_extra_suffix(kDataBytes + kExtraSuffixBytes, 'a');
TF_ASSERT_OK_AND_ASSIGN(
auto mem_region1,
ArrayMemRegion::FromZerothElementPointer(
data_with_extra_suffix.data(), kDType, kShape,
std::nullopt));
EXPECT_EQ(mem_region1.mem_region().data(), data_with_extra_suffix.data());
EXPECT_EQ(mem_region1.zeroth_element(), data_with_extra_suffix.data());
EXPECT_LT(mem_region1.mem_region().size(), data_with_extra_suffix.size());
EXPECT_EQ(mem_region1.mem_region().size(), kDataBytes);
auto mem_region2 = ArrayMemRegion::FromMinimalMemRegion(
data_with_extra_suffix, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region2.status(), Not(IsOk()));
std::string data_without_some_bytes(kDataBytes - kExtraSuffixBytes, 'a');
auto mem_region3 = ArrayMemRegion::FromMinimalMemRegion(
data_without_some_bytes, kDType, kShape,
std::nullopt);
EXPECT_THAT(mem_region3.status(), Not(IsOk()));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/common/array_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt_proxy/common/array_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b79fb86-bfa2-4e02-9747-fa99dd1a5572 | cpp | tensorflow/tensorflow | max_unpooling_2d | tensorflow/lite/kernels/perception/max_unpooling_2d.cc | tensorflow/lite/kernels/perception/max_unpooling_2d_test.cc | #include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace max_unpooling_2d {
constexpr int kDataInputTensor = 0;
constexpr int kIndicesTensor = 1;
constexpr int kOutputTensor = 0;
inline void MaxUnpooling(const RuntimeShape& input_shape,
const float* input_data, const int32_t* indices_data,
const RuntimeShape& output_shape, float* output_data) {
std::memset(output_data, 0, output_shape.FlatSize() * sizeof(float));
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int batch_stride =
output_shape.Dims(1) * output_shape.Dims(2) * output_shape.Dims(3);
for (int batch = 0; batch < batches; ++batch) {
for (int in_y = 0; in_y < input_shape.Dims(1); ++in_y) {
for (int in_x = 0; in_x < input_shape.Dims(2); ++in_x) {
for (int channel = 0; channel < depth; ++channel) {
const auto input_offset =
Offset(input_shape, batch, in_y, in_x, channel);
int idx = indices_data[input_offset];
output_data[batch * batch_stride + idx] = input_data[input_offset];
}
}
}
}
}
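// Example (hypothetical tensors): with input shape {1, 1, 2, 1}, input values
// {13, 4}, indices {1, 6}, and output shape {1, 2, 4, 1}, each input value is
// scattered to its flattened per-batch offset:
//
//   output = {0, 13, 0, 0, 0, 0, 4, 0}
//
// Positions not named by any index stay zero from the initial memset.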
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
auto* params =
reinterpret_cast<const TfLitePoolParams*>(node->custom_initial_data);
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
TF_LITE_ENSURE(context, indices != nullptr);
TF_LITE_ENSURE_EQ(context, NumDimensions(indices), 4);
TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, output->type, kTfLiteFloat32);
TF_LITE_ENSURE_EQ(context, indices->type, kTfLiteInt32);
TF_LITE_ENSURE(context, params->padding != kTfLitePaddingUnknown);
const RuntimeShape input_shape = GetTensorShape(input);
const RuntimeShape indices_shape = GetTensorShape(indices);
TF_LITE_ENSURE_MSG(
context, input_shape.DimensionsCount() == indices_shape.DimensionsCount(),
"Input and indices must have the same shape.");
for (int i = 0; i < input_shape.DimensionsCount(); ++i) {
TF_LITE_ENSURE_MSG(context, input_shape.Dims(i) == indices_shape.Dims(i),
"Input and indices must have the same shape.");
}
int batches = input->dims->data[0];
int height = input->dims->data[1];
int width = input->dims->data[2];
int channels_out = input->dims->data[3];
int out_width, out_height;
if (params->padding == kTfLitePaddingSame) {
out_width = width * params->stride_width;
out_height = height * params->stride_height;
} else {
out_width = (width - 1) * params->stride_width + params->filter_width;
out_height = (height - 1) * params->stride_height + params->filter_height;
}
TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
output_size->data[0] = batches;
output_size->data[1] = out_height;
output_size->data[2] = out_width;
output_size->data[3] = channels_out;
return context->ResizeTensor(context, output, output_size);
}
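// Output-shape sketch for the formulas in Prepare: with 2x2 strides, a 2x2
// input unpools to 4x4 under kTfLitePaddingSame (out = in * stride), while
// kTfLitePaddingValid with a 2x3 filter gives (2 - 1) * 2 + 2 = 4 rows and
// (2 - 1) * 2 + 3 = 5 columns.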
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, kDataInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* indices = GetInput(context, node, kIndicesTensor);
TF_LITE_ENSURE(context, indices != nullptr);
MaxUnpooling(GetTensorShape(input), GetTensorData<float>(input),
GetTensorData<int32_t>(indices), GetTensorShape(output),
GetTensorData<float>(output));
return kTfLiteOk;
}
}
TfLiteRegistration* RegisterMaxUnpooling2D() {
  static TfLiteRegistration reg = {/*init=*/nullptr,
                                   /*free=*/nullptr, max_unpooling_2d::Prepare,
                                   max_unpooling_2d::Eval};
  return &reg;
}
TfLiteRegistration* Register_MAX_UNPOOLING2D() {
return RegisterMaxUnpooling2D();
}
}
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/perception/perception_ops.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using testing::ElementsAreArray;
class MaxUnpoolingOpModel : public SingleOpModel {
public:
MaxUnpoolingOpModel(const TensorData& input, const TensorData& indices,
int stride_height, int stride_width, int filter_height,
int filter_width, TfLitePadding padding,
const TensorData& output) {
input_ = AddInput(input);
indices_ = AddInput(indices);
output_ = AddOutput(output);
TfLitePoolParams params{padding, stride_width, stride_height,
filter_width, filter_height, kTfLiteActNone};
uint8_t* params_ptr = reinterpret_cast<uint8_t*>(¶ms);
std::vector<uint8_t> custom_option;
custom_option.assign(params_ptr, params_ptr + sizeof(TfLitePoolParams));
SetCustomOp("MaxUnpooling2D", custom_option, RegisterMaxUnpooling2D);
BuildInterpreter({GetShape(input_), GetShape(indices_)});
}
void SetInput(const std::vector<float>& data) {
PopulateTensor(input_, data);
}
void SetIndices(const std::vector<int32_t>& data) {
PopulateTensor(indices_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int indices_;
int output_;
};
TEST(MaxUnpoolingOpTest, DimensionMisMatchTest) {
EXPECT_DEATH(MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {1, 1, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}}),
"Input and indices must have the same shape.");
}
TEST(MaxUnpoolingOpTest, SimpleTest) {
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {1, 1, 2, 1}},
{TensorType_INT32, {1, 1, 2, 1}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput({13, 4});
model.SetIndices({1, 6});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 2, 4, 1}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({0, 13, 0, 0, 0, 0, 4, 0}));
}
TEST(MaxUnpoolingOpTest, Strides2x1Test) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 2;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> indices_data{0, 3, 4, 7, 8, 11, 12, 15};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 1,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 2, 2}));
EXPECT_THAT(model.GetOutput(), ElementsAreArray({1, 0, 0, 2, 3, 0, 0, 4, 5, 0,
0, 6, 7, 0, 0, 8}));
}
TEST(MaxUnpoolingOpTest, Strides2x2Test) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 1;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8};
std::vector<int32_t> indices_data{0, 5, 10, 13, 19, 20, 27, 31};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 2,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 8, 1}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray({1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 3, 0, 0, 4, 0, 0,
0, 0, 0, 5, 6, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 8}));
}
TEST(MaxUnpoolingOpTest, PaddingValidTest) {
constexpr int kInputB = 1;
constexpr int kInputH = 2;
constexpr int kInputW = 2;
constexpr int kInputC = 1;
std::vector<float> input_data{7, 10, 20, 19};
std::vector<int32_t> indices_data{6, 9, 16, 19};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 2,
2, 3,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 4, 5, 1}));
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0, 0, 0, 0, 0, 0, 7, 0, 0, 10,
0, 0, 0, 0, 0, 0, 20, 0, 0, 19}));
}
TEST(MaxUnpoolingOpTest, InputWithBatchTest) {
constexpr int kInputB = 2;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
std::vector<int32_t> indices_data{2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 5, 86, 63};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 3,
2, 2,
kTfLitePaddingSame,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 12, 2}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray(
{0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 16, 0, 0, 0, 0, 0, 0,
11, 0, 0, 12, 0, 0, 0, 14, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 17, 18, 0, 0, 0, 30, 0, 20, 0, 0, 0, 0,
0, 0, 21, 0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0,
19, 0, 0, 0, 0, 22, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0,
0, 0, 27, 28, 0, 0, 0, 0, 29, 0, 0, 0, 0, 0, 0, 32, 0, 0,
0, 0, 25, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
}
TEST(MaxUnpoolingOpTest, InputWithBatchAndPaddingValidTest) {
constexpr int kInputB = 2;
constexpr int kInputH = 2;
constexpr int kInputW = 4;
constexpr int kInputC = 2;
std::vector<float> input_data{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32};
std::vector<int32_t> indices_data{2, 23, 8, 9, 12, 15, 40, 43, 44, 47, 72,
75, 80, 79, 62, 65, 0, 1, 30, 7, 14, 35,
42, 21, 68, 69, 50, 51, 56, 5, 86, 63};
MaxUnpoolingOpModel model(
{TensorType_FLOAT32, {kInputB, kInputH, kInputW, kInputC}},
{TensorType_INT32, {kInputB, kInputH, kInputW, kInputC}},
2, 3,
2, 2,
kTfLitePaddingValid,
{TensorType_FLOAT32, {}});
model.SetInput(input_data);
model.SetIndices(indices_data);
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 4, 11, 2}));
EXPECT_THAT(
model.GetOutput(),
ElementsAreArray(
{0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 0, 5, 0, 0, 6, 0, 0,
0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 7, 0, 0, 8, 9, 0, 0, 10, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 16, 0, 0, 0, 0, 0, 0,
11, 0, 0, 12, 0, 0, 0, 14, 13, 0, 0, 0, 0, 0, 0, 0, 17, 18,
0, 0, 0, 30, 0, 20, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0,
0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 22, 0, 0,
0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 27, 28, 0, 0, 0, 0,
29, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 25, 26, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0}));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_unpooling_2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/perception/max_unpooling_2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4f0a6ed-ff45-41a0-bf78-a11ec8f1d3c1 | cpp | tensorflow/tensorflow | logging_hooks | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.cc | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <memory>
#include <string>
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/core/util/debug_data_dumper.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::PassManager;
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
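// Typical usage (sketch; the module and group names are illustrative):
//
//   mlir::PassManager pm(&context);
//   pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
//   EnablePassIRPrinting(pm, "TestGroup", "my_module");
//   (void)pm.run(module);
//
// Each pass then writes its IR dumps through DEBUG_DATA_DUMPER(), which
// resolves filenames under the TF_DUMP_GRAPH_PREFIX directory.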
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/file_statistics.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::DialectRegistry;
using mlir::LogicalResult;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::PassManager;
using mlir::func::FuncOp;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/internal/testdata/");
}
class LoggingHooksTest : public ::testing::Test {
public:
LoggingHooksTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LoggingHooksTest, DumpsPassData) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
TF_ASSERT_OK(CreateMlirModule("dead_const.mlir"));
PassManager pass_manager(&context_);
pass_manager.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
EnablePassIRPrinting(pass_manager, test_group_name_);
LogicalResult pass_status = pass_manager.run(mlir_module_.get());
EXPECT_TRUE(pass_status.succeeded());
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(2));
}
};
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/logging_hooks_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8f93cfd3-d9d0-4582-9743-597d6d1201cb | cpp | abseil/abseil-cpp | string_constant | absl/strings/internal/string_constant.h | absl/strings/internal/string_constant_test.cc | #ifndef ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
#define ABSL_STRINGS_INTERNAL_STRING_CONSTANT_H_
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
template <typename T>
struct StringConstant {
private:
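  // Forces a compile-time read of view[0]. The expression only
  // constant-evaluates when `view` points at constant data, which is what
  // lets the static_assert below reject views of runtime buffers; the
  // comparison itself is always true for a constant view.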
static constexpr bool TryConstexprEval(absl::string_view view) {
return view.empty() || 2 * view[0] != 1;
}
public:
static constexpr absl::string_view value = T{}();
constexpr absl::string_view operator()() const { return value; }
static_assert(TryConstexprEval(value),
"The input string_view must point to constant data.");
};
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
template <typename T>
constexpr absl::string_view StringConstant<T>::value;
#endif
template <typename T>
constexpr StringConstant<T> MakeStringConstant(T) {
return {};
}
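// Usage sketch (hypothetical functor): the argument must be an empty,
// default-constructible callable whose operator() is constexpr and returns a
// view of constant data, e.g.
//
//   struct HelloFn {
//     constexpr absl::string_view operator()() const { return "Hello"; }
//   };
//   constexpr auto kHello = MakeStringConstant(HelloFn{});
//   static_assert(kHello().size() == 5);
//
// A callable returning a view of non-constant data fails the static_assert
// inside StringConstant at compile time.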
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/strings/internal/string_constant.h"
#include "absl/meta/type_traits.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace {
using absl::strings_internal::MakeStringConstant;
struct Callable {
constexpr absl::string_view operator()() const {
return absl::string_view("Callable", 8);
}
};
TEST(StringConstant, Traits) {
constexpr auto str = MakeStringConstant(Callable{});
using T = decltype(str);
EXPECT_TRUE(std::is_empty<T>::value);
EXPECT_TRUE(std::is_trivial<T>::value);
EXPECT_TRUE(absl::is_trivially_default_constructible<T>::value);
EXPECT_TRUE(absl::is_trivially_copy_constructible<T>::value);
EXPECT_TRUE(absl::is_trivially_move_constructible<T>::value);
EXPECT_TRUE(absl::is_trivially_destructible<T>::value);
}
TEST(StringConstant, MakeFromCallable) {
constexpr auto str = MakeStringConstant(Callable{});
using T = decltype(str);
EXPECT_EQ(Callable{}(), T::value);
EXPECT_EQ(Callable{}(), str());
}
TEST(StringConstant, MakeFromStringConstant) {
constexpr auto str = MakeStringConstant(Callable{});
constexpr auto str2 = MakeStringConstant(str);
using T = decltype(str2);
EXPECT_EQ(Callable{}(), T::value);
EXPECT_EQ(Callable{}(), str2());
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/string_constant.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/string_constant_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
692945d3-8470-407f-ae14-61af4fa1bf80 | cpp | tensorflow/tensorflow | scatter_op | tensorflow/core/kernels/scatter_op.cc | tensorflow/core/kernels/scatter_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/scatter_functor.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/determinism.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
static bool ValidShapes(const Tensor& params, const Tensor& updates,
const Tensor& indices) {
if (updates.dims() == 0) return true;
if (updates.dims() != indices.dims() + params.dims() - 1) return false;
for (int d = 0; d < indices.dims(); d++) {
if (updates.dim_size(d) != indices.dim_size(d)) {
return false;
}
}
for (int d = 1; d < params.dims(); d++) {
if (params.dim_size(d) != updates.dim_size(d - 1 + indices.dims())) {
return false;
}
}
return true;
}
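// Shape contract illustrated (hypothetical dims): for params of shape {5, 2},
// indices of shape {3} pair with updates of shape {3, 2}, since updates.dims()
// must equal indices.dims() + params.dims() - 1 with leading dims matching
// indices and trailing dims matching params[1:]. A scalar updates tensor is
// always accepted (it is broadcast), while updates of shape {3, 3} would be
// rejected.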
static void DoValidationChecking(OpKernelContext* c, const Tensor& params,
const Tensor& indices, const Tensor& updates) {
OP_REQUIRES(c, params.IsInitialized(),
errors::FailedPrecondition("Null ref for params"));
OP_REQUIRES(c, TensorShapeUtils::IsVectorOrHigher(params.shape()),
errors::InvalidArgument("params must be at least 1-D, got shape ",
params.shape().DebugString()));
OP_REQUIRES(
c, ValidShapes(params, updates, indices),
errors::InvalidArgument("Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got ",
"updates.shape ", updates.shape().DebugString(),
", indices.shape ", indices.shape().DebugString(),
", params.shape ", params.shape().DebugString()));
}
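// ScatterUpdateOp applies `op` along the first dimension of a ref-typed
// params tensor. Sketch of the semantics (ASSIGN shown; ADD/SUB/MUL/DIV/
// MIN/MAX accumulate into params instead of overwriting):
//
//   for i in range(indices.size()):
//     params[indices[i], ...] = updates[i, ...]   # op == ASSIGN
//
// Out-of-range indices produce an InvalidArgument error rather than
// undefined behavior, as the bad_i checks below enforce.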
template <typename Device, typename T, typename Index, scatter_op::UpdateOp op>
class ScatterUpdateOp : public OpKernel {
public:
explicit ScatterUpdateOp(OpKernelConstruction* c) : OpKernel(c) {
OP_REQUIRES_OK(c, c->GetAttr("use_locking", &use_exclusive_lock_));
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES(
c, !OpDeterminismRequired(),
errors::Unimplemented(
"Determinism is not yet supported in GPU implementation of "
"Scatter ops with ref inputs. Consider using resource variables "
"instead if you want to run Scatter when op determinism is "
"enabled."));
}
}
  void Compute(OpKernelContext* c) override {
    if (use_exclusive_lock_) {
      // use_locking=true: hold the variable's mutex across the whole update.
      mutex_lock l(*c->input_ref_mutex(0));
      DoCompute(c);
} else {
DoCompute(c);
}
}
private:
bool use_exclusive_lock_;
void DoCompute(OpKernelContext* c) {
Tensor params = c->mutable_input(0, use_exclusive_lock_);
const Tensor& indices = c->input(1);
const Tensor& updates = c->input(2);
DoValidationChecking(c, params, indices, updates);
if (!c->status().ok()) return;
const int64_t N_big = indices.NumElements();
OP_REQUIRES(
c, N_big <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("indices has too many elements for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", N_big, " > ",
std::numeric_limits<Index>::max()));
const Index N = static_cast<Index>(indices.NumElements());
OP_REQUIRES(
c, params.dim_size(0) <= std::numeric_limits<Index>::max(),
errors::InvalidArgument("params.shape[0] too large for ",
DataTypeString(DataTypeToEnum<Index>::v()),
" indexing: ", params.dim_size(0), " > ",
std::numeric_limits<Index>::max()));
    // The op's output aliases the params ref input.
    c->forward_ref_input_to_ref_output(0, 0);
if (N > 0) {
auto indices_flat = indices.flat<Index>();
auto params_flat = params.flat_outer_dims<T>();
if (TensorShapeUtils::IsScalar(updates.shape())) {
const auto update = updates.scalar<T>();
functor::ScatterScalarFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, update, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params.dim_size(0), ")"));
} else {
auto updates_flat =
updates.shaped<T, 2>({N, updates.NumElements() / N});
functor::ScatterFunctor<Device, T, Index, op> functor;
const Index bad_i = functor(c, c->template eigen_device<Device>(),
params_flat, updates_flat, indices_flat);
OP_REQUIRES(c, bad_i < 0,
errors::InvalidArgument(
"indices", SliceDebugString(indices.shape(), bad_i),
" = ", indices_flat(bad_i), " is not in [0, ",
params.dim_size(0), ")"));
}
}
}
};
#define REGISTER_SCATTER_KERNEL_INDEX(type, index_type, dev, name, op) \
REGISTER_KERNEL_BUILDER(Name(name) \
.Device(DEVICE_##dev) \
.TypeConstraint<type>("T") \
.TypeConstraint<index_type>("Tindices"), \
ScatterUpdateOp<dev##Device, type, index_type, op>)
#define REGISTER_SCATTER_KERNEL(type, dev, name, op) \
REGISTER_SCATTER_KERNEL_INDEX(type, int32, dev, name, op); \
REGISTER_SCATTER_KERNEL_INDEX(type, int64_t, dev, name, op);
#define REGISTER_SCATTER_ARITHMETIC(type, dev) \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterAdd", scatter_op::UpdateOp::ADD); \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterDiv", scatter_op::UpdateOp::DIV); \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterMul", scatter_op::UpdateOp::MUL); \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterSub", scatter_op::UpdateOp::SUB);
#define REGISTER_SCATTER_MINMAX(type, dev) \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterMin", scatter_op::UpdateOp::MIN); \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterMax", scatter_op::UpdateOp::MAX);
#define REGISTER_SCATTER_UPDATE(type, dev) \
REGISTER_SCATTER_KERNEL(type, dev, "ScatterUpdate", \
scatter_op::UpdateOp::ASSIGN);
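// For reference, REGISTER_SCATTER_UPDATE(float, CPU) expands (via
// REGISTER_SCATTER_KERNEL and REGISTER_SCATTER_KERNEL_INDEX) into two
// kernel registrations, one per index type, roughly:
//
//   REGISTER_KERNEL_BUILDER(Name("ScatterUpdate")
//                               .Device(DEVICE_CPU)
//                               .TypeConstraint<float>("T")
//                               .TypeConstraint<int32>("Tindices"),
//                           ScatterUpdateOp<CPUDevice, float, int32,
//                                           scatter_op::UpdateOp::ASSIGN>);
//   // ...plus the same registration with int64_t indices.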
#define REGISTER_SCATTER_ARITHMETIC_CPU(type) \
REGISTER_SCATTER_ARITHMETIC(type, CPU);
#define REGISTER_SCATTER_MINMAX_CPU(type) REGISTER_SCATTER_MINMAX(type, CPU);
#define REGISTER_SCATTER_UPDATE_CPU(type) REGISTER_SCATTER_UPDATE(type, CPU);
TF_CALL_REAL_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_CPU);
TF_CALL_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_CPU);
TF_CALL_ALL_TYPES(REGISTER_SCATTER_UPDATE_CPU);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER_SCATTER_ARITHMETIC_GPU(type) \
REGISTER_SCATTER_ARITHMETIC(type, GPU);
#define REGISTER_SCATTER_MINMAX_GPU(type) REGISTER_SCATTER_MINMAX(type, GPU);
#define REGISTER_SCATTER_UPDATE_GPU(type) REGISTER_SCATTER_UPDATE(type, GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_ARITHMETIC_GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_MINMAX_GPU);
TF_CALL_GPU_NUMBER_TYPES(REGISTER_SCATTER_UPDATE_GPU);
#endif
#undef REGISTER_SCATTER_ARITHMETIC
#undef REGISTER_SCATTER_ARITHMETIC_CPU
#undef REGISTER_SCATTER_ARITHMETIC_GPU
#undef REGISTER_SCATTER_MINMAX
#undef REGISTER_SCATTER_MINMAX_CPU
#undef REGISTER_SCATTER_MINMAX_GPU
#undef REGISTER_SCATTER_UPDATE
#undef REGISTER_SCATTER_UPDATE_CPU
#undef REGISTER_SCATTER_UPDATE_GPU
#undef REGISTER_SCATTER_KERNEL
#undef REGISTER_SCATTER_KERNEL_INDEX
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class ScatterUpdateOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
class ScatterSubOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterSub")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterUpdateOpTest, Simple_StringType) {
MakeOp(DT_STRING_REF, DT_INT32);
AddInputFromArray<tstring>(TensorShape({1}), {"Brain"});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<tstring>(TensorShape({1}), {"TensorFlow"});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_STRING, TensorShape({1}));
test::FillValues<tstring>(&expected, {"TensorFlow"});
test::ExpectTensorEqual<tstring>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Simple_BoolType) {
MakeOp(DT_BOOL_REF, DT_INT32);
AddInputFromArray<bool>(TensorShape({1}), {false});
AddInputFromArray<int32>(TensorShape({1}), {0});
AddInputFromArray<bool>(TensorShape({1}), {true});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_BOOL, TensorShape({1}));
test::FillValues<bool>(&expected, {true});
test::ExpectTensorEqual<bool>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Simple_TwoD32) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Simple_Two64) {
MakeOp(DT_FLOAT_REF, DT_INT64);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int64_t>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Simple_ZeroD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({}), {3});
AddInputFromArray<float>(TensorShape({}), {101});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {0, 0, 0, 101, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Simple_OneD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, HigherRank) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({2, 3}), {0, 4, 2, 1, 3, 6});
AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 99});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "indices[2] = 99 is not in [0, 5)"))
<< s;
}
TEST_F(ScatterSubOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({14}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 1, 99});
AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
Status s = RunOpKernel();
EXPECT_TRUE(
absl::StrContains(s.ToString(), "indices[2] = 99 is not in [0, 14)"))
<< s;
}
TEST_F(ScatterSubOpTest, StressIndexTest) {
MakeOp(DT_INT32_REF, DT_INT32);
const int kRows = 1;
std::vector<int32> values(kRows, 0);
const int kNumUpdates = 1000000;
std::vector<int32> indices(kNumUpdates, 0);
std::vector<int32> updates(kNumUpdates, 1);
AddInputFromArray<int32>(TensorShape({kRows}), values);
AddInputFromArray<int32>(TensorShape({kNumUpdates}), indices);
AddInputFromArray<int32>(TensorShape({kNumUpdates}), updates);
  Status s = RunOpKernel();
  TF_ASSERT_OK(s);
  Tensor params_tensor = *mutable_input(0).tensor;
  Tensor expected(allocator(), DT_INT32, TensorShape({1}));
  // 1,000,000 ScatterSub updates of 1 at index 0: 0 - 1000000 = -1000000.
  test::FillValues<int32>(&expected, {-1000000});
test::ExpectTensorEqual<int32>(expected, params_tensor);
}
TEST_F(ScatterUpdateOpTest, Error_WrongDimsIndices) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1, 3}), {0, 4, 99});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got "))
<< s;
}
TEST_F(ScatterUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(
TensorShape({3, 4}),
{100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got "))
<< s;
}
TEST_F(ScatterUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({2, 3}),
{100, 101, 102, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(s.ToString(),
"Must have updates.shape = indices.shape + "
"params.shape[1:] or updates.shape = [], got "))
<< s;
}
class ScatterUpdateBM : public ScatterUpdateOpTest {
public:
void TestBody() override {}
void MakeBenchmarkOp(const char* op, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", op)
.Input(FakeInput(DT_FLOAT_REF))
.Input(FakeInput(index_type))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
}
};
template <typename Index>
void BM_ScatterHelper(::testing::benchmark::State& state, int embedding_size,
const char* op, bool big_num_updates = false) {
const int kRows = 10000000 / embedding_size;
std::vector<float> values;
values.reserve(kRows);
for (int i = 0; i < kRows * embedding_size; i++) {
values.push_back(i);
}
const int kNumUpdates = big_num_updates ? 1000000 : 1000;
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<Index> indices;
std::vector<float> updates;
for (int i = 0; i < kNumUpdates; i++) {
indices.push_back(rnd.Uniform(kRows));
for (int j = 0; j < embedding_size; j++) {
updates.push_back(i * 10 + j);
}
}
ScatterUpdateBM bm;
bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v());
bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values);
bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices);
bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}),
updates);
for (auto i : state) {
Status s = bm.RunOpKernel();
}
state.SetItemsProcessed((static_cast<int64_t>(kNumUpdates) * embedding_size) *
state.iterations());
}
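// Note on the throughput metric above: SetItemsProcessed counts one item per
// scattered element, i.e. kNumUpdates * embedding_size per benchmark
// iteration, so the reported items/s is per-element update throughput.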
void BM_ScatterUpdateInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterUpdate");
}
void BM_ScatterUpdateInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterUpdate");
}
void BM_ScatterAddInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterAdd");
}
void BM_ScatterAddInt32Large(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterAdd", true);
}
void BM_ScatterAddInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterAdd");
}
void BM_ScatterMulInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterMul");
}
void BM_ScatterMulInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMul");
}
void BM_ScatterDivInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterDiv");
}
void BM_ScatterDivInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterDiv");
}
void BM_ScatterMinInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterMin");
}
void BM_ScatterMinInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMin");
}
void BM_ScatterMaxInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int32>(state, embedding_size, "ScatterMax");
}
void BM_ScatterMaxInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterHelper<int64_t>(state, embedding_size, "ScatterMax");
}
BENCHMARK(BM_ScatterUpdateInt32)
->Arg(1)
->Arg(10)
->Arg(32)
->Arg(50)
->Arg(64)
->Arg(80)
->Arg(96)
->Arg(112)
->Arg(192)
->Arg(256)
->Arg(1024)
->Arg(10000)
->Arg(100000)
->Arg(1000000);
BENCHMARK(BM_ScatterUpdateInt64)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024)
->Arg(100000);
BENCHMARK(BM_ScatterAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterAddInt32Large)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024);
BENCHMARK(BM_ScatterAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMulInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMulInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterDivInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterDivInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMinInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMinInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMaxInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterMaxInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7484ff92-acf1-42ef-86b8-36fb3c19fc85 | cpp | tensorflow/tensorflow | hlo_instruction | third_party/xla/xla/hlo/ir/hlo_instruction.cc | third_party/xla/xla/service/hlo_instruction_test.cc | #include "xla/hlo/ir/hlo_instruction.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_original_value.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/hlo_sharding_metadata.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_lexer.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/sort_json.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/iterator_range.h"
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::CEscape;
using absl::StrAppend;
using absl::StrCat;
using absl::StrJoin;
const HloInstruction::Rare* const HloInstruction::kEmptyRare =
new HloInstruction::Rare;
namespace {
template <typename T>
absl::Status EraseElementFromVector(PtrVec<T>* container, T value) {
auto it = std::find(container->begin(), container->end(), value);
TF_RET_CHECK(it != container->end());
container->erase(it);
return absl::OkStatus();
}
}  // namespace
HloInstruction::Users::~Users() = default;
void HloInstruction::Users::Clear() {
users_.clear();
user_map_.reset(nullptr);
DCHECK(CheckInvariants());
}
bool HloInstruction::Users::Contains(const HloInstruction* instruction) const {
if (user_map_ == nullptr) {
return std::find(users_.begin(), users_.end(), instruction) != users_.end();
} else {
return user_map_->contains(instruction);
}
}
void HloInstruction::Users::AddUser(HloInstruction* user) {
  if (!Contains(user)) {
    // Once the user list grows past kMapThreshold, build a user -> index map
    // so Contains() and UserId() stay O(1) instead of a linear scan.
    if (user_map_ == nullptr && users_.size() >= kMapThreshold) {
      user_map_ =
          std::make_unique<absl::flat_hash_map<const HloInstruction*, int64_t>>(
              users_.size());
      RebuildMap();
      DCHECK(CheckInvariants());
    }
if (user_map_ != nullptr) {
user_map_->emplace(user, users_.size());
}
users_.push_back(user);
DCHECK(CheckInvariants());
}
}
int64_t HloInstruction::Users::UserId(HloInstruction* user) {
if (user_map_ == nullptr) {
auto it = std::find(users_.begin(), users_.end(), user);
CHECK(it != users_.end());
return it - users_.begin();
} else {
auto result = user_map_->find(user);
CHECK(result != user_map_->end());
return result->second;
}
}
void HloInstruction::Users::MaybeRemoveUser(HloInstruction* user) {
if (Contains(user)) {
RemoveUser(user);
DCHECK(CheckInvariants());
}
}
void HloInstruction::Users::RemoveUser(HloInstruction* user) {
  const int64_t index = UserId(user);
  CHECK_EQ(users_[index], user);
  // O(1) erase: move the last user into the vacated slot and, if the map
  // exists, repoint its index before dropping the removed entry.
  HloInstruction* last = users_.back();
  if (user_map_ != nullptr) {
(*user_map_)[last] = index;
user_map_->erase(user);
}
users_[index] = last;
users_.pop_back();
DCHECK(CheckInvariants());
}
void HloInstruction::Users::SortInstructionUsers(
const MappedPtrContainerSorter<HloInstruction>::MapPtrFn& map_fn,
const Users& sorted_instruction_users) {
using Sorter = MappedPtrContainerSorter<HloInstruction>;
auto status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction_users.users_, users_);
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction users: " << status;
}
if (user_map_ != nullptr) {
user_map_->clear();
RebuildMap();
}
DCHECK(CheckInvariants());
}
void HloInstruction::Users::RebuildMap() {
for (uint64_t i = 0; i < users_.size(); ++i) {
(*user_map_)[users_[i]] = i;
}
}
bool HloInstruction::Users::CheckInvariants() {
if (user_map_ != nullptr) {
CHECK_EQ(users_.size(), user_map_->size());
}
return true;
}
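// The Users container above is a small-size-optimized set: a plain vector
// gives membership tests that are cheap while the list stays under
// kMapThreshold, and a lazily built hash map (instruction -> index into
// users_) takes over once it grows. A minimal sketch of the same pattern,
// independent of HLO (names here are illustrative, not from this file):
//
//   template <typename T>
//   class SmallSet {
//     std::vector<T> items_;
//     std::unique_ptr<absl::flat_hash_map<T, int64_t>> index_;  // lazy
//     // insert/contains/erase mirror AddUser/Contains/RemoveUser above.
//   };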
void HloInstruction::AppendComputation(HloComputation* computation) {
mutable_rare()->called_computations.push_back(computation);
}
HloInstruction* HloInstruction::AddInstruction(
std::unique_ptr<HloInstruction> derived_instruction) {
HloInstruction* derived =
parent()->AddInstruction(std::move(derived_instruction));
  const bool has_prior_sharding = derived->has_sharding();
  SetupDerivedInstruction(derived);
  // Reshapes and transposes change the dimension layout, so a sharding
  // inherited via SetupDerivedInstruction may no longer be meaningful; drop
  // it unless the derived instruction already carried its own sharding.
  if (!has_prior_sharding && (derived->opcode() == HloOpcode::kReshape ||
                              derived->opcode() == HloOpcode::kTranspose)) {
    derived->clear_sharding();
  }
return derived;
}
absl::StatusOr<std::unique_ptr<HloInstruction>> HloInstruction::CreateFromProto(
const HloInstructionProto& proto,
const absl::flat_hash_map<int64_t, HloInstruction*>& instruction_map,
const absl::flat_hash_map<int64_t, HloComputation*>& computation_map,
bool prohibit_empty_literal) {
TF_RET_CHECK(!proto.opcode().empty());
HloOpcode opcode;
auto opcode_or = StringToHloOpcode(proto.opcode());
std::optional<ComparisonDirection> comparison_direction;
if (opcode_or.ok()) {
opcode = std::move(opcode_or).value();
} else {
if (proto.opcode() == "equal-to") {
comparison_direction = ComparisonDirection::kEq;
} else if (proto.opcode() == "not-equal-to") {
comparison_direction = ComparisonDirection::kNe;
} else if (proto.opcode() == "greater-than-or-equal-to") {
comparison_direction = ComparisonDirection::kGe;
} else if (proto.opcode() == "greater-than") {
comparison_direction = ComparisonDirection::kGt;
} else if (proto.opcode() == "less-than-or-equal-to") {
comparison_direction = ComparisonDirection::kLe;
} else if (proto.opcode() == "less-than") {
comparison_direction = ComparisonDirection::kLt;
}
if (comparison_direction) {
opcode = HloOpcode::kCompare;
} else {
return InvalidArgument("Unknown opcode: %s", proto.opcode());
}
}
TF_RET_CHECK(proto.has_shape());
std::unique_ptr<HloInstruction> instruction;
  // Helper lambdas for decoding the proto: look up a single operand or called
  // computation by position, or materialize all of them at once.
  const auto operands = [&instruction_map, &proto](int index) {
return instruction_map.at(proto.operand_ids(index));
};
const auto all_operands = [&instruction_map, &proto]() {
std::vector<HloInstruction*> result(proto.operand_ids_size());
std::transform(proto.operand_ids().begin(), proto.operand_ids().end(),
result.begin(), [&instruction_map](int64_t operand_id) {
return instruction_map.at(operand_id);
});
return result;
};
const auto output_to_operand_aliasing = [&proto]() {
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
output_to_operand_aliasing;
for (const auto& aliasing : proto.output_operand_aliasing()) {
output_to_operand_aliasing.emplace_back(
ShapeIndex(aliasing.output_shape_index().begin(),
aliasing.output_shape_index().end()),
std::make_pair(aliasing.operand_index(),
ShapeIndex(aliasing.operand_shape_index().begin(),
aliasing.operand_shape_index().end())));
}
return output_to_operand_aliasing;
};
const auto computations = [&computation_map, &proto](int index) {
return computation_map.at(proto.called_computation_ids(index));
};
const auto all_computations = [&computation_map, &proto]() {
std::vector<HloComputation*> result(proto.called_computation_ids_size());
std::transform(proto.called_computation_ids().begin(),
proto.called_computation_ids().end(), result.begin(),
[&computation_map](int64_t computation_id) {
return computation_map.at(computation_id);
});
return result;
};
TF_RET_CHECK(
absl::c_all_of(proto.operand_ids(),
[&](int64_t id) { return instruction_map.contains(id); }))
<< proto.name() << " instruction contains invalid operand id(s)";
TF_RET_CHECK(
absl::c_all_of(proto.called_computation_ids(),
[&](int64_t id) { return computation_map.contains(id); }))
<< proto.name() << " instruction references invalid computation id(s)";
Shape shape(proto.shape());
TF_RETURN_IF_ERROR(ShapeUtil::ValidateShapeWithOptionalLayout(shape));
std::optional<int> arity = HloOpcodeArity(opcode);
if (arity) {
TF_RET_CHECK(proto.operand_ids_size() == *arity)
<< proto.opcode() << " instruction should have " << *arity
<< " operands but sees " << proto.operand_ids_size();
}
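  // Example of the arity check: HloOpcodeArity returns 2 for a binary opcode
  // such as kAdd, so an "add" proto listing three operand ids is rejected
  // here; variadic opcodes (e.g. kTuple) report no fixed arity and skip the
  // check.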
switch (opcode) {
case HloOpcode::kBatchNormTraining:
instruction =
CreateBatchNormTraining(shape, operands(0), operands(1), operands(2),
proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kBatchNormInference:
instruction = CreateBatchNormInference(
shape, operands(0), operands(1), operands(2), operands(3),
operands(4), proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kBatchNormGrad:
instruction = CreateBatchNormGrad(shape, operands(0), operands(1),
operands(2), operands(3), operands(4),
proto.epsilon(), proto.feature_index());
break;
case HloOpcode::kFft: {
std::vector<int64_t> fft_length(proto.fft_length().begin(),
proto.fft_length().end());
instruction = CreateFft(shape, operands(0), proto.fft_type(),
absl::Span<const int64_t>(fft_length));
break;
}
case HloOpcode::kAsyncStart: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Async start instruction should have 1 called computation but "
"sees "
<< proto.called_computation_ids_size();
instruction = CreateAsyncStart(shape, all_operands(), computations(0),
proto.async_execution_thread().empty()
? kMainExecutionThread
: proto.async_execution_thread());
break;
}
case HloOpcode::kAsyncUpdate: {
      TF_RET_CHECK(proto.operand_ids_size() == 1)
          << "Async update requires a single operand";
HloInstruction* prev_op = operands(0);
TF_RET_CHECK(prev_op->IsAsynchronous())
<< "Async update requires its operand to be an asynchronous op";
if (!proto.async_execution_thread().empty()) {
TF_RET_CHECK(proto.async_execution_thread() ==
prev_op->async_execution_thread())
<< "Async update should have " << prev_op->async_execution_thread()
<< " async_execution_thread, but sees "
<< proto.async_execution_thread();
}
if (!proto.called_computation_ids().empty()) {
TF_RET_CHECK(computations(0) == prev_op->async_wrapped_computation())
<< "Async update should have "
<< prev_op->async_wrapped_computation()->name()
<< " async_wrapped_computation, but sees "
<< computations(0)->name();
}
instruction = CreateAsyncUpdate(shape, prev_op);
break;
}
case HloOpcode::kAsyncDone: {
      TF_RET_CHECK(proto.operand_ids_size() == 1)
          << "Async done requires a single operand";
HloInstruction* prev_op = operands(0);
TF_RET_CHECK(prev_op->IsAsynchronous())
<< "Async done requires its operand to be an asynchronous op";
if (!proto.async_execution_thread().empty()) {
TF_RET_CHECK(proto.async_execution_thread() ==
prev_op->async_execution_thread())
<< "Async done should have " << prev_op->async_execution_thread()
<< " async_execution_thread, but sees "
<< proto.async_execution_thread();
}
if (!proto.called_computation_ids().empty()) {
TF_RET_CHECK(computations(0) == prev_op->async_wrapped_computation())
<< "Async done should have "
<< prev_op->async_wrapped_computation()->name()
<< " async_wrapped_computation, but sees "
<< computations(0)->name();
}
instruction = CreateAsyncDone(shape, prev_op);
break;
}
case HloOpcode::kCopyStart: {
std::optional<int> cross_program_prefetch_index;
if (proto.optional_cross_program_prefetch_index_case() ==
HloInstructionProto::kCrossProgramPrefetchIndex) {
cross_program_prefetch_index =
std::make_optional(proto.cross_program_prefetch_index());
} else if (proto.is_cross_program_prefetch()) {
cross_program_prefetch_index = 0;
}
instruction =
CreateCopyStart(shape, operands(0), cross_program_prefetch_index);
break;
}
case HloOpcode::kCompare: {
if (!comparison_direction) {
TF_ASSIGN_OR_RETURN(
comparison_direction,
StringToComparisonDirection(proto.comparison_direction()));
}
auto comparison_type_str = proto.comparison_type();
if (!comparison_type_str.empty()) {
TF_ASSIGN_OR_RETURN(auto comparison_type,
StringToComparisonType(comparison_type_str));
instruction = CreateCompare(shape, operands(0), operands(1),
*comparison_direction, comparison_type);
} else {
instruction = CreateCompare(shape, operands(0), operands(1),
*comparison_direction);
}
break;
}
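    // (Legacy protos spell comparisons as dedicated opcodes, e.g.
    // "greater-than"; the fallback at the top of this function maps those
    // onto kCompare with the matching ComparisonDirection, which is why
    // comparison_direction may already be set when this case is reached.)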
case HloOpcode::kTriangularSolve: {
instruction = CreateTriangularSolve(shape, operands(0), operands(1),
proto.triangular_solve_options());
break;
}
case HloOpcode::kCholesky: {
instruction =
CreateCholesky(shape, operands(0), proto.cholesky_options());
break;
}
case HloOpcode::kSend:
instruction = CreateSend(operands(0), operands(1), proto.channel_id(),
proto.is_host_transfer());
break;
case HloOpcode::kSendDone:
TF_RET_CHECK(DynCast<HloSendInstruction>(operands(0)) != nullptr)
<< "SendDone must take the context operand from Send";
instruction = CreateSendDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kRecv:
instruction = CreateRecv(shape.tuple_shapes(0), operands(0),
proto.channel_id(), proto.is_host_transfer());
break;
case HloOpcode::kRecvDone:
TF_RET_CHECK(DynCast<HloRecvInstruction>(operands(0)) != nullptr)
<< "RecvDone must take the context operand from Recv";
instruction = CreateRecvDone(operands(0), proto.is_host_transfer());
break;
case HloOpcode::kReverse:
instruction =
CreateReverse(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kConcatenate:
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "Concatenate instruction should have 1 dimension but sees "
<< proto.dimensions_size();
instruction =
CreateConcatenate(shape, all_operands(), proto.dimensions(0));
break;
case HloOpcode::kConditional: {
TF_RET_CHECK(proto.called_computation_ids_size() > 0)
<< "conditional should have at least 1 called computation";
if (operands(0)->shape().element_type() == PRED) {
TF_RET_CHECK(proto.called_computation_ids_size() == 2)
<< "conditional should have exactly 2 called computations but got "
<< proto.called_computation_ids_size();
}
TF_RET_CHECK(proto.operand_ids_size() ==
proto.called_computation_ids_size() + 1)
<< "conditional should have one branch_index operand plus one "
"operand per called computation but got "
<< proto.operand_ids_size() << " operands for "
<< proto.called_computation_ids_size() << " branch computations";
auto cond_operands = all_operands();
instruction =
CreateConditional(shape, cond_operands[0], all_computations(),
absl::MakeSpan(cond_operands).subspan(1));
break;
}
case HloOpcode::kReduce:
TF_RET_CHECK(proto.operand_ids_size() % 2 == 0)
<< "Reduce instruction should have an even number of operands but "
"sees "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Reduce instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
{
const auto reduce_operands = all_operands();
auto inputs = absl::MakeSpan(reduce_operands)
.subspan(0, reduce_operands.size() / 2);
auto init_values =
absl::MakeSpan(reduce_operands)
.subspan(reduce_operands.size() / 2, reduce_operands.size());
instruction =
CreateReduce(shape, inputs, init_values,
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()),
computations(0));
}
break;
case HloOpcode::kSort: {
TF_RET_CHECK(proto.operand_ids_size() >= 1)
<< "Sort instruction should have at least 1 operand but has "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.dimensions().size() == 1)
<< "Sort instruction should have 1 dimension";
      TF_RET_CHECK(proto.called_computation_ids_size() == 1)
          << "Sort instruction should have 1 called computation but sees "
          << proto.called_computation_ids_size();
      auto sort_operands = all_operands();
      instruction = CreateSort(shape, proto.dimensions(0), sort_operands,
                               computations(0), proto.is_stable());
break;
}
case HloOpcode::kTopK: {
TF_RET_CHECK(proto.operand_ids_size() == 1)
<< "TopK instruction should have exactly 1 operand but has "
<< proto.operand_ids_size();
instruction =
CreateTopK(shape, all_operands()[0], proto.k(), proto.largest());
break;
}
case HloOpcode::kTranspose:
instruction =
CreateTranspose(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kBroadcast:
instruction =
CreateBroadcast(shape, operands(0),
std::vector<int64_t>(proto.dimensions().begin(),
proto.dimensions().end()));
break;
case HloOpcode::kMap:
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Map instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
instruction = CreateMap(shape, all_operands(), computations(0));
break;
case HloOpcode::kSlice: {
std::vector<int64_t> slice_starts, slice_limits, slice_strides;
for (const HloInstructionProto::SliceDimensions& slice_dimensions :
proto.slice_dimensions()) {
slice_starts.push_back(slice_dimensions.start());
slice_limits.push_back(slice_dimensions.limit());
slice_strides.push_back(slice_dimensions.stride());
}
instruction = CreateSlice(shape, operands(0), slice_starts, slice_limits,
slice_strides);
break;
}
case HloOpcode::kConstant: {
if (proto.has_literal()) {
TF_ASSIGN_OR_RETURN(
auto literal,
Literal::CreateFromProto(proto.literal(), prohibit_empty_literal));
instruction = CreateConstant(std::move(literal));
TF_RET_CHECK(Shape::Equal().MinorToMajorOnlyInLayout()(
instruction->shape(), shape))
<< instruction->shape().ToString(true) << " vs "
<< shape.ToString(true);
*instruction->mutable_shape() = shape;
} else {
instruction = std::make_unique<HloConstantInstruction>(shape);
}
break;
}
case HloOpcode::kFusion: {
TF_RET_CHECK(!proto.fusion_kind().empty());
TF_ASSIGN_OR_RETURN(FusionKind fusion_kind,
StringToFusionKind(proto.fusion_kind()));
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Expect 1 called computation for fusion instruction but sees "
<< proto.called_computation_ids_size();
const int64_t fusion_id = proto.called_computation_ids(0);
auto* fused_computation =
tsl::gtl::FindPtrOrNull(computation_map, fusion_id);
TF_RET_CHECK(fused_computation != nullptr)
<< "No fusion computation with id " << fusion_id;
instruction =
CreateFusion(shape, fusion_kind, all_operands(), fused_computation);
auto fusion_instr = DynCast<HloFusionInstruction>(instruction.get());
fusion_instr->set_output_to_operand_aliasing(
output_to_operand_aliasing());
break;
}
case HloOpcode::kRng:
instruction = CreateRng(shape, proto.distribution(), all_operands());
break;
case HloOpcode::kRngBitGenerator:
instruction =
CreateRngBitGenerator(shape, operands(0), proto.rng_algorithm());
break;
case HloOpcode::kRngGetAndUpdateState:
instruction = CreateRngGetAndUpdateState(shape, proto.delta());
break;
case HloOpcode::kParameter:
instruction =
CreateParameter(proto.parameter_number(), shape, proto.name());
if (!proto.parameter_replication().replicated_at_leaf_buffers().empty()) {
instruction->set_parameter_replicated_at_leaf_buffers(
proto.parameter_replication().replicated_at_leaf_buffers());
}
break;
case HloOpcode::kGetTupleElement:
instruction =
CreateGetTupleElement(shape, operands(0), proto.tuple_index());
break;
case HloOpcode::kReducePrecision:
instruction = CreateReducePrecision(
shape, operands(0), proto.exponent_bits(), proto.mantissa_bits());
break;
case HloOpcode::kInfeed: {
TF_RET_CHECK(shape.IsTuple() &&
(ShapeUtil::TupleElementCount(shape) == 2))
<< "Infeed should have a tuple shape with 2 operands, but has: "
<< shape;
const Shape& data_shape = ShapeUtil::GetTupleElementShape(shape, 0);
instruction =
CreateInfeed(data_shape, operands(0), proto.infeed_config());
} break;
case HloOpcode::kOutfeed: {
Shape outfeed_shape(proto.outfeed_shape());
TF_RETURN_IF_ERROR(
ShapeUtil::ValidateShapeWithOptionalLayout(outfeed_shape));
instruction = CreateOutfeed(outfeed_shape, operands(0), operands(1),
proto.outfeed_config());
break;
}
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
      TF_RET_CHECK(proto.dimensions_size() == 1)
          << "AllGather cannot have more than 1 all-gather dimension";
int64_t all_gather_dimension = proto.dimensions(0);
if (opcode == HloOpcode::kAllGather) {
instruction = CreateAllGather(
shape, all_operands(), all_gather_dimension,
CollectiveDeviceList::FromProto(proto), proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
} else {
instruction = CreateAllGatherStart(
shape, all_operands(), all_gather_dimension,
CollectiveDeviceList::FromProto(proto), proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
}
break;
}
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kReduceScatter: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "AllReduce should have 1 called computation but sees "
<< proto.called_computation_ids_size();
TF_RET_CHECK(proto.channel_id() <= 0 || proto.all_reduce_id() <= 0)
<< "AllReduce cannot have both channel_id() and all_reduce_id()";
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
if (proto.all_reduce_id() > 0) {
channel_id = proto.all_reduce_id();
}
CollectiveDeviceList device_list = CollectiveDeviceList::FromProto(proto);
if (opcode == HloOpcode::kAllReduce) {
instruction =
CreateAllReduce(shape, all_operands(), computations(0), device_list,
proto.constrain_layout(), channel_id,
proto.use_global_device_ids());
} else if (opcode == HloOpcode::kReduceScatter) {
        TF_RET_CHECK(proto.dimensions_size() == 1)
            << "ReduceScatter cannot have more than 1 scatter dimension";
int64_t scatter_dimension = proto.dimensions(0);
instruction = CreateReduceScatter(
shape, all_operands(), computations(0), device_list,
proto.constrain_layout(), channel_id, proto.use_global_device_ids(),
scatter_dimension);
} else {
instruction =
CreateAllReduceStart(shape, all_operands(), computations(0),
device_list, proto.constrain_layout(),
channel_id, proto.use_global_device_ids());
}
break;
}
case HloOpcode::kAllToAll: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
std::optional<int64_t> split_dimension;
if (proto.dimensions_size() > 0) {
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "AllToAll cannot have more than 1 dimension (split dimension)";
TF_RET_CHECK(all_operands().size() == 1)
<< "AllToAll must have a single operand when the split dimension "
"is specified";
split_dimension = proto.dimensions(0);
}
instruction = CreateAllToAll(
shape, all_operands(), CollectiveDeviceList::FromProto(proto),
proto.constrain_layout(), channel_id, split_dimension);
break;
}
case HloOpcode::kCollectiveBroadcast: {
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
instruction = CreateCollectiveBroadcast(
shape, all_operands(), CollectiveDeviceList::FromProto(proto), false,
channel_id);
break;
}
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart: {
TF_RET_CHECK(proto.operand_ids().size() == 1 ||
proto.operand_ids().size() == 4);
std::vector<std::pair<int64_t, int64_t>> source_target_pairs(
proto.source_target_pairs_size());
std::optional<int64_t> channel_id;
if (proto.channel_id() > 0) {
channel_id = proto.channel_id();
}
for (int i = 0; i < source_target_pairs.size(); ++i) {
source_target_pairs[i].first = proto.source_target_pairs(i).source();
source_target_pairs[i].second = proto.source_target_pairs(i).target();
}
if (proto.dynamic_slice_sizes_size() == 0) {
if (opcode == HloOpcode::kCollectivePermute) {
instruction = CreateCollectivePermute(
shape, operands(0), source_target_pairs, channel_id);
} else if (opcode == HloOpcode::kCollectivePermuteStart) {
instruction = CreateCollectivePermuteStart(
shape, operands(0), source_target_pairs, channel_id);
} else {
LOG(FATAL) << "Expect CollectivePermute or CollectivePermuteStart, "
<< "but got " << opcode;
}
} else {
std::vector<std::vector<int64_t>> slice_sizes;
HloInstruction* input = operands(0);
HloInstruction* input_start_indices = operands(2);
if (input->shape().IsTuple() &&
input->shape().tuple_shapes_size() > 1) {
slice_sizes.resize(input->shape().tuple_shapes_size());
} else {
slice_sizes.resize(1);
}
int proto_index = 0;
if (input->shape().IsTuple()) {
if (input_start_indices->shape()
.tuple_shapes(0)
.tuple_shapes(0)
.IsArray()) {
slice_sizes.resize(input->shape().tuple_shapes_size());
for (int i = 0; i < input->shape().tuple_shapes_size(); ++i) {
slice_sizes[i].resize(
input->shape().tuple_shapes(i).dimensions_size());
for (int j = 0;
j < input->shape().tuple_shapes(i).dimensions_size(); ++j) {
CHECK_GE(proto.dynamic_slice_sizes_size(), proto_index);
slice_sizes[i][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
} else {
slice_sizes.resize(
input->shape().tuple_shapes_size() *
ShapeUtil::TupleElementCount(
input_start_indices->shape().tuple_shapes(0)));
int slice_sizes_count = 0;
for (int i = 0; i < input->shape().tuple_shapes_size(); ++i) {
for (int j = 0;
j < ShapeUtil::TupleElementCount(
input_start_indices->shape().tuple_shapes(i));
++j) {
slice_sizes[slice_sizes_count].resize(
input->shape().tuple_shapes(i).rank());
for (int k = 0; k < input->shape().tuple_shapes(i).rank();
++k) {
CHECK_GE(proto.dynamic_slice_sizes_size(), proto_index);
slice_sizes[slice_sizes_count][k] =
proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
slice_sizes_count += 1;
}
}
}
} else {
slice_sizes.resize(
ShapeUtil::TupleElementCount(input_start_indices->shape()));
if (input_start_indices->shape().tuple_shapes(0).IsTuple()) {
for (int i = 0;
i < ShapeUtil::TupleElementCount(input_start_indices->shape());
++i) {
slice_sizes[i].resize(input->shape().dimensions_size());
for (int j = 0; j < input->shape().dimensions_size(); ++j) {
slice_sizes[i][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
} else {
slice_sizes.resize(1);
slice_sizes[0].resize(input->shape().dimensions_size());
for (int j = 0; j < input->shape().dimensions_size(); ++j) {
slice_sizes[0][j] = proto.dynamic_slice_sizes(proto_index);
proto_index += 1;
}
}
}
if (opcode == HloOpcode::kCollectivePermute) {
instruction = CreateCollectivePermute(
shape, operands(0), operands(1), operands(2), operands(3),
source_target_pairs, slice_sizes, channel_id);
} else if (opcode == HloOpcode::kCollectivePermuteStart) {
instruction = CreateCollectivePermuteStart(
shape, operands(0), operands(1), operands(2), operands(3),
source_target_pairs, slice_sizes, channel_id);
} else {
LOG(FATAL) << "Expect CollectivePermute or CollectivePermuteStart, "
<< "but got " << opcode;
}
}
break;
}
case HloOpcode::kReplicaId: {
instruction = CreateReplicaId(shape);
break;
}
case HloOpcode::kPartitionId: {
instruction = CreatePartitionId(shape);
break;
}
case HloOpcode::kConvolution: {
TF_RET_CHECK(proto.has_window());
TF_RET_CHECK(proto.has_convolution_dimension_numbers());
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
proto.operand_ids_size(), PrecisionConfig::DEFAULT);
instruction = CreateConvolve(
shape, operands(0), operands(1),
std::max<int64_t>(proto.feature_group_count(), 1),
std::max<int64_t>(proto.batch_group_count(), 1), proto.window(),
proto.convolution_dimension_numbers(), precision_config);
break;
}
case HloOpcode::kReduceWindow:
TF_RET_CHECK(proto.operand_ids_size() % 2 == 0)
<< "Reduce window should have an even number of operands but "
"sees "
<< proto.operand_ids_size();
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "ReduceWindow should have 1 called computation but sees "
<< proto.called_computation_ids_size();
{
const auto reduce_operands = all_operands();
auto inputs = absl::MakeSpan(reduce_operands)
.subspan(0, reduce_operands.size() / 2);
auto init_values =
absl::MakeSpan(reduce_operands)
.subspan(reduce_operands.size() / 2, reduce_operands.size());
instruction = CreateReduceWindow(shape, inputs, init_values,
proto.window(), computations(0));
}
break;
case HloOpcode::kSelectAndScatter:
TF_RET_CHECK(proto.called_computation_ids_size() == 2)
<< "SelectAndScatter should have 2 called computations but sees "
<< proto.called_computation_ids_size();
instruction = CreateSelectAndScatter(shape, operands(0), computations(0),
proto.window(), operands(1),
operands(2), computations(1));
break;
case HloOpcode::kCustomCall: {
if (proto.constrain_layout()) {
std::vector<Shape> operand_shapes;
const auto& operand_shapes_with_layout =
proto.operand_shapes_with_layout();
operand_shapes.reserve(operand_shapes_with_layout.size());
for (const ShapeProto& shape_proto : operand_shapes_with_layout) {
operand_shapes.emplace_back(shape_proto);
}
instruction =
CreateCustomCall(shape, all_operands(), proto.custom_call_target(),
operand_shapes, proto.backend_config());
} else {
if (proto.called_computation_ids_size() == 1) {
instruction = CreateCustomCall(shape, all_operands(), computations(0),
proto.custom_call_target(),
proto.backend_config());
} else if (proto.called_computation_ids_size() > 1) {
instruction = CreateCustomCall(
shape, all_operands(), all_computations(),
proto.custom_call_target(), proto.backend_config());
} else {
instruction = CreateCustomCall(shape, all_operands(),
proto.custom_call_target(),
proto.backend_config());
}
}
auto custom_call_instr =
Cast<HloCustomCallInstruction>(instruction.get());
if (proto.has_window()) {
custom_call_instr->set_window(proto.window());
}
if (proto.has_literal()) {
TF_ASSIGN_OR_RETURN(
auto literal,
Literal::CreateFromProto(proto.literal(), prohibit_empty_literal));
custom_call_instr->set_literal(std::move(literal));
}
if (proto.has_convolution_dimension_numbers()) {
custom_call_instr->set_convolution_dimension_numbers(
proto.convolution_dimension_numbers());
}
custom_call_instr->set_feature_group_count(std::max(
static_cast<int64_t>(proto.feature_group_count()), int64_t{1}));
custom_call_instr->set_batch_group_count(std::max(
static_cast<int64_t>(proto.batch_group_count()), int64_t{1}));
custom_call_instr->set_custom_call_has_side_effect(
proto.custom_call_has_side_effect());
custom_call_instr->set_padding_type(proto.padding_type());
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
proto.operand_ids_size(), PrecisionConfig::DEFAULT);
*custom_call_instr->mutable_precision_config() = precision_config;
custom_call_instr->set_output_to_operand_aliasing(
output_to_operand_aliasing());
custom_call_instr->set_custom_call_schedule(proto.custom_call_schedule());
custom_call_instr->set_api_version(proto.custom_call_api_version());
break;
}
case HloOpcode::kPad:
TF_RET_CHECK(proto.has_padding_config());
instruction =
CreatePad(shape, operands(0), operands(1), proto.padding_config());
break;
case HloOpcode::kDynamicSlice: {
std::vector<int64_t> slice_sizes(proto.dynamic_slice_sizes_size());
absl::c_copy(proto.dynamic_slice_sizes(), slice_sizes.begin());
      TF_RET_CHECK(proto.operand_ids_size() >= 1)
          << "DynamicSlice instruction should have at least 1 operand but "
             "sees "
          << proto.operand_ids_size();
if (proto.operand_ids_size() != 2 || operands(1)->shape().rank() != 1) {
auto expected_operands = 1 + operands(0)->shape().rank();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< "DynamicSlice instruction should have " << expected_operands
<< " operands, but has " << proto.operand_ids_size();
}
const auto& operand_vector = all_operands();
instruction = CreateDynamicSlice(
shape, operands(0), absl::MakeSpan(operand_vector).subspan(1),
slice_sizes);
break;
}
case HloOpcode::kDynamicUpdateSlice: {
TF_RET_CHECK(proto.operand_ids_size() >= 2)
<< "DynamicUpdateSlice instruction should have at least 2 operands "
"but sees "
<< proto.operand_ids_size();
if (proto.operand_ids_size() != 3 || operands(2)->shape().rank() != 1) {
auto expected_operands = 2 + operands(0)->shape().rank();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< "DynamicUpdateSlice instruction should have "
<< expected_operands << " operands, but has "
<< proto.operand_ids_size();
}
const auto& operand_vector = all_operands();
instruction =
CreateDynamicUpdateSlice(shape, operands(0), operands(1),
absl::MakeSpan(operand_vector).subspan(2));
break;
}
case HloOpcode::kGather: {
TF_RET_CHECK(proto.has_gather_dimension_numbers())
<< "Gather instruction should have GatherDimensionNumbers set.";
auto gather_dimension_numbers = std::make_unique<GatherDimensionNumbers>(
proto.gather_dimension_numbers());
std::vector<int64_t> gather_slice_sizes;
const auto& slice_sizes = proto.gather_slice_sizes();
gather_slice_sizes.reserve(slice_sizes.size());
for (int64_t bound : slice_sizes) {
gather_slice_sizes.push_back(bound);
}
instruction = CreateGather(shape, operands(0), operands(1),
*gather_dimension_numbers, gather_slice_sizes,
proto.indices_are_sorted());
break;
}
case HloOpcode::kScatter: {
TF_RET_CHECK(proto.has_scatter_dimension_numbers())
<< "Scatter instruction should have ScatterDimensionNumbers set.";
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Scatter instruction should have 1 called computation but sees "
<< proto.called_computation_ids_size();
auto scatter_dimension_numbers =
std::make_unique<ScatterDimensionNumbers>(
proto.scatter_dimension_numbers());
auto operands = all_operands();
auto operand_span = absl::MakeConstSpan(operands);
auto input_count = operands.size() / 2;
instruction =
CreateScatter(shape, operand_span.first(input_count),
operands[input_count], operand_span.last(input_count),
computations(0), *scatter_dimension_numbers,
proto.indices_are_sorted(), proto.unique_indices());
break;
}
case HloOpcode::kIota:
TF_RET_CHECK(proto.dimensions_size() == 1)
<< "Iota instruction should have 1 dimension but sees "
<< proto.dimensions_size();
instruction = CreateIota(shape, proto.dimensions(0));
break;
case HloOpcode::kDot: {
int expected_operands =
HloDotInstruction::kOperands + proto.dot_sparsity_size();
TF_RET_CHECK(proto.dot_sparsity_size() <= HloDotInstruction::kOperands)
<< "Too many sparse dot descriptors: " << proto.dot_sparsity_size();
TF_RET_CHECK(proto.operand_ids_size() == expected_operands)
<< proto.opcode() << " instruction should have " << expected_operands
<< " operands but sees " << proto.operand_ids_size();
TF_RET_CHECK(proto.has_dot_dimension_numbers())
<< "Dot instruction should have dot_dimension_numbers.";
TF_RET_CHECK(absl::c_all_of(proto.precision_config().operand_precision(),
PrecisionConfig::Precision_IsValid));
PrecisionConfig precision_config = proto.precision_config();
precision_config.mutable_operand_precision()->Resize(
HloDotInstruction::kOperands, PrecisionConfig::DEFAULT);
std::vector<SparsityDescriptor> sparsity(proto.dot_sparsity().begin(),
proto.dot_sparsity().end());
auto operand_vector = all_operands();
instruction = std::make_unique<HloDotInstruction>(
shape, operands(0), operands(1), proto.dot_dimension_numbers(),
precision_config, std::move(sparsity),
absl::MakeSpan(operand_vector).subspan(HloDotInstruction::kOperands));
break;
}
case HloOpcode::kDomain: {
std::shared_ptr<const HloSharding> entry_hlo_sharding;
std::shared_ptr<const HloSharding> exit_hlo_sharding;
if (proto.has_domain_entry_sharding()) {
TF_ASSIGN_OR_RETURN(
HloSharding sharding,
HloSharding::FromProto(proto.domain_entry_sharding()));
entry_hlo_sharding = std::make_shared<const HloSharding>(sharding);
}
if (proto.has_domain_exit_sharding()) {
TF_ASSIGN_OR_RETURN(
HloSharding sharding,
HloSharding::FromProto(proto.domain_exit_sharding()));
exit_hlo_sharding = std::make_shared<const HloSharding>(sharding);
}
instruction = std::make_unique<HloDomainInstruction>(
shape, operands(0),
std::make_unique<ShardingMetadata>(entry_hlo_sharding),
std::make_unique<ShardingMetadata>(exit_hlo_sharding));
break;
}
case HloOpcode::kGetDimensionSize:
TF_RET_CHECK(proto.dimensions_size() == 1);
instruction =
CreateGetDimensionSize(shape, operands(0), proto.dimensions(0));
break;
case HloOpcode::kSetDimensionSize:
TF_RET_CHECK(proto.dimensions_size() == 1);
instruction = CreateSetDimensionSize(shape, operands(0), operands(1),
proto.dimensions(0));
break;
case HloOpcode::kReshape: {
int64_t inferred_dimension = -1;
if (!proto.dimensions().empty()) {
inferred_dimension = proto.dimensions()[0];
}
TF_RET_CHECK(shape.IsArray() && operands(0)->shape().IsArray() &&
(operands(0)->shape().is_unbounded_dynamic() ||
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operands(0)->shape())))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operands(0)->shape());
instruction = CreateReshape(shape, operands(0), inferred_dimension);
break;
}
case HloOpcode::kDynamicReshape: {
TF_RET_CHECK(shape.IsArray() && operands(0)->shape().IsArray() &&
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operands(0)->shape()))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operands(0)->shape());
const auto& operand_vector = all_operands();
instruction = CreateDynamicReshape(
shape, operands(0), absl::MakeSpan(operand_vector).subspan(1));
break;
}
case HloOpcode::kCall: {
TF_RET_CHECK(proto.called_computation_ids_size() == 1)
<< "Call should have 1 called computation but has "
<< proto.called_computation_ids_size();
      // Use proto.opcode() in the error streams: `instruction` has not been
      // constructed yet at this point in the kCall case, so dereferencing it
      // on check failure would be a null dereference.
      TF_RET_CHECK(!proto.has_precision_config())
          << proto.opcode() << proto.name();
      TF_RET_CHECK(!proto.has_dot_dimension_numbers()) << proto.opcode();
if (proto.is_composite()) {
TF_RET_CHECK(proto.has_frontend_attributes())
<< "A composite call op must have frontend attributes";
auto map = proto.frontend_attributes().map();
auto name = map.find("composite.name");
TF_RET_CHECK(name != map.end() && !name->second.empty())
<< "A composite call op must have frontend attributes with key "
"composite.name whose value is non-empty";
auto attributes = map.find("composite.attributes");
TF_RET_CHECK(attributes == map.end() || !attributes->second.empty())
<< "A composite call op must have frontend attributes with key "
"composite.attributes whose value is default: {} or non-empty";
auto version_str = map.find("composite.version");
int64_t version = 0;
TF_RET_CHECK(
version_str == map.end() ||
(absl::SimpleAtoi(version_str->second, &version) && version >= 0))
<< "A composite call op must have frontend attributes with a "
"composite.version whose value is a non-negative integer but "
"got: "
<< version_str->second;
instruction = CreateCompositeCall(
shape, all_operands(),
computation_map.at(proto.called_computation_ids()[0]), name->second,
attributes == map.end() ? "{}" : attributes->second, version);
instruction->set_output_to_operand_aliasing(
output_to_operand_aliasing());
} else {
instruction = std::make_unique<HloCallInstruction>(
shape, all_operands(),
computation_map.at(proto.called_computation_ids()[0]));
instruction->set_output_to_operand_aliasing(
output_to_operand_aliasing());
}
break;
}
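    // Opcodes without a specialized HloInstruction subclass: construct a
    // plain HloInstruction and attach operands and called computations
    // directly from the proto.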
default: {
instruction = absl::WrapUnique(new HloInstruction(opcode, shape));
if (instruction->opcode() == HloOpcode::kWhile) {
TF_RET_CHECK(proto.called_computation_ids_size() == 2)
<< "While should have 2 called computation but has "
<< proto.called_computation_ids_size();
computation_map.at(proto.called_computation_ids(0))
->SetWhileCallInstruction(instruction.get());
}
for (const int64_t operand_id : proto.operand_ids()) {
instruction->AppendOperand(instruction_map.at(operand_id));
}
for (const int64_t computation_id : proto.called_computation_ids()) {
instruction->AppendComputation(computation_map.at(computation_id));
}
if (instruction->opcode() == HloOpcode::kWhile) {
instruction->while_body()->SetWhileCallInstruction(instruction.get());
}
TF_RET_CHECK(!proto.has_precision_config())
<< instruction->opcode() << proto.DebugString();
TF_RET_CHECK(!proto.has_dot_dimension_numbers()) << instruction->opcode();
break;
}
}
for (const int64_t predecessor_id : proto.control_predecessor_ids()) {
TF_RET_CHECK(ContainsKey(instruction_map, predecessor_id))
<< "No instruction with id " << predecessor_id;
TF_RETURN_IF_ERROR(instruction_map.at(predecessor_id)
->AddControlDependencyTo(instruction.get()));
}
TF_RET_CHECK(!proto.name().empty());
instruction->SetAndSanitizeName(proto.name());
*instruction->metadata_ = proto.metadata();
instruction->backend_config_ = BackendConfigWrapper(proto.backend_config());
TF_RET_CHECK(proto.id() >= 0)
<< "Instruction with negative id: " << proto.id();
TF_RET_CHECK(proto.id() <= INT_MAX)
<< "Instruction with id > INT_MAX: " << proto.id();
instruction->unique_id_ = proto.id();
if (proto.has_sharding()) {
TF_ASSIGN_OR_RETURN(HloSharding sharding,
HloSharding::FromProto(proto.sharding()));
sharding = sharding.NormalizeTupleSharding(instruction->shape());
instruction->set_sharding(sharding);
}
if (proto.has_frontend_attributes()) {
instruction->set_frontend_attributes(proto.frontend_attributes());
}
if (proto.has_statistics_viz()) {
instruction->set_statistics_viz(proto.statistics_viz());
}
if (proto.has_original_value()) {
const xla::OriginalValueProto& original_value_proto =
proto.original_value();
auto original_value = std::make_shared<OriginalValue>(shape);
for (const auto& leaf : original_value_proto.leaves()) {
*original_value->mutable_element(ShapeIndex(leaf.leaf_shape_index())) = {
leaf.instruction_name(), ShapeIndex(leaf.shape_index())};
}
instruction->set_original_value(original_value);
}
return std::move(instruction);
}
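// Factory methods for constructing HloInstructions. Each returns a
// std::unique_ptr that owns the new instruction; callers typically transfer
// ownership to an HloComputation (e.g. via HloComputation::AddInstruction).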
std::unique_ptr<HloInstruction> HloInstruction::CreateParameter(
int64_t parameter_number, const Shape& shape, absl::string_view name) {
return std::make_unique<HloParameterInstruction>(parameter_number, shape,
name);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConstant(
Literal literal) {
return std::make_unique<HloConstantInstruction>(std::move(literal));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateIota(
const Shape& shape, int64_t iota_dimension) {
return std::make_unique<HloIotaInstruction>(shape, iota_dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTopK(
const Shape& shape, HloInstruction* input, int64_t k, bool largest) {
return std::make_unique<HloTopKInstruction>(shape, input, k, largest);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetTupleElement(const Shape& shape,
HloInstruction* operand, int64_t index) {
return std::make_unique<HloGetTupleElementInstruction>(shape, operand, index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetTupleElement(HloInstruction* operand, int64_t index) {
return std::make_unique<HloGetTupleElementInstruction>(
operand->shape().tuple_shapes(index), operand, index);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRng(
const Shape& shape, RandomDistribution distribution,
absl::Span<HloInstruction* const> parameters) {
return std::make_unique<HloRngInstruction>(shape, distribution, parameters);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateRngGetAndUpdateState(const Shape& shape, int64_t delta) {
return std::make_unique<HloRngGetAndUpdateStateInstruction>(shape, delta);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateRngBitGenerator(const Shape& shape, HloInstruction* state,
RandomAlgorithm algorithm) {
return std::make_unique<HloRngBitGeneratorInstruction>(shape, state,
algorithm);
}
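// Shared helper behind the arity-checked factories below (CreateUnary,
// CreateBinary, CreateTernary, CreateVariadic): wraps a raw HloInstruction
// of the given opcode and shape, then appends each operand in order.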
std::unique_ptr<HloInstruction> HloInstruction::CreateNary(
const Shape& shape, HloOpcode opcode,
absl::Span<HloInstruction* const> operands) {
if (opcode == HloOpcode::kCopy) {
CHECK(!shape.IsOpaque());
}
auto instruction = absl::WrapUnique(new HloInstruction(opcode, shape));
for (auto operand : operands) {
instruction->AppendOperand(operand);
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateUnary(
const Shape& shape, HloOpcode opcode, HloInstruction* operand) {
switch (opcode) {
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kClz:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTanh:
case HloOpcode::kTan:
break;
default:
LOG(FATAL) << "Invalid unary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {operand});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBinary(
const Shape& shape, HloOpcode opcode, HloInstruction* lhs,
HloInstruction* rhs) {
switch (opcode) {
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kDivide:
case HloOpcode::kComplex:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
break;
default:
LOG(FATAL) << "Invalid binary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {lhs, rhs});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTernary(
const Shape& shape, HloOpcode opcode, HloInstruction* lhs,
HloInstruction* rhs, HloInstruction* ehs) {
switch (opcode) {
case HloOpcode::kClamp:
case HloOpcode::kSelect:
break;
default:
LOG(FATAL) << "Invalid ternary instruction opcode " << opcode;
}
return CreateNary(shape, opcode, {lhs, rhs, ehs});
}
std::unique_ptr<HloInstruction> HloInstruction::CreateVariadic(
const Shape& shape, HloOpcode opcode,
absl::Span<HloInstruction* const> operands) {
CHECK_EQ(HloOpcode::kTuple, opcode);
return CreateNary(shape, opcode, operands);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateMap(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* map_computation) {
return std::make_unique<HloMapInstruction>(shape, operands, map_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConvolve(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64_t feature_group_count, int64_t batch_group_count,
const Window& window, const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config) {
return std::make_unique<HloConvolutionInstruction>(
shape, lhs, rhs, feature_group_count, batch_group_count, window,
dimension_numbers, precision_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFft(
const Shape& shape, HloInstruction* operand, FftType fft_type,
absl::Span<const int64_t> fft_length) {
return std::make_unique<HloFftInstruction>(shape, operand, fft_type,
fft_length);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* async_computation,
absl::string_view async_execution_thread) {
return std::make_unique<HloAsyncStartInstruction>(
HloOpcode::kAsyncStart, shape, operands, async_computation,
async_execution_thread);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncUpdate(
const Shape& shape, HloInstruction* operand) {
return std::make_unique<HloAsyncInstruction>(HloOpcode::kAsyncUpdate, shape,
operand);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAsyncDone(
const Shape& shape, HloInstruction* operand) {
return std::make_unique<HloAsyncInstruction>(HloOpcode::kAsyncDone, shape,
operand);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCopyStart(
const Shape& shape, HloInstruction* operand,
std::optional<int> cross_program_prefetch) {
return std::make_unique<HloCopyStartInstruction>(shape, operand,
cross_program_prefetch);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCompare(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
ComparisonDirection direction, std::optional<Comparison::Type> type) {
return std::make_unique<HloCompareInstruction>(shape, lhs, rhs, direction,
type);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateTriangularSolve(const Shape& shape, HloInstruction* a,
HloInstruction* b,
const TriangularSolveOptions& options) {
return std::make_unique<HloTriangularSolveInstruction>(shape, a, b, options);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCholesky(
const Shape& shape, HloInstruction* a, const CholeskyOptions& options) {
return std::make_unique<HloCholeskyInstruction>(shape, a, options);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDot(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
const DotDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config,
std::vector<SparsityDescriptor> sparsity,
absl::Span<HloInstruction* const> sparse_meta) {
return std::make_unique<HloDotInstruction>(shape, lhs, rhs, dimension_numbers,
precision_config,
std::move(sparsity), sparse_meta);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReducePrecision(const Shape& shape,
HloInstruction* operand,
const int exponent_bits,
const int mantissa_bits) {
return std::make_unique<HloReducePrecisionInstruction>(
shape, operand, exponent_bits, mantissa_bits);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllGather(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllGatherInstruction>(
HloOpcode::kAllGather, shape, operands, all_gather_dimension, device_list,
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllGather(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, absl::Span<const ReplicaGroup> replica_groups,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return CreateAllGather(shape, operands, all_gather_dimension,
CollectiveDeviceList(replica_groups), constrain_layout,
channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllGatherStart(const Shape& shape,
absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension,
const CollectiveDeviceList& device_list,
bool constrain_layout,
const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllGatherInstruction>(
HloOpcode::kAllGatherStart, shape, operands, all_gather_dimension,
device_list, constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllGatherStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t all_gather_dimension, absl::Span<const ReplicaGroup> replica_groups,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return CreateAllGatherStart(shape, operands, all_gather_dimension,
CollectiveDeviceList(replica_groups),
constrain_layout, channel_id,
use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllReduceInstruction>(
HloOpcode::kAllReduce, shape, operands, reduce_computation, device_list,
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids) {
return CreateAllReduce(shape, operands, reduce_computation,
CollectiveDeviceList(replica_groups), constrain_layout,
channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReduceScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation, const CollectiveDeviceList& device_list,
bool constrain_layout, const std::optional<int64_t>& channel_id,
bool use_global_device_ids, int64_t scatter_dimension) {
return std::make_unique<HloReduceScatterInstruction>(
shape, operands, reduce_computation, device_list, constrain_layout,
channel_id, use_global_device_ids, scatter_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateReduceScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids,
int64_t scatter_dimension) {
return CreateReduceScatter(
shape, operands, reduce_computation, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, use_global_device_ids, scatter_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllReduceStart(const Shape& shape,
absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
const CollectiveDeviceList& device_list,
bool constrain_layout,
const std::optional<int64_t>& channel_id,
bool use_global_device_ids) {
return std::make_unique<HloAllReduceInstruction>(
HloOpcode::kAllReduceStart, shape, operands, reduce_computation,
device_list, constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAllReduceStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* reduce_computation,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id, bool use_global_device_ids) {
return CreateAllReduceStart(
shape, operands, reduce_computation, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, use_global_device_ids);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllToAll(
const Shape& shape, absl::Span<HloInstruction* const> operands,
const CollectiveDeviceList& device_list, bool constrain_layout,
const std::optional<int64_t>& channel_id,
const std::optional<int64_t>& split_dimension) {
return std::make_unique<HloAllToAllInstruction>(shape, operands, device_list,
constrain_layout, channel_id,
split_dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAllToAll(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id,
const std::optional<int64_t>& split_dimension) {
return CreateAllToAll(shape, operands, CollectiveDeviceList(replica_groups),
constrain_layout, channel_id, split_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectiveBroadcast(
const Shape& shape, absl::Span<HloInstruction* const> operands,
const CollectiveDeviceList& device_list, bool constrain_layout,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectiveBroadcastInstruction>(
HloOpcode::kCollectiveBroadcast, shape, operands, device_list,
constrain_layout, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectiveBroadcast(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<const ReplicaGroup> replica_groups, bool constrain_layout,
const std::optional<int64_t>& channel_id) {
return CreateCollectiveBroadcast(shape, operands,
CollectiveDeviceList(replica_groups),
constrain_layout, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermute(
const Shape& shape, HloInstruction* operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermute, shape, operand, source_target_pairs,
channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermute(
const Shape& shape, HloInstruction* input, HloInstruction* output,
HloInstruction* input_start_indices, HloInstruction* output_start_indices,
absl::Span<const std::pair<int64_t, int64_t>> source_target_pairs,
absl::Span<const std::vector<int64_t>> slice_sizes,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermute, shape, input, output, input_start_indices,
output_start_indices, source_target_pairs, slice_sizes, channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermuteStart(
const Shape& shape, HloInstruction* operand,
const std::vector<std::pair<int64_t, int64_t>>& source_target_pairs,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermuteStart, shape, operand, source_target_pairs,
channel_id);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCollectivePermuteStart(
const Shape& shape, HloInstruction* input, HloInstruction* output,
HloInstruction* input_start_indices, HloInstruction* output_start_indices,
absl::Span<const std::pair<int64_t, int64_t>> source_target_pairs,
absl::Span<const std::vector<int64_t>> slice_sizes,
const std::optional<int64_t>& channel_id) {
return std::make_unique<HloCollectivePermuteInstruction>(
HloOpcode::kCollectivePermuteStart, shape, input, output,
input_start_indices, output_start_indices, source_target_pairs,
slice_sizes, channel_id);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReplicaId(
const Shape& shape) {
CHECK(Shape::Equal().IgnoreLayout()(shape, ShapeUtil::MakeShape(U32, {})))
<< "HloInstruction replica-id must have a shape of u32[], but "
<< shape.ToString() << " is specified";
return absl::WrapUnique(new HloInstruction(HloOpcode::kReplicaId, shape));
}
std::unique_ptr<HloInstruction> HloInstruction::CreatePartitionId(
const Shape& shape) {
CHECK(Shape::Equal().IgnoreLayout()(shape, ShapeUtil::MakeShape(U32, {})))
<< "HloInstruction partition-id must have a shape of u32[], but "
<< shape.ToString() << " is specified";
return absl::WrapUnique(new HloInstruction(HloOpcode::kPartitionId, shape));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateInfeed(
const Shape& infeed_shape, HloInstruction* token_operand,
const std::string& config) {
return std::make_unique<HloInfeedInstruction>(infeed_shape, token_operand,
config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateOutfeed(
const Shape& outfeed_shape, HloInstruction* operand,
HloInstruction* token_operand, absl::string_view outfeed_config) {
return std::make_unique<HloOutfeedInstruction>(outfeed_shape, operand,
token_operand, outfeed_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSend(
HloInstruction* operand, HloInstruction* token, int64_t channel_id,
bool is_host_transfer) {
return std::make_unique<HloSendInstruction>(operand, token, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
HloInstruction* operand, bool is_host_transfer) {
auto send_operand = DynCast<HloSendInstruction>(operand);
CHECK(send_operand != nullptr)
<< "SendDone must take the context operand from Send";
return std::make_unique<HloSendDoneInstruction>(send_operand,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSendDone(
HloInstruction* operand, int64_t channel_id, bool is_host_transfer) {
return std::make_unique<HloSendDoneInstruction>(operand, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecv(
const Shape& shape, HloInstruction* token, int64_t channel_id,
bool is_host_transfer) {
return std::make_unique<HloRecvInstruction>(shape, token, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
HloInstruction* operand, bool is_host_transfer) {
auto recv_operand = DynCast<HloRecvInstruction>(operand);
CHECK(recv_operand != nullptr)
<< "RecvDone must take the context operand from Recv";
return std::make_unique<HloRecvDoneInstruction>(recv_operand,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateRecvDone(
HloInstruction* operand, int64_t channel_id, bool is_host_transfer) {
return std::make_unique<HloRecvDoneInstruction>(operand, channel_id,
is_host_transfer);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReverse(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> dimensions) {
return std::make_unique<HloReverseInstruction>(shape, operand, dimensions);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateAfterAll(
absl::Span<HloInstruction* const> operands) {
CHECK(!operands.empty());
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
for (auto operand : operands) {
instruction->AppendOperand(operand);
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateToken() {
return absl::WrapUnique(
new HloInstruction(HloOpcode::kAfterAll, ShapeUtil::MakeTokenShape()));
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateAddDependency(HloInstruction* data_operand,
HloInstruction* token_operand) {
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kAddDependency, data_operand->shape()));
instruction->AppendOperand(data_operand);
instruction->AppendOperand(token_operand);
return instruction;
}
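// Note the operand/computation layout: the single operand is the loop init
// value, and the body is appended before the condition. The body computation
// also receives a back-pointer to the newly created while instruction.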
std::unique_ptr<HloInstruction> HloInstruction::CreateWhile(
const Shape& shape, HloComputation* condition, HloComputation* body,
HloInstruction* init) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kWhile, shape));
instruction->AppendOperand(init);
instruction->AppendComputation(body);
instruction->AppendComputation(condition);
body->SetWhileCallInstruction(instruction.get());
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConditional(
const Shape& shape, HloInstruction* pred,
HloInstruction* true_computation_arg, HloComputation* true_computation,
HloInstruction* false_computation_arg, HloComputation* false_computation) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConditional, shape));
instruction->AppendOperand(pred);
instruction->AppendOperand(true_computation_arg);
instruction->AppendOperand(false_computation_arg);
instruction->AppendComputation(true_computation);
instruction->AppendComputation(false_computation);
true_computation->SetConditionalCallInstruction(instruction.get());
false_computation->SetConditionalCallInstruction(instruction.get());
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConditional(
const Shape& shape, HloInstruction* branch_index,
absl::Span<HloComputation* const> branch_computations,
absl::Span<HloInstruction* const> branch_computation_args) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConditional, shape));
instruction->AppendOperand(branch_index);
CHECK_EQ(branch_computations.size(), branch_computation_args.size());
for (int i = 0; i < branch_computations.size(); ++i) {
instruction->AppendComputation(branch_computations[i]);
instruction->AppendOperand(branch_computation_args[i]);
branch_computations[i]->SetConditionalCallInstruction(instruction.get());
}
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSlice(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> start_indices,
absl::Span<const int64_t> limit_indices,
absl::Span<const int64_t> strides) {
return std::make_unique<HloSliceInstruction>(shape, operand, start_indices,
limit_indices, strides);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDynamicSlice(
const Shape& shape, HloInstruction* operand,
absl::Span<HloInstruction* const> start_indices,
absl::Span<const int64_t> slice_sizes) {
return std::make_unique<HloDynamicSliceInstruction>(
shape, operand, start_indices, slice_sizes);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateDynamicUpdateSlice(
const Shape& shape, HloInstruction* operand, HloInstruction* update,
absl::Span<HloInstruction* const> start_indices) {
return std::make_unique<HloDynamicUpdateSliceInstruction>(
shape, operand, update, start_indices);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConcatenate(
const Shape& shape, absl::Span<HloInstruction* const> operands,
int64_t dimension) {
return std::make_unique<HloConcatenateInstruction>(shape, operands,
dimension);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateConvert(
const Shape& shape, HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kConvert, shape));
instruction->AppendOperand(operand);
return instruction;
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBitcastConvert(const Shape& shape,
HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kBitcastConvert, shape));
instruction->AppendOperand(operand);
return instruction;
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateStochasticConvert(const Shape& shape,
HloInstruction* operand,
HloInstruction* random) {
auto instruction = absl::WrapUnique(
new HloInstruction(HloOpcode::kStochasticConvert, shape));
instruction->AppendOperand(operand);
instruction->AppendOperand(random);
return instruction;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBitcast(
const Shape& shape, HloInstruction* operand) {
auto instruction =
absl::WrapUnique(new HloInstruction(HloOpcode::kBitcast, shape));
instruction->AppendOperand(operand);
return instruction;
}
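// The CreateReduce overloads below accept, respectively: a single
// operand/init pair; parallel spans of operands and init values (flattened
// as {operands..., init_values...}); or a tuple-shaped instruction whose
// elements are unpacked through get-tuple-element before reducing.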
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
return absl::WrapUnique(new HloReduceInstruction(
shape, {operand, init_value}, dimensions_to_reduce, reduce_computation));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
std::vector<HloInstruction*> all_args;
all_args.reserve(operands.size() * 2);
all_args.insert(all_args.end(), operands.begin(), operands.end());
all_args.insert(all_args.end(), init_values.begin(), init_values.end());
return std::make_unique<HloReduceInstruction>(
shape, all_args, dimensions_to_reduce, reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduce(
const Shape& shape, HloInstruction* tuple_of_instructions,
absl::Span<HloInstruction* const> init_values,
absl::Span<const int64_t> dimensions_to_reduce,
HloComputation* reduce_computation) {
if (!tuple_of_instructions->shape().IsTuple()) {
CHECK_EQ(init_values.size(), 1)
<< "The first input has to be a tuple, or the number of init values "
"has to be one.";
return CreateReduce(shape, tuple_of_instructions, init_values[0],
dimensions_to_reduce, reduce_computation);
}
absl::InlinedVector<HloInstruction*, 4> inputs;
for (int idx = 0; idx < tuple_of_instructions->shape().tuple_shapes_size();
idx++) {
std::unique_ptr<HloInstruction> gte =
HloInstruction::CreateGetTupleElement(tuple_of_instructions, idx);
inputs.push_back(
tuple_of_instructions->parent()->AddInstruction(std::move(gte)));
}
return CreateReduce(shape, inputs, init_values, dimensions_to_reduce,
reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduceWindow(
const Shape& shape, HloInstruction* operand, HloInstruction* init_value,
const Window& window, HloComputation* reduce_computation) {
return std::make_unique<HloReduceWindowInstruction>(
shape, operand, init_value, window, reduce_computation);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReduceWindow(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloInstruction* const> init_values, const Window& window,
HloComputation* reduce_computation) {
return std::make_unique<HloReduceWindowInstruction>(
shape, operands, init_values, window, reduce_computation);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormTraining(const Shape& shape,
HloInstruction* operand,
HloInstruction* scale,
HloInstruction* offset, float epsilon,
int64_t feature_index) {
return std::make_unique<HloBatchNormTrainingInstruction>(
shape, operand, scale, offset, epsilon, feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormInference(
const Shape& shape, HloInstruction* operand, HloInstruction* scale,
HloInstruction* offset, HloInstruction* mean, HloInstruction* variance,
float epsilon, int64_t feature_index) {
return std::make_unique<HloBatchNormInferenceInstruction>(
shape, operand, scale, offset, mean, variance, epsilon, feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateBatchNormGrad(const Shape& shape, HloInstruction* operand,
HloInstruction* scale, HloInstruction* mean,
HloInstruction* variance,
HloInstruction* grad_output, float epsilon,
int64_t feature_index) {
return std::make_unique<HloBatchNormGradInstruction>(
shape, operand, scale, mean, variance, grad_output, epsilon,
feature_index);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateSelectAndScatter(
const Shape& shape, HloInstruction* operand, HloComputation* select,
const Window& window, HloInstruction* source, HloInstruction* init_value,
HloComputation* scatter) {
return std::make_unique<HloSelectAndScatterInstruction>(
shape, operand, select, window, source, init_value, scatter);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateBroadcast(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> broadcast_dimensions) {
return std::make_unique<HloBroadcastInstruction>(shape, operand,
broadcast_dimensions);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateGetDimensionSize(const Shape& shape,
HloInstruction* operand,
int64_t dimension) {
return std::make_unique<HloGetDimensionSizeInstruction>(shape, operand,
dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateSetDimensionSize(const Shape& shape,
HloInstruction* operand,
HloInstruction* val, int64_t dimension) {
return std::make_unique<HloSetDimensionSizeInstruction>(shape, operand, val,
dimension);
}
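// Broadcasts `operand` to `output_shape`, emitting intermediate instructions
// through `adder`. A scalar operand becomes a single broadcast; otherwise the
// operand must match the output rank, degenerate (size-1) dimensions are
// first squeezed out with a reshape, and the result is broadcast along the
// surviving dimensions. Metadata, sharding, frontend attributes, and
// statistics are propagated to every emitted instruction.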
std::unique_ptr<HloInstruction>
HloInstruction::CreateBroadcastSequence(
const Shape& output_shape, HloInstruction* operand,
absl::FunctionRef<HloInstruction*(std::unique_ptr<HloInstruction>)> adder) {
CHECK(ShapeUtil::IsScalar(operand->shape()) ||
operand->shape().rank() == output_shape.rank());
Shape broadcast_shape = ShapeUtil::ChangeElementType(
output_shape, operand->shape().element_type());
if (ShapeUtil::IsScalar(operand->shape())) {
auto broadcast =
HloInstruction::CreateBroadcast(broadcast_shape, operand, {});
broadcast->set_metadata(operand->metadata());
if (operand->has_sharding()) {
broadcast->copy_sharding(operand);
}
broadcast->set_frontend_attributes(operand->frontend_attributes());
broadcast->set_statistics_viz(operand->statistics_viz());
return broadcast;
}
std::vector<int64_t> broadcast_dimensions;
std::vector<int64_t> reshaped_dimensions;
for (int i = 0; i < operand->shape().rank(); i++) {
if (operand->shape().dimensions(i) == output_shape.dimensions(i)) {
broadcast_dimensions.push_back(i);
reshaped_dimensions.push_back(operand->shape().dimensions(i));
} else {
CHECK_EQ(operand->shape().dimensions(i), 1)
<< "An explicit broadcast sequence requires the broadcasted "
"dimensions to be trivial; operand: "
<< operand->ToString() << "; output_shape: " << output_shape;
}
}
HloInstruction* reshaped_operand = adder(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(operand->shape().element_type(),
reshaped_dimensions),
operand));
reshaped_operand->set_metadata(operand->metadata());
if (operand->has_sharding()) {
reshaped_operand->copy_sharding(operand);
}
reshaped_operand->set_frontend_attributes(operand->frontend_attributes());
reshaped_operand->set_statistics_viz(operand->statistics_viz());
auto broadcast = HloInstruction::CreateBroadcast(
broadcast_shape, reshaped_operand, broadcast_dimensions);
broadcast->set_metadata(operand->metadata());
if (operand->has_sharding()) {
broadcast->copy_sharding(operand);
}
broadcast->set_frontend_attributes(operand->frontend_attributes());
broadcast->set_statistics_viz(operand->statistics_viz());
return broadcast;
}
std::unique_ptr<HloInstruction> HloInstruction::CreatePad(
const Shape& shape, HloInstruction* operand, HloInstruction* padding_value,
const PaddingConfig& padding_config) {
return std::make_unique<HloPadInstruction>(shape, operand, padding_value,
padding_config);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateReshape(
const Shape& shape, HloInstruction* operand, int64_t inferred_dimension) {
CHECK(operand->shape().is_unbounded_dynamic() ||
ShapeUtil::StaticExtentProduct(shape) ==
ShapeUtil::StaticExtentProduct(operand->shape()))
<< "shape: " << ShapeUtil::HumanString(shape)
<< " operand: " << ShapeUtil::HumanString(operand->shape());
return std::make_unique<HloReshapeInstruction>(shape, operand,
inferred_dimension);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateDynamicReshape(
const Shape& shape, HloInstruction* data_operand,
absl::Span<HloInstruction* const> dim_sizes) {
  CHECK_EQ(ShapeUtil::StaticExtentProduct(shape),
           ShapeUtil::StaticExtentProduct(data_operand->shape()))
      << "shape: " << ShapeUtil::HumanString(shape)
      << " operand: " << ShapeUtil::HumanString(data_operand->shape());
CHECK_EQ(shape.rank(), dim_sizes.size());
return std::make_unique<HloDynamicReshapeInstruction>(shape, data_operand,
dim_sizes);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTranspose(
const Shape& shape, HloInstruction* operand,
absl::Span<const int64_t> dimensions) {
return std::make_unique<HloTransposeInstruction>(shape, operand, dimensions);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateSort(
const Shape& shape, int64_t dimension,
absl::Span<HloInstruction* const> operands, HloComputation* compare,
bool is_stable) {
return std::make_unique<HloSortInstruction>(shape, dimension, operands,
compare, is_stable);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind, HloInstruction* fused_root,
absl::string_view prefix) {
return std::make_unique<HloFusionInstruction>(shape, fusion_kind, fused_root,
prefix);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateFusion(
const Shape& shape, FusionKind fusion_kind,
absl::Span<HloInstruction* const> operands,
HloComputation* fusion_computation, absl::string_view prefix) {
return std::make_unique<HloFusionInstruction>(shape, fusion_kind, operands,
fusion_computation, prefix);
}
void HloInstruction::set_single_sharding(const HloSharding& sharding) {
CHECK(!sharding.IsTuple()) << sharding;
if (shape().IsTuple()) {
set_sharding(HloSharding::Tuple(sharding.GetAsShapeTree(shape())));
} else {
set_sharding(sharding);
}
}
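// Propagates sharding (when the shapes are kind-compatible), metadata,
// frontend attributes, statistics, and (for same-opcode instructions) the
// backend config from this instruction to `derived_instruction`.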
void HloInstruction::SetupDerivedInstruction(
HloInstruction* derived_instruction) const {
if (sharding_ != nullptr &&
ShapeUtil::CompatibleKind(shape_, derived_instruction->shape())) {
derived_instruction->set_sharding(*sharding_);
} else if (!ShapeUtil::CompatibleKind(shape_, derived_instruction->shape())) {
derived_instruction->clear_sharding();
}
derived_instruction->set_metadata(*metadata_);
if (has_rare()) {
derived_instruction->set_frontend_attributes(frontend_attributes());
derived_instruction->set_statistics_viz(statistics_viz());
} else if (derived_instruction->has_rare()) {
derived_instruction->mutable_rare()->frontend_attributes.Clear();
derived_instruction->mutable_rare()->statistics_viz.Clear();
}
if (opcode() == derived_instruction->opcode() && has_backend_config()) {
derived_instruction->CopyBackendConfigFrom(this);
}
}
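// Returns true if this instruction itself has a side effect, without looking
// into called computations; HasSideEffect() below additionally recurses into
// them.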
bool HloInstruction::HasSideEffectNoRecurse() const {
switch (opcode_) {
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kRng:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllGatherDone:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kCollectivePermuteDone:
return true;
case HloOpcode::kAllToAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
if (Cast<HloCollectiveInstruction>(this)->constrain_layout()) {
return true;
}
[[fallthrough]];
case HloOpcode::kCollectivePermute:
return Cast<HloChannelInstruction>(this)->channel_id().has_value() &&
!GetModule()->config().use_spmd_partitioning();
case HloOpcode::kCustomCall:
return Cast<HloCustomCallInstruction>(this)
->custom_call_has_side_effect();
default:
return false;
}
}
bool HloInstruction::HasSideEffect() const {
if (HasSideEffectNoRecurse()) {
return true;
}
for (const auto& computation : called_computations()) {
if (computation->HasSideEffect()) {
return true;
}
}
return false;
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCall(
const Shape& shape, HloInstruction* called_computation_root) {
return std::make_unique<HloCallInstruction>(shape, called_computation_root);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* computation) {
return std::make_unique<HloCallInstruction>(shape, operands, computation);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCompositeCall(const Shape& shape,
HloInstruction* decomposition_root,
const std::string& name,
const std::string& attributes,
int64_t version) {
return std::make_unique<HloCallInstruction>(shape, decomposition_root, name,
attributes, version);
}
std::unique_ptr<HloInstruction>
HloInstruction::CreateCompositeCall(const Shape& shape,
absl::Span<HloInstruction* const> operands,
HloComputation* decomposition,
const std::string& name,
const std::string& attributes,
int64_t version) {
return std::make_unique<HloCallInstruction>(shape, operands, decomposition,
name, attributes, version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, custom_call_target, std::move(opaque), api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* to_apply, absl::string_view custom_call_target,
std::string opaque, CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, to_apply, custom_call_target, std::move(opaque),
api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::Span<HloComputation* const> called_computations,
absl::string_view custom_call_target, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, called_computations, custom_call_target,
std::move(opaque), api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateCustomCall(
const Shape& shape, absl::Span<HloInstruction* const> operands,
absl::string_view custom_call_target,
absl::Span<const Shape> operand_shapes_with_layout, std::string opaque,
CustomCallApiVersion api_version) {
return std::make_unique<HloCustomCallInstruction>(
shape, operands, custom_call_target, std::move(opaque),
operand_shapes_with_layout, api_version);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateTuple(
absl::Span<HloInstruction* const> elements) {
std::vector<const Shape*> element_shapes;
element_shapes.reserve(elements.size());
for (auto element : elements) {
element_shapes.push_back(&element->shape());
}
Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(element_shapes);
return CreateVariadic(tuple_shape, HloOpcode::kTuple, elements);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateGather(
const Shape& shape, HloInstruction* operand, HloInstruction* start_indices,
const GatherDimensionNumbers& gather_dim_numbers,
absl::Span<const int64_t> slice_sizes, bool indices_are_sorted) {
return std::make_unique<HloGatherInstruction>(shape, operand, start_indices,
gather_dim_numbers, slice_sizes,
indices_are_sorted);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateScatter(
const Shape& shape, HloInstruction* operand,
HloInstruction* scatter_indices, HloInstruction* updates,
HloComputation* update_computation,
const ScatterDimensionNumbers& scatter_dim_numbers, bool indices_are_sorted,
bool unique_indices) {
return absl::WrapUnique(new HloScatterInstruction(
shape, {operand, scatter_indices, updates}, update_computation,
scatter_dim_numbers, indices_are_sorted, unique_indices));
}
std::unique_ptr<HloInstruction> HloInstruction::CreateScatter(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloInstruction* scatter_indices, absl::Span<HloInstruction* const> updates,
HloComputation* update_computation,
const ScatterDimensionNumbers& scatter_dim_numbers, bool indices_are_sorted,
bool unique_indices) {
absl::InlinedVector<HloInstruction*, 3> args;
args.reserve(operands.size() + updates.size() + 1);
absl::c_copy(operands, std::back_inserter(args));
args.push_back(scatter_indices);
absl::c_copy(updates, std::back_inserter(args));
return std::make_unique<HloScatterInstruction>(
shape, args, update_computation, scatter_dim_numbers, indices_are_sorted,
unique_indices);
}
std::unique_ptr<HloInstruction> HloInstruction::CreateDomain(
const Shape& shape, HloInstruction* operand,
std::unique_ptr<DomainMetadata> operand_side_metadata,
std::unique_ptr<DomainMetadata> user_side_metadata) {
return std::make_unique<HloDomainInstruction>(
shape, operand, std::move(operand_side_metadata),
std::move(user_side_metadata));
}
bool HloInstruction::IsThreadIncluded(
absl::string_view execution_thread,
const absl::flat_hash_set<absl::string_view>& execution_threads_set) {
return execution_threads_set.empty() ||
execution_threads_set.contains(execution_thread);
}
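// Appends `.suffix` to the instruction name, bumping a trailing counter if
// the suffix is already present: "foo" -> "foo.clone" -> "foo.clone2" ->
// "foo.clone3" -> ...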
void HloInstruction::AddSuffixToInstructionName(
const absl::string_view suffix) {
const std::string dot_suffix = absl::StrCat(".", suffix);
size_t index = name().rfind(dot_suffix);
if (index == std::string::npos) {
this->name_ = absl::StrCat(name(), dot_suffix);
} else {
auto after_suffix = name().substr(index + dot_suffix.size());
if (after_suffix.empty()) {
this->name_ = absl::StrCat(name(), "2");
} else {
int64_t numeric_suffix;
if (absl::SimpleAtoi(after_suffix, &numeric_suffix)) {
this->name_ =
StrCat(name().substr(0, index), dot_suffix, numeric_suffix + 1);
} else {
this->name_ = absl::StrCat(name(), dot_suffix);
}
}
}
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
const Shape& shape, absl::Span<HloInstruction* const> new_operands,
HloCloneContext* context) const {
return CloneWithNewOperands(shape, new_operands, "", context);
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewOperands(
const Shape& shape, absl::Span<HloInstruction* const> new_operands,
const std::string& suffix, HloCloneContext* context) const {
VLOG(3) << "CloneWithNewOperands:\n " << ToString();
VLOG(3) << " new operands:";
for (const HloInstruction* new_operand : new_operands) {
VLOG(3) << " %" << new_operand->name();
}
std::unique_ptr<HloInstruction> clone;
switch (opcode_) {
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kFft:
case HloOpcode::kCompare:
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kCopyStart:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReduce:
case HloOpcode::kTranspose:
case HloOpcode::kBroadcast:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kMap:
case HloOpcode::kSlice:
case HloOpcode::kConstant:
case HloOpcode::kFusion:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kParameter:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReducePrecision:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kConvolution:
case HloOpcode::kCustomCall:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
case HloOpcode::kSort:
case HloOpcode::kGather:
case HloOpcode::kScatter:
case HloOpcode::kIota:
case HloOpcode::kDot:
case HloOpcode::kDomain:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
case HloOpcode::kTopK:
clone = CloneWithNewOperandsImpl(shape, new_operands, context);
break;
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kBitcast:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kCopy:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kFloor:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
CHECK_EQ(new_operands.size(), 1);
clone = CreateUnary(shape, opcode_, new_operands[0]);
break;
case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kComplex:
case HloOpcode::kDivide:
case HloOpcode::kMultiply:
case HloOpcode::kSubtract:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
CHECK_EQ(new_operands.size(), 2);
clone = CreateBinary(shape, opcode_, new_operands[0], new_operands[1]);
break;
case HloOpcode::kClamp:
case HloOpcode::kSelect:
CHECK_EQ(new_operands.size(), 3);
clone = CreateTernary(shape, opcode_, new_operands[0], new_operands[1],
new_operands[2]);
break;
case HloOpcode::kCall:
clone = CreateCall(shape, new_operands, to_apply());
break;
case HloOpcode::kConvert:
CHECK_EQ(new_operands.size(), 1);
clone = CreateConvert(shape, new_operands[0]);
break;
case HloOpcode::kBitcastConvert:
CHECK_EQ(new_operands.size(), 1);
clone = CreateBitcastConvert(shape, new_operands[0]);
break;
case HloOpcode::kStochasticConvert:
CHECK_EQ(new_operands.size(), 2);
clone = CreateStochasticConvert(shape, new_operands[0], new_operands[1]);
break;
case HloOpcode::kDynamicUpdateSlice:
clone = CreateDynamicUpdateSlice(shape, new_operands[0], new_operands[1],
new_operands.subspan(2));
break;
case HloOpcode::kTuple:
clone = CreateTuple(new_operands);
*clone->mutable_shape() = shape;
break;
case HloOpcode::kWhile:
CHECK_EQ(new_operands.size(), 1);
clone =
CreateWhile(shape, while_condition(), while_body(), new_operands[0]);
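      // CreateWhile pointed the shared body computation at the clone; restore
      // the back-pointer to this instruction. The clone's body is re-pointed
      // below once the clone context remaps the called computations.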
while_body()->SetWhileCallInstruction(const_cast<HloInstruction*>(this));
break;
case HloOpcode::kConditional:
CHECK_EQ(new_operands.size(), branch_count() + 1);
clone = CreateConditional(shape, new_operands[0],
absl::MakeSpan(branch_computations()),
new_operands.subspan(1));
break;
case HloOpcode::kAfterAll:
if (new_operands.empty()) {
clone = CreateToken();
} else {
clone = CreateAfterAll(new_operands);
}
break;
case HloOpcode::kAddDependency:
CHECK_EQ(new_operands.size(), 2);
clone = CreateAddDependency(new_operands[0], new_operands[1]);
break;
case HloOpcode::kReplicaId:
CHECK_EQ(new_operands.size(), 0);
clone = CreateReplicaId(shape);
break;
case HloOpcode::kPartitionId:
CHECK_EQ(new_operands.size(), 0);
clone = CreatePartitionId(shape);
break;
default:
CHECK(0) << "Unsupported opcode: " << opcode_;
}
SetupDerivedInstruction(clone.get());
clone->set_parent(parent_);
clone->backend_config_ = BackendConfigWrapper(backend_config_);
clone->SetAndSanitizeName(name());
if (context != nullptr) {
context->MapInstruction(this, clone.get());
clone->ReplaceCalledComputations([&](HloComputation* callee) {
return callee->parent() != context->module()
? context->module()->DeepCloneComputation(callee, context)
: callee;
});
if (opcode() == HloOpcode::kWhile) {
clone->while_body()->SetWhileCallInstruction(clone.get());
}
}
if (!suffix.empty()) {
clone->AddSuffixToInstructionName(suffix);
}
return clone;
}
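// Severs the operand/user links of this instruction prior to deletion:
// removes it from each operand's user list and nulls out both its own
// operand slots and the matching operand slots of its remaining users.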
void HloInstruction::DetachFromOperandsAndUsers() {
if (cleaned_up_) {
return;
}
cleaned_up_ = true;
for (int64_t operand_num = 0; operand_num < operand_count(); ++operand_num) {
HloInstruction* operand = operands_[operand_num];
if (operand == nullptr) {
continue;
}
operand->users_.MaybeRemoveUser(this);
operands_[operand_num] = nullptr;
}
for (auto& user : this->users()) {
for (int i = 0; i < user->operand_count(); ++i) {
if (user->operands_[i] == this) {
user->operands_[i] = nullptr;
}
}
}
}
std::unique_ptr<HloInstruction> HloInstruction::CloneWithNewShape(
const Shape& shape, const std::string& suffix,
HloCloneContext* context) const {
std::unique_ptr<HloInstruction> clone =
CloneWithNewOperands(shape, operands_, context);
if (suffix.empty()) {
clone->name_.assign(name().begin(), name().end());
} else {
clone->AddSuffixToInstructionName(suffix);
}
return clone;
}
std::unique_ptr<HloInstruction> HloInstruction::Clone(
const std::string& suffix, HloCloneContext* context) const {
std::unique_ptr<HloInstruction> clone =
CloneWithNewShape(shape_, suffix, context);
return clone;
}
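// Walks up through a chain of get-tuple-element instructions, returning the
// first non-GTE ancestor together with the ShapeIndex that addresses this
// instruction's value within that ancestor's shape.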
std::pair<const HloInstruction*, ShapeIndex>
HloInstruction::LatestNonGteAncestorAndIndex() const {
const HloInstruction* hlo = this;
ShapeIndex index;
while (hlo->opcode() == HloOpcode::kGetTupleElement) {
index.push_back(hlo->tuple_index());
hlo = hlo->operand(0);
}
std::reverse(index.begin(), index.end());
return {hlo, index};
}
const HloInstruction* HloInstruction::LatestNonGteAncestor() const {
const HloInstruction* hlo = this;
while (hlo->opcode() == HloOpcode::kGetTupleElement) {
hlo = hlo->operand(0);
}
return hlo;
}
const HloInstruction* HloInstruction::operand(int64_t i) const {
return operands_[i];
}
HloInstruction* HloInstruction::mutable_operand(int64_t i) {
CHECK(operands_[i] != nullptr);
return operands_[i];
}
int64_t HloInstruction::operand_index(const HloInstruction* target) const {
for (int64_t i = 0; i < operand_count(); ++i) {
if (target == operand(i)) {
return i;
}
}
LOG(FATAL) << "target was not an operand: " << target->ToString();
}
std::vector<int64_t> HloInstruction::operand_indices(
const HloInstruction* target) const {
std::vector<int64_t> indices;
for (int64_t i = 0; i < operand_count(); ++i) {
if (target == operand(i)) {
indices.push_back(i);
}
}
if (indices.empty()) {
LOG(FATAL) << "target was not an operand: " << target->ToString();
}
return indices;
}
HloInstruction::InstructionVector HloInstruction::unique_operands() const {
InstructionVector unique;
absl::flat_hash_set<const HloInstruction*> seen;
for (HloInstruction* operand : operands()) {
if (seen.insert(operand).second) {
unique.push_back(operand);
}
}
return unique;
}
absl::Status HloInstruction::AddControlDependencyTo(
HloInstruction* instruction) {
TF_RET_CHECK(instruction->parent() == parent());
if (!absl::c_linear_search(control_successors(), instruction)) {
mutable_rare()->control_successors.push_back(instruction);
TF_RET_CHECK(!absl::c_linear_search(
instruction->rare()->control_predecessors, this));
instruction->mutable_rare()->control_predecessors.push_back(this);
}
return absl::OkStatus();
}
absl::Status HloInstruction::RemoveControlDependencyTo(
HloInstruction* instruction) {
TF_RET_CHECK(instruction->parent() == parent());
if (has_rare()) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&mutable_rare()->control_successors, instruction));
}
if (instruction->has_rare()) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&instruction->mutable_rare()->control_predecessors, this));
}
return absl::OkStatus();
}
absl::Status HloInstruction::DropAllControlDeps() {
if (has_rare()) {
for (auto* ctrl_succ : rare()->control_successors) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&ctrl_succ->mutable_rare()->control_predecessors, this));
}
for (auto* ctrl_pred : rare()->control_predecessors) {
TF_RETURN_IF_ERROR(EraseElementFromVector(
&ctrl_pred->mutable_rare()->control_successors, this));
}
Rare* r = mutable_rare();
r->control_successors.clear();
r->control_predecessors.clear();
}
return absl::OkStatus();
}
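// Unlike DropAllControlDeps() above, this preserves the induced ordering:
// every control predecessor is first connected directly to every control
// successor before this instruction's own control edges are removed.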
absl::Status HloInstruction::SafelyDropAllControlDependencies() {
if (has_rare()) {
for (HloInstruction* predecessor : rare()->control_predecessors) {
for (HloInstruction* successor : rare()->control_successors) {
TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(successor));
}
}
}
TF_RETURN_IF_ERROR(DropAllControlDeps());
return absl::OkStatus();
}
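// Sketch of the rewrite above on a hypothetical control chain %a -> this -> %b
// (%a a control predecessor, %b a control successor): the nested loops first
// add the transitive edge %a -> %b, and DropAllControlDeps then removes both
// original edges, so ordering constraints that merely passed through this
// instruction survive its removal.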
bool HloInstruction::HasControlDependencies() const {
const Rare* r = rare();
return (!r->control_predecessors.empty() || !r->control_successors.empty());
}
absl::Status HloInstruction::CopyAllControlDepsTo(HloInstruction* start,
HloInstruction* end) const {
for (auto* ctrl_pred : control_predecessors()) {
TF_RETURN_IF_ERROR(ctrl_pred->AddControlDependencyTo(start));
}
for (auto* ctrl_succ : control_successors()) {
TF_RETURN_IF_ERROR(end->AddControlDependencyTo(ctrl_succ));
}
return absl::OkStatus();
}
bool HloInstruction::IdenticalInternal(
const HloInstruction& other,
absl::FunctionRef<bool(const HloInstruction*, const HloInstruction*)>
eq_operands,
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>
eq_computations,
bool layout_sensitive, bool sharding_sensitive,
bool ignore_channel_id_values,
bool ignore_commutative_operand_order) const {
if (this == &other) {
return true;
}
if (opcode() != other.opcode()) {
return false;
}
if (!(layout_sensitive ? ShapeUtil::Equal(shape(), other.shape())
: ShapeUtil::Compatible(shape(), other.shape()))) {
return false;
}
if (sharding_sensitive && has_sharding() && other.has_sharding() &&
sharding() != other.sharding()) {
return false;
}
if (operands().size() != other.operands().size()) {
return false;
}
if (ignore_commutative_operand_order &&
HloOpcodeIsBinaryCommutative(opcode())) {
CHECK_EQ(operand_count(), 2);
if (!(eq_operands(operand(0), other.operand(0)) &&
eq_operands(operand(1), other.operand(1))) &&
!(eq_operands(operand(0), other.operand(1)) &&
eq_operands(operand(1), other.operand(0)))) {
return false;
}
} else {
for (size_t i = 0; i < operands().size(); ++i) {
if (!eq_operands(operand(i), other.operand(i))) {
return false;
}
}
}
if (backend_config_ != other.backend_config_) {
return false;
}
if (ignore_channel_id_values) {
if (auto channel_inst = DynCast<HloChannelInstruction>(this)) {
return channel_inst->IdenticalSlowPathIgnoringChannelIdValues(
other, eq_computations);
}
}
return IdenticalSlowPath(other, eq_computations);
}
void HloInstruction::AppendOperand(HloInstruction* operand) {
if (operand->parent() != nullptr) {
DCHECK(!operand->parent()->IsMarkedAsDead(operand))
<< "Operand " << operand->name() << " is already marked dead";
}
operands_.push_back(operand);
operand->AddUser(this);
}
void HloInstruction::RemoveOperandsAtAscendingIndices(
absl::Span<const int> ascending_indices) {
if (ascending_indices.empty()) {
return;
}
int next_index = 0;
int removed_count = 0;
for (int to_remove : ascending_indices) {
while (next_index < to_remove) {
operands_[next_index - removed_count] = operands_[next_index];
++next_index;
}
CHECK_LT(to_remove, operands_.size());
++removed_count;
++next_index;
}
while (next_index < operands_.size()) {
operands_[next_index - removed_count] = operands_[next_index];
++next_index;
}
CHECK_EQ(removed_count, ascending_indices.size());
operands_.resize(operands_.size() - removed_count);
}
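// Worked example for the compaction above (operand values hypothetical): with
// operands_ = {a, b, c, d, e} and ascending_indices = {1, 3}, surviving
// entries are shifted left over the removed slots, yielding {a, c, e} after
// the final resize. The indices must be sorted ascending and in range, as the
// CHECKs enforce.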
bool HloInstruction::HasConstantOperand() const {
for (const HloInstruction* operand : operands_) {
if (operand->IsConstant()) {
return true;
}
}
return false;
}
bool HloInstruction::IdenticalSlowPath(
const HloInstruction& other,
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>
eq_computations) const {
switch (opcode()) {
case HloOpcode::kAbs:
case HloOpcode::kAllGatherDone:
case HloOpcode::kAllReduceDone:
case HloOpcode::kAtan2:
case HloOpcode::kAdd:
case HloOpcode::kBitcast:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCeil:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCollectivePermuteDone:
case HloOpcode::kComplex:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCopyStart:
case HloOpcode::kCopyDone:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kAnd:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kOptimizationBarrier:
case HloOpcode::kPartitionId:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kRemainder:
case HloOpcode::kReshape:
case HloOpcode::kDynamicReshape:
case HloOpcode::kReplicaId:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kStochasticConvert:
case HloOpcode::kCbrt:
case HloOpcode::kSubtract:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTuple:
return true;
    // These opcodes have special identity semantics, so the base class
    // conservatively treats two such instructions as non-identical.
    case HloOpcode::kAfterAll:
    case HloOpcode::kAddDependency:
      return false;
case HloOpcode::kCall:
return eq_computations(to_apply(), other.to_apply());
case HloOpcode::kConditional:
for (int j = 0; j < branch_count(); ++j) {
if (!eq_computations(branch_computation(j),
other.branch_computation(j))) {
return false;
}
}
return true;
case HloOpcode::kWhile:
return (eq_computations(while_body(), other.while_body()) &&
eq_computations(while_condition(), other.while_condition()));
case HloOpcode::kAsyncStart:
case HloOpcode::kAsyncUpdate:
case HloOpcode::kAsyncDone:
case HloOpcode::kBatchNormTraining:
case HloOpcode::kBatchNormInference:
case HloOpcode::kBatchNormGrad:
case HloOpcode::kFft:
case HloOpcode::kCompare:
case HloOpcode::kSend:
case HloOpcode::kSendDone:
case HloOpcode::kRecv:
case HloOpcode::kRecvDone:
case HloOpcode::kReverse:
case HloOpcode::kConcatenate:
case HloOpcode::kReduce:
case HloOpcode::kSort:
case HloOpcode::kTranspose:
case HloOpcode::kBroadcast:
case HloOpcode::kMap:
case HloOpcode::kSlice:
case HloOpcode::kConstant:
case HloOpcode::kIota:
case HloOpcode::kFusion:
case HloOpcode::kRng:
case HloOpcode::kRngBitGenerator:
case HloOpcode::kRngGetAndUpdateState:
case HloOpcode::kParameter:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReducePrecision:
case HloOpcode::kInfeed:
case HloOpcode::kOutfeed:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConvolution:
case HloOpcode::kCustomCall:
case HloOpcode::kReduceWindow:
case HloOpcode::kSelectAndScatter:
case HloOpcode::kPad:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kScatter:
case HloOpcode::kDot:
case HloOpcode::kDomain:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kTriangularSolve:
case HloOpcode::kCholesky:
case HloOpcode::kTopK:
LOG(FATAL) << "Base class impl called for opcode with subclass: "
<< opcode();
}
return false;
}
absl::Status HloInstruction::ReplaceUseWith(HloInstruction* user,
HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "this shape: " << ShapeUtil::HumanString(shape())
<< ", replacement shape: "
<< ShapeUtil::HumanString(new_producer->shape());
return ReplaceUseWithDifferentShape(user, new_producer);
}
absl::Status HloInstruction::ReplaceUseWithDifferentShape(
HloInstruction* user, HloInstruction* new_producer) {
VLOG(3) << "Replacing uses of " << name() << " in " << user->name()
<< " with " << new_producer->name();
RemoveUser(user);
  // The replaced instruction must actually appear in the user's operand list.
  TF_RET_CHECK(absl::c_count(user->operands_, this) >= 1);
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
if (user->opcode() == HloOpcode::kFusion) {
TF_RETURN_IF_ERROR(
Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
}
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceUseWith(HloInstruction* user,
int operand_number,
HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "this shape: " << ShapeUtil::HumanString(shape())
<< ", replacement shape: "
<< ShapeUtil::HumanString(new_producer->shape());
return ReplaceUseWithDifferentShape(user, operand_number, new_producer);
}
absl::Status HloInstruction::ReplaceUseWithDifferentShape(
HloInstruction* user, int operand_number, HloInstruction* new_producer) {
VLOG(3) << "Replacing operand " << operand_number << " of " << name()
<< " in " << user->name() << " with " << new_producer->name();
if (absl::c_count(user->operands_, this) == 1) {
RemoveUser(user);
}
TF_RET_CHECK(user->operand(operand_number) == this)
<< "Expected operand " << operand_number << " of " << user->ToString()
<< " to be equal to " << ToString();
user->operands_[operand_number] = new_producer;
new_producer->AddUser(user);
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceOperandWith(int64_t operand_num,
HloInstruction* new_operand) {
auto old_operand = operand(operand_num);
TF_RET_CHECK(ShapeUtil::CompatibleIgnoringFpPrecision(old_operand->shape(),
new_operand->shape()))
<< old_operand->shape() << " is not compatible with "
<< new_operand->shape();
return ReplaceOperandWithDifferentShape(operand_num, new_operand);
}
absl::Status HloInstruction::ReplaceOperandWithDifferentShape(
int64_t operand_num, HloInstruction* new_operand) {
TF_RET_CHECK(operand_num >= 0);
TF_RET_CHECK(operand_num < operand_count());
HloInstruction* old_operand = mutable_operand(operand_num);
if (old_operand == new_operand) {
return absl::OkStatus();
}
operands_[operand_num] = new_operand;
VLOG(3) << "Replacing operand " << operand_num << " of " << name() << " with "
<< new_operand->name() << ", was " << old_operand->name();
if (!absl::c_linear_search(operands_, old_operand)) {
old_operand->RemoveUser(this);
}
new_operand->AddUser(this);
return absl::OkStatus();
}
absl::Status HloInstruction::Defuse() {
if (opcode() != HloOpcode::kFusion) {
return absl::OkStatus();
}
VLOG(2) << "Defusing instruction: " << ToString();
HloComputation* fused_computation = fused_instructions_computation();
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
defused_instructions;
for (int64_t i = 0; i < operand_count(); ++i) {
defused_instructions[fused_computation->parameter_instruction(i)] =
mutable_operand(i);
}
for (HloInstruction* fused_instruction :
fused_computation->MakeInstructionPostOrder()) {
if (fused_instruction->opcode() == HloOpcode::kParameter) {
continue;
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : fused_instruction->operands()) {
new_operands.push_back(defused_instructions.at(operand));
}
HloInstruction* defused_instruction =
parent()->AddInstruction(fused_instruction->CloneWithNewOperands(
fused_instruction->shape(), new_operands));
defused_instructions[fused_instruction] = defused_instruction;
}
TF_RETURN_IF_ERROR(
ReplaceAllUsesWith(defused_instructions.at(fused_expression_root())));
HloModule* module = GetModule();
TF_RETURN_IF_ERROR(parent()->RemoveInstruction(this));
return module->RemoveEmbeddedComputation(fused_computation);
}
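// Conceptual sketch of Defuse() on a hypothetical fusion
//   %fusion = fusion(%p0, %p1), calls=%fused_computation,
// where %fused_computation computes param0 * param1: each fused instruction
// is cloned into the parent computation with parameters remapped to %p0/%p1,
// users of %fusion are redirected to the clone of the fused root, and the
// fusion instruction plus its now-unused computation are removed.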
absl::StatusOr<HloInstruction*> HloInstruction::UnfuseInstruction(
HloInstruction* instruction) {
CHECK_EQ(opcode(), HloOpcode::kFusion);
std::vector<HloInstruction*> new_operands;
for (int64_t operand_num = 0; operand_num < instruction->operand_count();
++operand_num) {
HloInstruction* operand = instruction->mutable_operand(operand_num);
if (operand->opcode() == HloOpcode::kParameter) {
HloInstruction* extracted_operand =
mutable_operand(operand->parameter_number());
new_operands.push_back(extracted_operand);
} else if (operand->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant = AddInstruction(operand->Clone());
new_operands.push_back(cloned_constant);
} else if (operand->opcode() == HloOpcode::kBroadcast &&
operand->operand(0)->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant =
AddInstruction(operand->operand(0)->Clone());
new_operands.push_back(AddInstruction(
operand->CloneWithNewOperands(operand->shape(), {cloned_constant})));
} else {
return InvalidArgument(
"Unsupported operand type for unfusing: %s. Currently only "
"parameters and constants are supported.",
operand->ToString());
}
}
HloInstruction* unfused_instruction = AddInstruction(
instruction->CloneWithNewOperands(instruction->shape(), new_operands));
HloComputation* fusion_computation = fused_instructions_computation();
HloInstruction* new_parameter = AddFusionOperand(unfused_instruction);
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(new_parameter));
TF_RETURN_IF_ERROR(
fusion_computation->RemoveInstructionAndUnusedOperands(instruction));
return unfused_instruction;
}
absl::Status HloInstruction::ReplaceUsesWith(
absl::Span<HloInstruction* const> users, HloInstruction* new_producer) {
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< shape() << " is not compatible with " << new_producer->shape();
return ReplaceAllUsesWithDifferentShape(users, new_producer);
}
absl::Status HloInstruction::ReplaceAllUsesWithDifferentShape(
absl::Span<HloInstruction* const> users, HloInstruction* new_producer) {
  // Materialize the user sequence first: each replacement below mutates
  // use/user links, which could otherwise invalidate the span being iterated.
  std::vector<HloInstruction*> users_vector(users.begin(), users.end());
for (HloInstruction* user : users_vector) {
TF_RETURN_IF_ERROR(ReplaceUseWithDifferentShape(user, new_producer));
}
if (parent_ && parent_->root_instruction() == this) {
    parent_->set_root_instruction(new_producer,
                                  /*accept_different_shape=*/true);
}
return absl::OkStatus();
}
absl::Status HloInstruction::ReplaceAllUsesWith(HloInstruction* new_producer,
absl::string_view trigger) {
auto print_options = HloPrintOptions::ShortParsable()
.set_print_operand_shape(true)
.set_print_extra_attributes(false);
TF_RET_CHECK(
ShapeUtil::CompatibleIgnoringFpPrecision(shape(), new_producer->shape()))
<< "The shape doesn't match when replacing '" << ToString(print_options)
<< "' with '" << new_producer->ToString(print_options) << "'. " << shape()
<< " is not compatible with " << new_producer->shape() << "\n '"
<< trigger << "' triggered this wrong replacement.";
return ReplaceAllUsesWithDifferentShape(new_producer);
}
absl::Status HloInstruction::ReplaceAllUsesWithDifferentShape(
HloInstruction* new_producer) {
bool new_producer_is_user = false;
std::vector<HloInstruction*> users_vector(users().begin(), users().end());
for (HloInstruction* user : users_vector) {
if (user == new_producer) {
new_producer_is_user = true;
} else {
std::replace(user->operands_.begin(), user->operands_.end(), this,
new_producer);
new_producer->AddUser(user);
if (user->opcode() == HloOpcode::kFusion) {
TF_RETURN_IF_ERROR(
Cast<HloFusionInstruction>(user)->DeduplicateFusionOperands());
}
}
}
users_.Clear();
if (new_producer_is_user) {
AddUser(new_producer);
}
if (parent_ && parent_->root_instruction() == this) {
    parent_->set_root_instruction(new_producer,
                                  /*accept_different_shape=*/true);
}
return absl::OkStatus();
}
bool HloInstruction::IsEffectiveBitcast() const {
return opcode_ == HloOpcode::kBitcast ||
(opcode_ == HloOpcode::kTranspose &&
ShapeUtil::TransposeIsBitcast(operand(0)->shape(), shape(),
dimensions()));
}
HloComputation* HloInstruction::to_apply() const {
if (has_to_apply()) {
CHECK_EQ(called_computations().size(), 1)
<< "Expected a to_apply computation for " << opcode();
return called_computations()[0];
}
LOG(FATAL) << "Invalid opcode for to_apply(): " << opcode();
}
void HloInstruction::set_to_apply(HloComputation* computation) {
if (has_to_apply()) {
CHECK_EQ(called_computations().size(), 1)
<< "Expected a to_apply computation for " << opcode();
rare_->called_computations[0] = computation;
return;
}
LOG(FATAL) << "Invalid opcode for to_apply(): " << opcode();
}
bool HloInstruction::has_to_apply() const {
switch (opcode_) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kCall:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kReduceWindow:
case HloOpcode::kScatter:
case HloOpcode::kSort:
return true;
case HloOpcode::kCustomCall:
return called_computations().size() == 1;
default:
return false;
}
}
HloComputation* HloInstruction::while_condition() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return called_computations()[kConditionComputationIndex];
}
HloComputation* HloInstruction::while_body() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return called_computations()[kBodyComputationIndex];
}
void HloInstruction::set_while_condition(HloComputation* computation) {
CHECK_EQ(HloOpcode::kWhile, opcode_);
rare_->called_computations[kConditionComputationIndex] = computation;
}
void HloInstruction::set_while_body(HloComputation* computation) {
CHECK_EQ(HloOpcode::kWhile, opcode_);
rare_->called_computations[kBodyComputationIndex] = computation;
}
HloInstruction* HloInstruction::while_init() const {
CHECK_EQ(HloOpcode::kWhile, opcode_);
return operands_[0];
}
HloComputation* HloInstruction::true_computation() const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_EQ(PRED, operand(0)->shape().element_type());
return called_computations()[kTrueComputationIndex];
}
HloComputation* HloInstruction::false_computation() const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_EQ(PRED, operand(0)->shape().element_type());
return called_computations()[kFalseComputationIndex];
}
const PtrVec<HloComputation*>& HloInstruction::branch_computations() const {
CHECK(HloOpcode::kConditional == opcode_);
return called_computations();
}
int32_t HloInstruction::branch_count() const {
CHECK(HloOpcode::kConditional == opcode_);
return called_computations().size();
}
HloComputation* HloInstruction::branch_computation(int32_t b) const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_GE(b, 0);
CHECK_LT(b, called_computations().size());
return called_computations()[b];
}
int32_t HloInstruction::branch_index(HloComputation* computation) const {
CHECK_EQ(HloOpcode::kConditional, opcode_);
CHECK_NE(computation, nullptr);
for (int32_t idx = 0; idx < branch_count(); idx++) {
if (branch_computation(idx) == computation) {
return idx;
}
}
LOG(FATAL) << absl::StrFormat("Conditional %s does not contain branch %s",
name(), computation->name());
}
void HloInstruction::set_branch_computation(int b,
HloComputation* computation) {
CHECK_EQ(HloOpcode::kConditional, opcode_);
rare_->called_computations[b] = computation;
}
std::string HloInstruction::SignatureString() const {
std::string operands =
StrJoin(operands_, ", ", [](std::string* out, HloInstruction* operand) {
StrAppend(out, ShapeUtil::HumanString(operand->shape()));
});
return StrCat("(", operands, ") -> ", ShapeUtil::HumanString(shape()));
}
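// Example of the format produced above: a hypothetical
// %add = f32[10] add(f32[10] %x, f32[10] %y) has the signature
// "(f32[10], f32[10]) -> f32[10]" -- operand shapes joined by ", ", then
// " -> ", then the result shape.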
absl::string_view PrintName(absl::string_view name, bool print_ids) {
if (print_ids) {
return name;
} else {
auto dot_position = name.find_first_of('.');
return name.substr(0, dot_position);
}
}
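// Behavior sketch for PrintName (the names are hypothetical): with
// print_ids=true, "add.42" is returned unchanged; with print_ids=false the
// suffix after the first '.' is dropped, giving "add". A name without a dot
// is returned whole in both modes, since substr(0, npos) spans the string.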
namespace {
using DFSStack = absl::InlinedVector<std::pair<int, HloInstruction*>, 16>;
void PrintNameInternal(Printer* printer, absl::string_view name,
const HloPrintOptions& options) {
if (options.print_percent()) {
printer->Append("%");
}
printer->Append(PrintName(name, options.print_ids()));
}
std::string PrintCycle(const HloInstruction* child, DFSStack* dfs_stack,
bool ignore_control_predecessors) {
absl::flat_hash_set<const HloInstruction*> subgraph;
while (!dfs_stack->empty() && dfs_stack->back().second != child) {
subgraph.insert(dfs_stack->back().second);
dfs_stack->pop_back();
}
absl::flat_hash_set<const HloInstruction*> visited;
absl::InlinedVector<const HloInstruction*, 16> dfs;
dfs.push_back(child);
std::string result;
while (!dfs.empty() && result.empty()) {
bool found_next_instr = false;
auto process_users_or_successors =
[&](const std::vector<HloInstruction*>& users_or_successors) {
for (const auto& user : users_or_successors) {
if (user == child) {
dfs.push_back(child);
result = "\n\nDirected cycle:\n " +
absl::StrJoin(
dfs, "\n ",
[](std::string* out, const HloInstruction* instr) {
absl::StrAppend(out, instr->name());
});
return;
}
if (!subgraph.contains(user) || visited.contains(user)) {
continue;
}
visited.insert(user);
dfs.push_back(user);
found_next_instr = true;
}
};
const HloInstruction* back = dfs.back();
process_users_or_successors(back->users());
if (!ignore_control_predecessors) {
process_users_or_successors(back->control_successors());
}
if (!found_next_instr) {
dfs.pop_back();
}
}
return result;
}
}  // namespace
void HloInstruction::Print(Printer* printer,
const HloPrintOptions& options) const {
CanonicalNameMap new_map;
PrintWithCanonicalNameMap(printer, options, &new_map);
}
std::string HloInstruction::ToString(const HloPrintOptions& options) const {
StringPrinter printer;
Print(&printer, options);
return std::move(printer).ToString();
}
std::string HloInstruction::ToString() const {
return ToString(HloPrintOptions::Default());
}
bool HloInstruction::IsOpElementwise(HloOpcode opcode) {
switch (opcode) {
    // Unary elementwise operations.
    case HloOpcode::kAbs:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kCeil:
case HloOpcode::kClz:
case HloOpcode::kConvert:
case HloOpcode::kBitcastConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kImag:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kNot:
case HloOpcode::kNegate:
case HloOpcode::kPopulationCount:
case HloOpcode::kReal:
case HloOpcode::kReducePrecision:
case HloOpcode::kRsqrt:
case HloOpcode::kLogistic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSqrt:
case HloOpcode::kCbrt:
case HloOpcode::kTan:
case HloOpcode::kTanh:
return true;
    // Binary elementwise operations.
    case HloOpcode::kAdd:
case HloOpcode::kAtan2:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kDivide:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kPower:
case HloOpcode::kRemainder:
case HloOpcode::kSubtract:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kStochasticConvert:
return true;
    // Ternary elementwise operations.
    case HloOpcode::kSelect:
case HloOpcode::kClamp:
return true;
default:
return false;
}
}
bool HloInstruction::IsElementwiseImpl(
const std::optional<int64_t>& operand_idx) const {
if (opcode_ == HloOpcode::kDynamicUpdateSlice) {
return operand_idx.has_value() && operand_idx.value() == 0;
}
if (opcode_ == HloOpcode::kBitcastConvert &&
primitive_util::BitWidth(shape_.element_type()) !=
primitive_util::BitWidth(operands_[0]->shape().element_type())) {
return false;
}
return IsOpElementwise(opcode_);
}
bool HloInstruction::IsCrossModuleAllReduce() const {
if (opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kAllReduceStart) {
return channel_id() != std::nullopt;
} else if (opcode() == HloOpcode::kAllReduceDone) {
CHECK_EQ(operand_count(), 1);
const HloInstruction* operand = this->operand(0);
CHECK_EQ(operand->opcode(), HloOpcode::kAllReduceStart);
return operand->channel_id() != std::nullopt;
}
return false;
}
bool HloInstruction::IsCrossReplicaAllReduce() const {
if (opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kAllReduceStart) {
return channel_id() == std::nullopt;
} else if (opcode() == HloOpcode::kAllReduceDone) {
CHECK_EQ(operand_count(), 1);
const HloInstruction* operand = this->operand(0);
CHECK_EQ(operand->opcode(), HloOpcode::kAllReduceStart);
return operand->channel_id() == std::nullopt;
}
return false;
}
void HloInstruction::PrintWithCanonicalNameMap(
Printer* printer, const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
if (options.canonicalize_instruction_names()) {
if (options.is_in_nested_computation()) {
DCHECK(!options.print_percent());
printer->Append(canonical_name_map->LookupOrInsert(unique_id()));
printer->Append(" = ");
}
} else {
PrintNameInternal(printer, name(), options);
printer->Append(" = ");
}
if (options.print_result_shape()) {
if (options.include_layout_in_shapes()) {
ShapeUtil::PrintHumanStringWithLayout(printer, shape());
} else {
ShapeUtil::PrintHumanString(printer, shape());
}
printer->Append(" ");
}
if (options.syntax_sugar_async_ops() && HloOpcodeIsAsync(opcode()) &&
(async_wrapped_computation() &&
async_wrapped_computation()->CanExpandIntoSingleInstruction())) {
absl::string_view suffix = [&]() {
switch (opcode()) {
case HloOpcode::kAsyncStart:
return "-start";
case HloOpcode::kAsyncUpdate:
return "-update";
default:
CHECK(opcode() == HloOpcode::kAsyncDone)
<< "Unexpected async opcode: " << opcode();
return "-done";
}
}();
printer->Append(HloOpcodeString(async_wrapped_opcode()));
printer->Append(suffix);
} else {
printer->Append(HloOpcodeString(opcode()));
}
printer->Append("(");
PrintOperandsWithCanonicalNameMap(printer, options, canonical_name_map);
printer->Append(")");
AttributePrinter attr_printer([printer]() {
printer->Append(", ");
return printer;
});
PrintExtraAttributes(attr_printer, options);
if (original_value_) {
printer->Append(", origin={");
printer->Append(OriginalValueToString(*original_value()));
printer->Append("}");
}
if (options.print_metadata() &&
(!metadata_->op_type().empty() || !metadata_->op_name().empty() ||
!metadata_->source_file().empty() ||
!metadata_->scheduling_name().empty())) {
printer->Append(", metadata={");
printer->Append(xla::OpMetadataToString(
*metadata_, options.print_metadata_only_op_name()));
printer->Append("}");
}
if (options.print_backend_config() && !backend_config_.empty()) {
absl::string_view config = backend_config_.GetRawString();
std::string sorted_config;
if (options.sort_backend_config()) {
sorted_config = SortJson(config).value_or(std::string(config));
config = sorted_config;
}
printer->Append(", backend_config=");
if (LexesAsJsonDict(config)) {
printer->Append(config);
} else {
printer->Append("\"");
printer->Append(CEscape(config));
printer->Append("\"");
}
}
}
void HloInstruction::PrintOperandsWithCanonicalNameMap(
Printer* printer, const HloPrintOptions& options,
CanonicalNameMap* canonical_name_map) const {
if (operands_.empty()) return;
absl::Span<HloInstruction* const> slice(operands_);
constexpr int64_t kMaxOperandsToShowIfCompact = 4;
if (options.compact_operands() &&
slice.size() > kMaxOperandsToShowIfCompact) {
slice.remove_suffix(slice.size() - kMaxOperandsToShowIfCompact);
}
auto print_one = [&](const HloInstruction* operand) {
if (operand == nullptr) {
printer->Append("null ");
return;
}
bool add_space = false;
if (options.print_operand_shape()) {
if (options.include_layout_in_shapes()) {
ShapeUtil::PrintHumanStringWithLayout(printer, operand->shape());
} else {
ShapeUtil::PrintHumanString(printer, operand->shape());
}
add_space = true;
}
if (options.canonicalize_instruction_names()) {
if (options.is_in_nested_computation()) {
DCHECK(!options.print_percent());
if (add_space) printer->Append(" ");
printer->Append(
canonical_name_map->LookupOrInsert(operand->unique_id()));
}
} else if (options.print_operand_names()) {
if (add_space) printer->Append(" ");
PrintNameInternal(printer, operand->name(), options);
}
};
print_one(slice[0]);
for (int64_t i = 1; i < slice.size(); ++i) {
if (options.print_operand_index_annotation_interval() != 0 &&
i % options.print_operand_index_annotation_interval() == 0) {
      printer->Append(absl::StrFormat(", /*index=%lld*/", i));
} else {
printer->Append(", ");
}
print_one(slice[i]);
}
const int64_t remaining = operands_.size() - slice.size();
if (remaining > 0) {
printer->Append(", ...(+");
printer->Append(remaining);
printer->Append(")");
}
}
namespace {
bool IsSequentialCall(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kCall:
case HloOpcode::kConditional:
case HloOpcode::kWhile:
return true;
default:
return false;
}
}
}  // namespace
void HloInstruction::PrintExtraAttributes(
AttributePrinter& printer, const HloPrintOptions& options) const {
if (options.print_extra_attributes()) {
PrintExtraAttributesImpl(printer, options);
}
const auto subcomputation_mode = options.print_subcomputation_mode();
if (subcomputation_mode ==
HloPrintOptions::PrintSubcomputationMode::kNameOnly) {
if (opcode() == HloOpcode::kWhile) {
printer.Next([this, &options](Printer* printer) {
printer->Append("condition=");
PrintNameInternal(printer, while_condition()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("body=");
PrintNameInternal(printer, while_body()->name(), options);
});
} else if (opcode() == HloOpcode::kSelectAndScatter) {
printer.Next([this, &options](Printer* printer) {
printer->Append("select=");
PrintNameInternal(printer, select()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("scatter=");
PrintNameInternal(printer, scatter()->name(), options);
});
} else if (opcode() == HloOpcode::kConditional) {
if (operand(0)->shape().element_type() == PRED) {
printer.Next([this, &options](Printer* printer) {
printer->Append("true_computation=");
PrintNameInternal(printer, true_computation()->name(), options);
});
printer.Next([this, &options](Printer* printer) {
printer->Append("false_computation=");
PrintNameInternal(printer, false_computation()->name(), options);
});
} else {
printer.Next([this, &options](Printer* printer) {
printer->Append("branch_computations={");
AppendJoin(printer, branch_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
printer->Append("}");
});
}
} else if (opcode() == HloOpcode::kCall || opcode() == HloOpcode::kMap ||
opcode() == HloOpcode::kReduceWindow ||
opcode() == HloOpcode::kReduce ||
opcode() == HloOpcode::kAllReduce ||
opcode() == HloOpcode::kReduceScatter ||
opcode() == HloOpcode::kAllReduceStart ||
opcode() == HloOpcode::kScatter ||
opcode() == HloOpcode::kTopK || opcode() == HloOpcode::kSort) {
if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("to_apply=");
PrintNameInternal(printer, to_apply()->name(), options);
});
}
if (opcode() == HloOpcode::kCall && is_composite()) {
printer.Next(
[](Printer* printer) { printer->Append("is_composite=true"); });
}
} else if (opcode() == HloOpcode::kCustomCall) {
if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("called_computations={");
AppendJoin(printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
printer->Append("}");
});
}
} else if (HloOpcodeIsAsync(opcode())) {
if (opcode() == HloOpcode::kAsyncStart &&
(!options.syntax_sugar_async_ops() ||
(async_wrapped_computation() &&
!async_wrapped_computation()->CanExpandIntoSingleInstruction()))) {
printer.Next([this, &options](Printer* printer) {
printer->Append("calls=");
PrintNameInternal(printer, async_wrapped_computation()->name(),
options);
});
}
} else if (!called_computations().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("calls=");
AppendJoin(printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
PrintNameInternal(printer, computation->name(), options);
});
});
}
} else if ((subcomputation_mode ==
HloPrintOptions::PrintSubcomputationMode::kFullBodies) ||
(subcomputation_mode == HloPrintOptions::PrintSubcomputationMode::
kNonSequentialBodies &&
!IsSequentialCall(opcode()))) {
HloPrintOptions new_options = options;
new_options.set_is_in_nested_computation(true);
switch (opcode()) {
case HloOpcode::kWhile:
printer.Next([this, &new_options](Printer* printer) {
printer->Append("condition=\n");
while_condition()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("body=\n");
while_body()->Print(printer, new_options);
});
break;
case HloOpcode::kSelectAndScatter:
printer.Next([this, &new_options](Printer* printer) {
printer->Append("select=\n");
select()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("scatter=\n");
scatter()->Print(printer, new_options);
});
break;
case HloOpcode::kConditional:
if (operand(0)->shape().element_type() == PRED) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("true_computation=\n");
true_computation()->Print(printer, new_options);
});
printer.Next([this, &new_options](Printer* printer) {
printer->Append("false_computation=\n");
false_computation()->Print(printer, new_options);
});
} else {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("branch_computations={\n");
AppendJoin(
printer, branch_computations(), ",\n",
[&](Printer* printer, const HloComputation* computation) {
computation->Print(printer, new_options);
});
printer->Append("\n}");
});
}
break;
case HloOpcode::kCall:
case HloOpcode::kMap:
case HloOpcode::kReduceWindow:
case HloOpcode::kReduce:
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
case HloOpcode::kScatter:
case HloOpcode::kSort:
case HloOpcode::kTopK:
if (!called_computations().empty()) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("to_apply=\n");
to_apply()->Print(printer, new_options);
});
}
if (opcode() == HloOpcode::kCall && is_composite()) {
printer.Next(
[](Printer* printer) { printer->Append("is_composite=true"); });
}
break;
default:
if (!called_computations().empty()) {
printer.Next([this, &new_options](Printer* printer) {
printer->Append("calls=\n");
AppendJoin(
printer, called_computations(), ", ",
[&](Printer* printer, const HloComputation* computation) {
computation->Print(printer, new_options);
});
});
}
break;
}
}
if (has_sharding()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("sharding=");
sharding().Print(printer, options.print_metadata());
});
}
if (!frontend_attributes().map().empty()) {
printer.Next([this](Printer* printer) {
AppendCat(printer, "frontend_attributes=",
FrontendAttributesToString(frontend_attributes()));
});
}
if (opcode() != HloOpcode::kCall) {
CHECK(!is_composite())
<< "Only kCall instructions should have is_composite set";
}
if (options.print_control_dependencies() && !control_predecessors().empty()) {
printer.Next([this, &options](Printer* printer) {
printer->Append("control-predecessors={");
AppendJoin(printer, control_predecessors(), ", ",
[&](Printer* printer, HloInstruction* pre) {
PrintNameInternal(printer, pre->name(), options);
});
printer->Append("}");
});
}
if (!statistics_viz().statistics().empty()) {
printer.Next([this](Printer* printer) {
AppendCat(printer,
"statistics=", StatisticsVizToString(statistics_viz()));
});
}
}
std::vector<std::string> HloInstruction::ExtraAttributesToString(
const HloPrintOptions& options) const {
class MultiStringPrinter : public Printer {
public:
void Append(const absl::AlphaNum& a) override {
if (strings_.empty()) {
strings_.push_back({});
}
absl::StrAppend(&strings_.back(), a);
}
void Next() { strings_.push_back({}); }
std::vector<std::string> ConsumeStrings() && { return std::move(strings_); }
private:
std::vector<std::string> strings_;
} multi_string_printer;
AttributePrinter attr_printer([&multi_string_printer] {
multi_string_printer.Next();
return &multi_string_printer;
});
PrintExtraAttributes(attr_printer, options);
return std::move(multi_string_printer).ConsumeStrings();
}
std::string FrontendAttributesToString(
const FrontendAttributes& frontend_attributes) {
std::vector<std::pair<std::string, std::string>> sorted_attributes(
frontend_attributes.map().begin(), frontend_attributes.map().end());
absl::c_sort(sorted_attributes);
const auto formatter = [](std::string* out,
const std::pair<std::string, std::string>& item) {
if (LexesAsJsonDict(item.second)) {
absl::StrAppend(out, item.first, "=", item.second);
} else {
absl::StrAppend(out, item.first, "=\"", item.second, "\"");
}
};
return absl::StrFormat("{%s}",
absl::StrJoin(sorted_attributes, ",", formatter));
}
std::string HloInstruction::ToShortString() const {
return StrCat("%", name(), " = ", HloOpcodeString(opcode()), "(",
StrJoin(operands_, ", ",
[](std::string* out, HloInstruction* operand) {
StrAppend(out, "%", operand->name());
}),
")");
}
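// Example of the short form above: a hypothetical %mul with operands %x and
// %y prints as "%mul = multiply(%x, %y)" -- no shapes, attributes, or
// metadata, which keeps this form suitable for terse log messages.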
HloInstructionProto HloInstruction::ToProto() const {
HloInstructionProto proto;
CHECK(unique_id_ != -1)
<< "This instruction does not have a valid id. Please make sure the "
"instruction is inside a module before dumping it.";
proto.set_id(unique_id_);
proto.set_name(name_);
*proto.mutable_opcode() = std::string(HloOpcodeString(opcode_));
*proto.mutable_shape() = shape_.ToProto();
for (const HloInstruction* operand : operands_) {
proto.add_operand_ids(operand->unique_id());
}
for (const HloInstruction* control : control_predecessors()) {
proto.add_control_predecessor_ids(control->unique_id());
}
*proto.mutable_metadata() = *metadata_;
proto.set_backend_config(backend_config_.GetRawString());
if (opcode() != HloOpcode::kFusion) {
for (const HloComputation* computation : called_computations()) {
proto.add_called_computation_ids(computation->unique_id());
}
}
if (has_sharding()) {
*proto.mutable_sharding() = sharding().ToProto();
}
*proto.mutable_frontend_attributes() = frontend_attributes();
proto.set_is_composite(is_composite());
*proto.mutable_statistics_viz() = statistics_viz();
if (original_value_) {
*proto.mutable_original_value() = OriginalValueToProto(*original_value_);
}
return proto;
}
std::string HloInstruction::ToCategory() const {
if (opcode() == HloOpcode::kTranspose || opcode() == HloOpcode::kCopy ||
opcode() == HloOpcode::kReshape ||
opcode() == HloOpcode::kDynamicReshape) {
return "data formatting";
}
if (IsElementwise()) {
return "non-fusion elementwise";
}
return std::string(HloOpcodeString(opcode()));
}
bool HloInstruction::IsFused() const {
return parent_ != nullptr && parent_->IsFusionComputation();
}
bool HloInstruction::IsCustomCall(absl::string_view target) const {
return opcode() == HloOpcode::kCustomCall && custom_call_target() == target;
}
bool HloInstruction::IsCustomCall(
absl::Span<const absl::string_view> targets) const {
return opcode() == HloOpcode::kCustomCall &&
absl::c_linear_search(targets, custom_call_target());
}
bool HloInstruction::IsInputFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kInput;
}
bool HloInstruction::IsLoopFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kLoop;
}
bool HloInstruction::IsOutputFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kOutput;
}
bool HloInstruction::IsCustomFusion() const {
return opcode() == HloOpcode::kFusion && fusion_kind() == FusionKind::kCustom;
}
bool HloInstruction::IsFusible() const {
switch (opcode_) {
case HloOpcode::kDomain:
case HloOpcode::kParameter:
case HloOpcode::kWhile:
case HloOpcode::kConditional:
case HloOpcode::kCall:
return false;
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kReduceWindow:
return true;
case HloOpcode::kRng:
return user_count() <= 1;
default:
return !HasSideEffect();
}
}
HloInstruction::HloInstruction(HloOpcode opcode, const Shape& shape)
: unique_id_(-1),
index_in_parent_(~0u),
opcode_(opcode),
is_default_config_(false),
cleaned_up_(false),
marked_as_dead_(false),
is_root_(false),
shape_(shape),
name_(HloOpcodeString(opcode)) {
TF_DCHECK_OK(ShapeUtil::ValidateShapeWithOptionalLayout(shape_));
}
template <typename HloInstructionPtr>
absl::Status HloInstruction::Visit(
DfsHloVisitorBase<HloInstructionPtr>* visitor) {
switch (opcode_) {
case HloOpcode::kAbs:
return visitor->HandleAbs(this);
case HloOpcode::kAtan2:
return visitor->HandleAtan2(this);
case HloOpcode::kRoundNearestAfz:
return visitor->HandleRound(this);
case HloOpcode::kRoundNearestEven:
return visitor->HandleRoundNearestEven(this);
case HloOpcode::kBatchNormTraining:
return visitor->HandleBatchNormTraining(this);
case HloOpcode::kBatchNormInference:
return visitor->HandleBatchNormInference(this);
case HloOpcode::kBatchNormGrad:
return visitor->HandleBatchNormGrad(this);
case HloOpcode::kErf:
return visitor->HandleErf(this);
case HloOpcode::kLogistic:
return visitor->HandleLogistic(this);
case HloOpcode::kSign:
return visitor->HandleSign(this);
case HloOpcode::kConstant:
return visitor->HandleConstant(this);
case HloOpcode::kGetTupleElement:
return visitor->HandleGetTupleElement(this);
case HloOpcode::kParameter:
return visitor->HandleParameter(this);
case HloOpcode::kCompare:
return visitor->HandleCompare(this);
case HloOpcode::kComplex:
return visitor->HandleComplex(this);
case HloOpcode::kAdd:
return visitor->HandleAdd(this);
case HloOpcode::kDivide:
return visitor->HandleDivide(this);
case HloOpcode::kSubtract:
return visitor->HandleSubtract(this);
case HloOpcode::kMaximum:
return visitor->HandleMaximum(this);
case HloOpcode::kMinimum:
return visitor->HandleMinimum(this);
case HloOpcode::kAnd:
return visitor->HandleAnd(this);
case HloOpcode::kOr:
return visitor->HandleOr(this);
case HloOpcode::kXor:
return visitor->HandleXor(this);
case HloOpcode::kShiftLeft:
return visitor->HandleShiftLeft(this);
case HloOpcode::kShiftRightArithmetic:
return visitor->HandleShiftRightArithmetic(this);
case HloOpcode::kShiftRightLogical:
return visitor->HandleShiftRightLogical(this);
case HloOpcode::kConcatenate:
return visitor->HandleConcatenate(this);
case HloOpcode::kConvert:
return visitor->HandleConvert(this);
case HloOpcode::kBitcastConvert:
return visitor->HandleBitcastConvert(this);
case HloOpcode::kStochasticConvert:
return visitor->HandleStochasticConvert(this);
case HloOpcode::kCopy:
return visitor->HandleCopy(this);
case HloOpcode::kMultiply:
return visitor->HandleMultiply(this);
case HloOpcode::kDot:
return visitor->HandleDot(this);
case HloOpcode::kPower:
return visitor->HandlePower(this);
case HloOpcode::kRemainder:
return visitor->HandleRemainder(this);
case HloOpcode::kSelect:
return visitor->HandleSelect(this);
case HloOpcode::kConvolution:
return visitor->HandleConvolution(this);
case HloOpcode::kFft:
return visitor->HandleFft(this);
case HloOpcode::kAllGather:
return visitor->HandleAllGather(this);
case HloOpcode::kAllGatherStart:
return visitor->HandleAllGatherStart(this);
case HloOpcode::kAllGatherDone:
return visitor->HandleAllGatherDone(this);
case HloOpcode::kAllReduce:
return visitor->HandleAllReduce(this);
case HloOpcode::kReduceScatter:
return visitor->HandleReduceScatter(this);
case HloOpcode::kAllReduceStart:
return visitor->HandleAllReduceStart(this);
case HloOpcode::kAllReduceDone:
return visitor->HandleAllReduceDone(this);
case HloOpcode::kAllToAll:
return visitor->HandleAllToAll(this);
case HloOpcode::kCollectiveBroadcast:
return visitor->HandleCollectiveBroadcast(this);
case HloOpcode::kCollectivePermute:
return visitor->HandleCollectivePermute(this);
case HloOpcode::kCollectivePermuteStart:
return visitor->HandleCollectivePermuteStart(this);
case HloOpcode::kCollectivePermuteDone:
return visitor->HandleCollectivePermuteDone(this);
case HloOpcode::kReplicaId:
return visitor->HandleReplicaId(this);
case HloOpcode::kPartitionId:
return visitor->HandlePartitionId(this);
case HloOpcode::kTuple:
return visitor->HandleTuple(this);
case HloOpcode::kMap:
return visitor->HandleMap(this);
case HloOpcode::kClamp:
return visitor->HandleClamp(this);
case HloOpcode::kReduce:
return visitor->HandleReduce(this);
case HloOpcode::kReduceWindow:
return visitor->HandleReduceWindow(this);
case HloOpcode::kSelectAndScatter:
return visitor->HandleSelectAndScatter(this);
case HloOpcode::kNegate:
return visitor->HandleNegate(this);
case HloOpcode::kExp:
return visitor->HandleExp(this);
case HloOpcode::kExpm1:
return visitor->HandleExpm1(this);
case HloOpcode::kFloor:
return visitor->HandleFloor(this);
case HloOpcode::kCeil:
return visitor->HandleCeil(this);
case HloOpcode::kClz:
return visitor->HandleClz(this);
case HloOpcode::kLog:
return visitor->HandleLog(this);
case HloOpcode::kLog1p:
return visitor->HandleLog1p(this);
case HloOpcode::kTan:
return visitor->HandleTan(this);
case HloOpcode::kTanh:
return visitor->HandleTanh(this);
case HloOpcode::kCos:
return visitor->HandleCos(this);
case HloOpcode::kSin:
return visitor->HandleSin(this);
case HloOpcode::kSqrt:
return visitor->HandleSqrt(this);
case HloOpcode::kCbrt:
return visitor->HandleCbrt(this);
case HloOpcode::kRsqrt:
return visitor->HandleRsqrt(this);
case HloOpcode::kReal:
return visitor->HandleReal(this);
case HloOpcode::kImag:
return visitor->HandleImag(this);
case HloOpcode::kIsFinite:
return visitor->HandleIsFinite(this);
case HloOpcode::kNot:
return visitor->HandleNot(this);
case HloOpcode::kPopulationCount:
return visitor->HandlePopulationCount(this);
case HloOpcode::kBitcast:
return visitor->HandleBitcast(this);
case HloOpcode::kBroadcast:
return visitor->HandleBroadcast(this);
case HloOpcode::kPad:
return visitor->HandlePad(this);
case HloOpcode::kReshape:
return visitor->HandleReshape(this);
case HloOpcode::kDynamicReshape:
return visitor->HandleDynamicReshape(this);
case HloOpcode::kTranspose:
return visitor->HandleTranspose(this);
case HloOpcode::kReverse:
return visitor->HandleReverse(this);
case HloOpcode::kReducePrecision:
return visitor->HandleReducePrecision(this);
case HloOpcode::kSlice:
return visitor->HandleSlice(this);
case HloOpcode::kDynamicSlice:
return visitor->HandleDynamicSlice(this);
case HloOpcode::kDynamicUpdateSlice:
return visitor->HandleDynamicUpdateSlice(this);
case HloOpcode::kSort:
return visitor->HandleSort(this);
case HloOpcode::kInfeed:
return visitor->HandleInfeed(this);
case HloOpcode::kOutfeed:
return visitor->HandleOutfeed(this);
case HloOpcode::kRng:
return visitor->HandleRng(this);
case HloOpcode::kRngBitGenerator:
return visitor->HandleRngBitGenerator(this);
case HloOpcode::kRngGetAndUpdateState:
return visitor->HandleRngGetAndUpdateState(this);
case HloOpcode::kWhile:
return visitor->HandleWhile(this);
case HloOpcode::kFusion:
return visitor->HandleFusion(this);
case HloOpcode::kCall:
return visitor->HandleCall(this);
case HloOpcode::kConditional:
return visitor->HandleConditional(this);
case HloOpcode::kCustomCall:
return visitor->HandleCustomCall(this);
case HloOpcode::kAsyncStart:
return visitor->HandleAsyncStart(this);
case HloOpcode::kAsyncUpdate:
return visitor->HandleAsyncUpdate(this);
case HloOpcode::kAsyncDone:
return visitor->HandleAsyncDone(this);
case HloOpcode::kCopyStart:
return visitor->HandleCopyStart(this);
case HloOpcode::kCopyDone:
return visitor->HandleCopyDone(this);
case HloOpcode::kRecv:
return visitor->HandleRecv(this);
case HloOpcode::kTopK:
return visitor->HandleTopK(this);
case HloOpcode::kRecvDone:
return visitor->HandleRecvDone(this);
case HloOpcode::kSend:
return visitor->HandleSend(this);
case HloOpcode::kSendDone:
return visitor->HandleSendDone(this);
case HloOpcode::kGather:
return visitor->HandleGather(this);
case HloOpcode::kScatter:
return visitor->HandleScatter(this);
case HloOpcode::kDomain:
return visitor->HandleDomain(this);
case HloOpcode::kAfterAll:
return visitor->HandleAfterAll(this);
case HloOpcode::kAddDependency:
return visitor->HandleAddDependency(this);
case HloOpcode::kIota:
return visitor->HandleIota(this);
case HloOpcode::kGetDimensionSize:
return visitor->HandleGetDimensionSize(this);
case HloOpcode::kSetDimensionSize:
return visitor->HandleSetDimensionSize(this);
case HloOpcode::kTriangularSolve:
return visitor->HandleTriangularSolve(this);
case HloOpcode::kCholesky:
return visitor->HandleCholesky(this);
case HloOpcode::kOptimizationBarrier:
return visitor->HandleOptimizationBarrier(this);
default:
return Internal(
"Unhandled HloOpcode for DfsHloVisitor: %s. This should not happen - "
"please file a bug for XLA.",
HloOpcodeString(opcode_));
}
}
template absl::Status HloInstruction::Visit(DfsHloVisitor* visitor);
template absl::Status HloInstruction::Visit(ConstDfsHloVisitor* visitor);
template <typename Visitor>
inline bool PushDFSChild(Visitor* visitor, DFSStack* dfs_stack,
HloInstruction* child) {
CHECK(child != nullptr);
const int id = child->unique_id();
CHECK_GE(id, 0) << "instruction may not have a parent computation";
switch (visitor->GetVisitState(id)) {
case Visitor::kVisiting:
return false;
case Visitor::kVisited:
return true;
case Visitor::kNotVisited:
dfs_stack->push_back(std::make_pair(id, child));
return true;
}
}
using InternalCompareFunction =
absl::FunctionRef<bool(std::pair<int, const HloInstruction*>,
std::pair<int, const HloInstruction*>)>;
template <typename Visitor>
static absl::Status PostOrderDFS(
HloInstruction* root, Visitor* visitor,
std::optional<InternalCompareFunction> operand_order,
bool ignore_control_predecessors, bool cross_computation) {
visitor->ReserveVisitStates(root->parent()->instruction_count());
DFSStack dfs_stack;
dfs_stack.emplace_back(root->unique_id(), root);
do {
DCHECK(!dfs_stack.empty());
int current_id = dfs_stack.back().first;
HloInstruction* current_node = dfs_stack.back().second;
CHECK_GE(current_id, 0) << current_id << ": " << current_node
<< ": instruction may not have parent computation";
typename Visitor::VisitState visit_state =
visitor->GetVisitState(current_id);
if (visit_state == Visitor::kVisited) {
dfs_stack.pop_back();
VLOG(3) << "Not visiting HLO (id = " << current_id
<< ") as it was already visited.";
continue;
}
if (visit_state == Visitor::kVisiting) {
dfs_stack.pop_back();
TF_RETURN_IF_ERROR(visitor->Preprocess(current_node));
VLOG(2) << "Visiting HLO %" << current_node->name();
TF_RETURN_IF_ERROR(current_node->Visit(visitor));
visitor->SetVisitState(current_id, Visitor::kVisited);
TF_RETURN_IF_ERROR(visitor->Postprocess(current_node));
continue;
}
visitor->SetVisitState(current_id, Visitor::kVisiting);
const size_t old_dfs_stack_size = dfs_stack.size();
for (HloInstruction* child : current_node->operands()) {
if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(child, &dfs_stack, ignore_control_predecessors));
}
}
if (!ignore_control_predecessors) {
for (HloInstruction* child : current_node->control_predecessors()) {
if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(child, &dfs_stack, ignore_control_predecessors));
}
}
}
if (cross_computation) {
for (const HloComputation* called_computation :
current_node->called_computations()) {
HloInstruction* root_instruction =
called_computation->root_instruction();
if (!ABSL_PREDICT_TRUE(
PushDFSChild(visitor, &dfs_stack, root_instruction))) {
return FailedPrecondition(
"A cycle is detected while visiting instruction %s %s",
current_node->ToString(),
PrintCycle(root_instruction, &dfs_stack,
ignore_control_predecessors));
}
}
}
if (operand_order != std::nullopt) {
std::sort(dfs_stack.begin() + old_dfs_stack_size, dfs_stack.end(),
*operand_order);
}
std::reverse(dfs_stack.begin() + old_dfs_stack_size, dfs_stack.end());
} while (!dfs_stack.empty());
return absl::OkStatus();
}
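// State-machine sketch for the iterative DFS above: each node moves from
// kNotVisited to kVisiting (first pop: children pushed) to kVisited (second
// pop: visitor runs), so meeting a kVisiting child while pushing is a back
// edge, i.e. a cycle, which PushDFSChild reports by returning false. For a
// hypothetical %c = add(%a, %b), the visitor handles %a and %b before %c,
// giving post-order.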
template <typename HloInstructionPtr>
absl::Status HloInstruction::Accept(
DfsHloVisitorBase<HloInstructionPtr>* visitor, bool call_finish_visit,
bool ignore_control_predecessors, bool cross_computation) {
VLOG(3) << "HloInstruction::Accept(%" << name() << ")";
TF_RETURN_IF_ERROR(PostOrderDFS(this, visitor, std::nullopt,
ignore_control_predecessors,
cross_computation));
if (call_finish_visit) {
TF_RETURN_IF_ERROR(visitor->FinishVisit(this));
}
return absl::OkStatus();
}
template absl::Status HloInstruction::Accept(DfsHloVisitor*, bool, bool, bool);
template absl::Status HloInstruction::Accept(ConstDfsHloVisitor*, bool, bool,
bool);
absl::Status HloInstruction::AcceptWithOperandOrder(
DfsHloVisitor* visitor, CompareFunction operand_order,
bool call_finish_visit) {
VLOG(2) << "HloInstruction::AcceptWithOperandOrder(%" << name() << ")";
auto func = [operand_order](std::pair<int, const HloInstruction*> a,
std::pair<int, const HloInstruction*> b) {
return operand_order(a.second, b.second);
};
TF_RETURN_IF_ERROR(PostOrderDFS(this, visitor, func,
                                  /*ignore_control_predecessors=*/false,
                                  /*cross_computation=*/false));
if (call_finish_visit) {
VLOG(3) << "HloInstruction::AcceptWithOperandOrder BEFORE FINISH VISIT";
TF_RETURN_IF_ERROR(visitor->FinishVisit(this));
VLOG(3) << "HloInstruction::AcceptWithOperandOrder AFTER FINISH VISIT";
}
VLOG(2) << "HloInstruction::AcceptWithOperandOrder EXIT";
return absl::OkStatus();
}
const Shape& HloInstruction::shape() const { return shape_; }
absl::InlinedVector<int64_t, 4> HloInstruction::OperandIndices(
const HloInstruction* operand) const {
absl::InlinedVector<int64_t, 4> result;
for (int64_t i = 0; i < operand_count(); ++i) {
if (this->operand(i) == operand) {
result.push_back(i);
}
}
return result;
}
bool HloInstruction::IsElementwiseBinary() const {
return IsElementwise() && operand_count() == 2;
}
bool HloInstruction::IsElementwise() const {
return IsElementwiseImpl(std::nullopt);
}
bool HloInstruction::IsElementwiseOnOperand(int64_t operand_idx) const {
return IsElementwiseImpl(operand_idx);
}
namespace {
// The ordering of these values matters: the analysis below tightens a node's
// state with std::min, so kReuse < kUse < kNoUse must hold.
enum class UseKind { kReuse = 0, kUse = 1, kNoUse = 2 };
class FusionReusesParamElements {
public:
static UseKind Compute(int64_t i, const HloInstruction& hlo) {
absl::flat_hash_map<const HloInstruction*, UseKind> memoization_cache;
return ComputeInternal(i, hlo, &memoization_cache);
}
private:
static UseKind ComputeInternal(
int64_t outer_param_num, const HloInstruction& hlo,
absl::flat_hash_map<const HloInstruction*, UseKind>* cache);
};
}  // namespace
static UseKind OperandElementUse(const HloInstruction& instr,
int64_t operand_num) {
switch (instr.opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
case HloOpcode::kGather:
return UseKind::kUse;
case HloOpcode::kPad:
return operand_num > 0 ? UseKind::kReuse : UseKind::kUse;
case HloOpcode::kReduce:
return operand_num >= Cast<HloReduceInstruction>(&instr)->input_count()
? UseKind::kReuse
: UseKind::kUse;
case HloOpcode::kFusion:
return FusionReusesParamElements::Compute(operand_num,
*instr.fused_expression_root());
case HloOpcode::kDot:
if (instr.shape().dimensions_size() <= 1) {
if ((operand_num == 0 && instr.operand(1)->shape().rank() <= 1) ||
(operand_num == 1 && instr.operand(0)->shape().rank() <= 1)) {
return UseKind::kUse;
}
}
return UseKind::kReuse;
case HloOpcode::kDynamicUpdateSlice:
if (operand_num == 0 || operand_num == 1) {
return UseKind::kUse;
}
return UseKind::kReuse;
default:
return instr.IsElementwise() ? UseKind::kUse : UseKind::kReuse;
}
}
UseKind FusionReusesParamElements::ComputeInternal(
int64_t outer_param_num, const HloInstruction& hlo,
absl::flat_hash_map<const HloInstruction*, UseKind>* cache) {
if (auto hlo_param = DynCast<HloParameterInstruction>(&hlo)) {
if (hlo_param->parameter_number() == outer_param_num) {
return UseKind::kUse;
}
}
auto p = cache->emplace(&hlo, UseKind::kNoUse);
auto value_it = p.first;
const bool key_is_new = p.second;
if (!key_is_new) {
return value_it->second;
}
for (int64_t operand_num = 0; operand_num < hlo.operands().size();
++operand_num) {
UseKind old_val = value_it->second;
UseKind new_val = [&] {
UseKind hlo_use = OperandElementUse(hlo, operand_num);
if (hlo_use == UseKind::kNoUse) {
return old_val;
}
UseKind operand_use =
ComputeInternal(outer_param_num, *hlo.operand(operand_num), cache);
if (operand_use == UseKind::kNoUse) {
return old_val;
}
return std::min({old_val, hlo_use, operand_use});
}();
value_it = cache->find(&hlo);
value_it->second = new_val;
if (new_val == UseKind::kReuse) {
break;
}
}
return value_it->second;
}
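// Intuition sketch for the memoized walk above (the expression is
// hypothetical): for a fused root mul(param0, broadcast(param0)), each node's
// cache entry starts at kNoUse and is tightened with std::min over its
// operand edges, combining how the node touches each operand's elements
// (OperandElementUse) with how that operand itself reaches param0. Because
// kReuse = 0 is the minimum, the loop can stop early once reuse is proven.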
bool HloInstruction::ReusesOperandElements(int64_t i) const {
return OperandElementUse(*this, i) == UseKind::kReuse;
}
std::optional<ShapeUtil::ShapeEqualityDescriptor>
HloInstruction::ReshapeMerelyInsertsOrDeletes1SizedDimensions() const {
if (HloOpcode::kReshape != opcode_) {
return std::nullopt;
}
return ShapeUtil::InsertedOrDeleted1SizedDimensions(operand(0)->shape_,
shape_);
}
absl::string_view ToString(HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kLoop:
return "kLoop";
case HloInstruction::FusionKind::kInput:
return "kInput";
case HloInstruction::FusionKind::kOutput:
return "kOutput";
case HloInstruction::FusionKind::kCustom:
return "kCustom";
}
}
absl::StatusOr<HloInstruction::FusionKind> StringToFusionKind(
absl::string_view kind_name) {
if (kind_name == "kLoop") {
return HloInstruction::FusionKind::kLoop;
}
if (kind_name == "kInput") {
return HloInstruction::FusionKind::kInput;
}
if (kind_name == "kOutput") {
return HloInstruction::FusionKind::kOutput;
}
if (kind_name == "kCustom") {
return HloInstruction::FusionKind::kCustom;
}
return InvalidArgument("Unknown fusion kind: %s", kind_name);
}
std::string StatisticsVizToString(const StatisticsViz& statistics_viz) {
if (statistics_viz.statistics().empty()) return "{}";
std::vector<Statistic> all_statistics(statistics_viz.statistics().begin(),
statistics_viz.statistics().end());
const auto formatter = [](std::string* out, const Statistic& item) {
absl::StrAppend(out, item.stat_name(), "=", item.stat_val());
};
return absl::StrFormat("{%s,%s}",
absl::StrCat("visualizing_index=",
statistics_viz.stat_index_to_visualize()),
absl::StrJoin(all_statistics, ",", formatter));
}
std::string PaddingConfigToString(const PaddingConfig& padding) {
bool has_interior_padding =
absl::c_any_of(padding.dimensions(),
[](const PaddingConfig::PaddingConfigDimension& dim) {
return dim.interior_padding() != 0;
});
return StrJoin(
padding.dimensions(), "x",
[&](std::string* out, const PaddingConfig::PaddingConfigDimension& dim) {
StrAppend(
out, dim.edge_padding_low(), "_", dim.edge_padding_high(),
has_interior_padding ? StrCat("_", dim.interior_padding()) : "");
});
}
std::string RandomDistributionToString(const RandomDistribution& distribution) {
return absl::AsciiStrToLower(RandomDistribution_Name(distribution));
}
std::string RandomAlgorithmToString(const RandomAlgorithm& algorithm) {
return absl::AsciiStrToLower(RandomAlgorithm_Name(algorithm));
}
std::string PrecisionToString(const PrecisionConfig::Precision& precision) {
return absl::AsciiStrToLower(PrecisionConfig::Precision_Name(precision));
}
std::string AlgorithmToString(const PrecisionConfig::Algorithm& algorithm) {
constexpr absl::string_view kPrefix = "ALG_";
const std::string& name = PrecisionConfig::Algorithm_Name(algorithm);
DCHECK(absl::StartsWith(name, kPrefix));
return absl::AsciiStrToLower(name.substr(kPrefix.size()));
}
static std::string CustomCallScheduleToString(
const CustomCallSchedule& schedule) {
return absl::AsciiStrToLower(CustomCallSchedule_Name(schedule));
}
static std::string CustomCallApiVersionToString(
const CustomCallApiVersion& schedule) {
return absl::AsciiStrToLower(CustomCallApiVersion_Name(schedule));
}
std::string DotDimensionNumbersToString(const DotDimensionNumbers& dnums) {
std::vector<std::string> result;
if (!dnums.lhs_batch_dimensions().empty()) {
result.push_back(StrCat("lhs_batch_dims={",
StrJoin(dnums.lhs_batch_dimensions(), ","), "}"));
}
result.push_back(StrCat("lhs_contracting_dims={",
StrJoin(dnums.lhs_contracting_dimensions(), ","),
"}"));
if (!dnums.rhs_batch_dimensions().empty()) {
result.push_back(StrCat("rhs_batch_dims={",
StrJoin(dnums.rhs_batch_dimensions(), ","), "}"));
}
result.push_back(StrCat("rhs_contracting_dims={",
StrJoin(dnums.rhs_contracting_dimensions(), ","),
"}"));
return StrJoin(result, ", ");
}
std::string ConvolutionDimensionNumbersToString(
const ConvolutionDimensionNumbers& dnums) {
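  // Smallest vector length indexable by both named dimensions and every
  // spatial dimension, i.e. one past the largest dimension number used.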
auto len_required = [](int64_t a, int64_t b, absl::Span<const int64_t> cs) {
return std::max({a, b, cs.empty() ? 0 : *absl::c_max_element(cs)}) + 1;
};
std::vector<std::string> lhs_dims(
len_required(dnums.input_batch_dimension(),
dnums.input_feature_dimension(),
dnums.input_spatial_dimensions()),
"?");
  lhs_dims[dnums.input_batch_dimension()] = "b";
  lhs_dims[dnums.input_feature_dimension()] = "f";
for (int64_t i = 0; i < dnums.input_spatial_dimensions().size(); ++i) {
lhs_dims[dnums.input_spatial_dimensions(i)] = StrCat(i);
}
std::vector<std::string> rhs_dims(
len_required(dnums.kernel_input_feature_dimension(),
dnums.kernel_output_feature_dimension(),
dnums.kernel_spatial_dimensions()),
"?");
rhs_dims[dnums.kernel_input_feature_dimension()] = "i";
rhs_dims[dnums.kernel_output_feature_dimension()] = "o";
for (int64_t i = 0; i < dnums.kernel_spatial_dimensions().size(); ++i) {
rhs_dims[dnums.kernel_spatial_dimensions(i)] = StrCat(i);
}
std::vector<std::string> output_dims(
len_required(dnums.output_batch_dimension(),
dnums.output_feature_dimension(),
dnums.output_spatial_dimensions()),
"?");
  output_dims[dnums.output_batch_dimension()] = "b";
  output_dims[dnums.output_feature_dimension()] = "f";
for (int64_t i = 0; i < dnums.output_spatial_dimensions().size(); ++i) {
output_dims[dnums.output_spatial_dimensions(i)] = StrCat(i);
}
return StrCat(StrJoin(lhs_dims, ""), "_", StrJoin(rhs_dims, ""), "->",
StrJoin(output_dims, ""));
}
absl::StatusOr<RandomAlgorithm> StringToRandomAlgorithm(
const std::string& name) {
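  // Lazily build (and intentionally leak) a reverse map from lowercased enum
  // name to enum value.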
static absl::flat_hash_map<std::string, RandomAlgorithm>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, RandomAlgorithm>;
for (int i = 0; i < RandomAlgorithm_ARRAYSIZE; i++) {
if (RandomAlgorithm_IsValid(i)) {
auto value = static_cast<RandomAlgorithm>(i);
(*map)[RandomAlgorithmToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown algorithm");
}
return found->second;
}
absl::StatusOr<RandomDistribution> StringToRandomDistribution(
const std::string& name) {
static absl::flat_hash_map<std::string, RandomDistribution>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, RandomDistribution>;
for (int i = 0; i < RandomDistribution_ARRAYSIZE; i++) {
if (RandomDistribution_IsValid(i)) {
auto value = static_cast<RandomDistribution>(i);
(*map)[RandomDistributionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown distribution");
}
return found->second;
}
absl::StatusOr<PrecisionConfig::Precision> StringToPrecision(
const std::string& name) {
static absl::flat_hash_map<std::string, PrecisionConfig::Precision>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, PrecisionConfig::Precision>;
for (int i = 0; i < PrecisionConfig::Precision_ARRAYSIZE; i++) {
if (PrecisionConfig::Precision_IsValid(i)) {
auto value = static_cast<PrecisionConfig::Precision>(i);
(*map)[PrecisionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown precision");
}
return found->second;
}
absl::StatusOr<PrecisionConfig::Algorithm> StringToAlgorithm(
const std::string& name) {
static absl::flat_hash_map<std::string, PrecisionConfig::Algorithm>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, PrecisionConfig::Algorithm>;
for (int i = 0; i < PrecisionConfig::Algorithm_ARRAYSIZE; i++) {
if (PrecisionConfig::Algorithm_IsValid(i)) {
auto value = static_cast<PrecisionConfig::Algorithm>(i);
(*map)[AlgorithmToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown algorithm");
}
return found->second;
}
absl::StatusOr<CustomCallSchedule> StringToCustomCallSchedule(
absl::string_view name) {
static const absl::flat_hash_map<std::string, CustomCallSchedule>* map = [] {
static auto* map = new absl::flat_hash_map<std::string, CustomCallSchedule>;
for (int i = 0; i < CustomCallSchedule_ARRAYSIZE; i++) {
if (CustomCallSchedule_IsValid(i)) {
auto value = static_cast<CustomCallSchedule>(i);
(*map)[CustomCallScheduleToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown schedule");
}
return found->second;
}
absl::StatusOr<CustomCallApiVersion> StringToCustomCallApiVersion(
absl::string_view name) {
static const absl::flat_hash_map<std::string, CustomCallApiVersion>* map =
[] {
static auto* map =
new absl::flat_hash_map<std::string, CustomCallApiVersion>;
for (int i = 0; i < CustomCallApiVersion_ARRAYSIZE; i++) {
if (CustomCallApiVersion_IsValid(i)) {
auto value = static_cast<CustomCallApiVersion>(i);
(*map)[CustomCallApiVersionToString(value)] = value;
}
}
return map;
}();
auto found = map->find(absl::AsciiStrToLower(name));
if (found == map->end()) {
return InvalidArgument("Unknown API version");
}
return found->second;
}
std::ostream& operator<<(std::ostream& os, HloInstruction::FusionKind kind) {
return os << ToString(kind);
}
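// Provides a deterministic, pointer-value-independent ordering: nullptr
// sorts first, then by module unique_id, then by instruction unique_id.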
bool HloPtrComparator::operator()(const HloInstruction* const& lhs,
const HloInstruction* const& rhs) const {
if (rhs == nullptr) {
return false;
}
if (lhs == nullptr) {
return true;
}
auto lhs_module = lhs->GetModule();
auto rhs_module = rhs->GetModule();
CHECK((lhs_module == nullptr && rhs_module == nullptr) ||
(lhs_module != nullptr && rhs_module != nullptr));
if (lhs_module != nullptr &&
lhs_module->unique_id() != rhs_module->unique_id()) {
return lhs_module->unique_id() < rhs_module->unique_id();
}
return lhs->unique_id() < rhs->unique_id();
}
const PrecisionConfig& HloInstruction::precision_config() const {
if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->precision_config();
}
if (auto* dot = DynCast<HloDotInstruction>(this)) {
return dot->precision_config();
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->precision_config();
}
LOG(FATAL) << "Unimplemented method.";
}
PrecisionConfig* HloInstruction::mutable_precision_config() {
if (auto* convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->mutable_precision_config();
}
if (auto* dot = DynCast<HloDotInstruction>(this)) {
return dot->mutable_precision_config();
}
if (auto* custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->mutable_precision_config();
}
LOG(FATAL) << "Unimplemented method.";
}
HloModule* HloInstruction::GetModule() const {
if (parent_) {
return parent_->parent();
}
return nullptr;
}
void HloInstruction::UniquifyName(NameUniquer* name_uniquer) {
name_ = name_uniquer->GetUniqueName(name_);
}
void HloInstruction::UniquifyName(HloModule* module) {
UniquifyName(&module->instruction_name_uniquer());
}
void HloInstruction::UniquifyId(HloModule* module) {
SetUniqueId(module->NewUniqueInstructionId());
}
void HloInstruction::SortInstructionUsersAndControlLists(
const MappedPtrContainerSorter<HloInstruction>::MapPtrFn& map_fn,
const HloInstruction& sorted_instruction) {
using Sorter = MappedPtrContainerSorter<HloInstruction>;
users_.SortInstructionUsers(map_fn, sorted_instruction.users_);
absl::Status status;
if (has_rare()) {
status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction.control_predecessors(),
mutable_rare()->control_predecessors);
}
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction control predecessors for "
<< name() << "; " << status;
}
if (has_rare()) {
status = Sorter::Sort(map_fn, Sorter::IndexAfterMappedElementsFn(),
sorted_instruction.control_successors(),
mutable_rare()->control_successors);
}
if (!status.ok()) {
LOG(ERROR) << "Failed to sort instruction control successors for " << name()
<< "; " << status;
}
}
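// The accessors below delegate to the corresponding HloInstruction subclass;
// Cast CHECK-fails if the instruction is not of the expected kind.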
int64_t HloInstruction::feature_index() const {
return Cast<HloBatchNormInstruction>(this)->feature_index();
}
float HloInstruction::epsilon() const {
return Cast<HloBatchNormInstruction>(this)->epsilon();
}
FftType HloInstruction::fft_type() const {
return Cast<HloFftInstruction>(this)->fft_type();
}
const std::vector<int64_t>& HloInstruction::fft_length() const {
return Cast<HloFftInstruction>(this)->fft_length();
}
int64_t HloInstruction::concatenate_dimension() const {
return Cast<HloConcatenateInstruction>(this)->concatenate_dimension();
}
int64_t HloInstruction::dimension() const {
if (auto set_size = DynCast<HloSetDimensionSizeInstruction>(this)) {
return set_size->dimension();
}
return Cast<HloGetDimensionSizeInstruction>(this)->dimension();
}
int64_t HloInstruction::inferred_dimension() const {
return Cast<HloReshapeInstruction>(this)->inferred_dimension();
}
bool HloInstruction::IsRank2Transpose() const {
auto transpose = DynCast<HloTransposeInstruction>(this);
return transpose != nullptr && transpose->IsRank2Transpose();
}
int64_t HloInstruction::slice_starts(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_starts(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_starts() const {
return Cast<HloSliceInstruction>(this)->slice_starts();
}
std::vector<int64_t>* HloInstruction::mutable_slice_starts() {
return Cast<HloSliceInstruction>(this)->mutable_slice_starts();
}
int64_t HloInstruction::slice_limits(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_limits(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_limits() const {
return Cast<HloSliceInstruction>(this)->slice_limits();
}
std::vector<int64_t>* HloInstruction::mutable_slice_limits() {
return Cast<HloSliceInstruction>(this)->mutable_slice_limits();
}
int64_t HloInstruction::slice_strides(int64_t dimension) const {
return Cast<HloSliceInstruction>(this)->slice_strides(dimension);
}
const std::vector<int64_t>& HloInstruction::slice_strides() const {
return Cast<HloSliceInstruction>(this)->slice_strides();
}
std::vector<int64_t>* HloInstruction::mutable_slice_strides() {
return Cast<HloSliceInstruction>(this)->mutable_slice_strides();
}
const Literal& HloInstruction::literal() const {
return Cast<HloConstantInstruction>(this)->literal();
}
bool HloInstruction::IsConstant() const {
return DynCast<HloConstantInstruction>(this) != nullptr;
}
void HloInstruction::RelayoutConstant(const Layout& new_layout,
const ShapeIndex& shape_index) {
Cast<HloConstantInstruction>(this)->RelayoutConstant(new_layout, shape_index);
}
HloInstruction* HloInstruction::AppendInstructionIntoCalledComputation(
HloInstruction* instruction_to_append, bool add_output) {
return Cast<HloCallableInstruction>(this)
->AppendInstructionIntoCalledComputation(instruction_to_append,
add_output);
}
HloInstruction* HloInstruction::AddFusionOperand(HloInstruction* new_operand) {
return Cast<HloFusionInstruction>(this)->AddFusionOperand(new_operand);
}
void HloInstruction::MergeFusionInstruction(
HloInstruction* instruction_to_merge) {
return Cast<HloFusionInstruction>(this)->MergeFusionInstruction(
Cast<HloFusionInstruction>(instruction_to_merge));
}
void HloInstruction::MergeFusionInstructionIntoMultiOutput(
HloInstruction* instruction_to_merge) {
return Cast<HloFusionInstruction>(this)
->MergeFusionInstructionIntoMultiOutput(
Cast<HloFusionInstruction>(instruction_to_merge));
}
HloInstruction* HloInstruction::FuseInstruction(
HloInstruction* instruction_to_fuse) {
return Cast<HloFusionInstruction>(this)->FuseInstruction(instruction_to_fuse);
}
HloInstruction* HloInstruction::FuseInstructionIntoMultiOutput(
HloInstruction* instruction_to_fuse) {
return Cast<HloFusionInstruction>(this)->FuseInstructionIntoMultiOutput(
instruction_to_fuse);
}
HloComputation* HloInstruction::fused_instructions_computation() const {
return Cast<HloFusionInstruction>(this)->fused_instructions_computation();
}
HloInstruction* HloInstruction::fused_expression_root() const {
return Cast<HloFusionInstruction>(this)->fused_expression_root();
}
tsl::gtl::iterator_range<HloInstructionUnwrappingConstIterator>
HloInstruction::fused_instructions() const {
return Cast<HloFusionInstruction>(this)->fused_instructions();
}
tsl::gtl::iterator_range<HloInstructionUnwrappingIterator>
HloInstruction::fused_instructions() {
return Cast<HloFusionInstruction>(this)->fused_instructions();
}
int64_t HloInstruction::fused_instruction_count() const {
return Cast<HloFusionInstruction>(this)->fused_instruction_count();
}
HloInstruction* HloInstruction::fused_parameter(
int64_t parameter_number) const {
return Cast<HloFusionInstruction>(this)->fused_parameter(parameter_number);
}
const HloInstruction::InstructionVector& HloInstruction::fused_parameters()
const {
return Cast<HloFusionInstruction>(this)->fused_parameters();
}
bool HloInstruction::IsMultiOutputFusion() const {
const HloFusionInstruction* fusion = DynCast<HloFusionInstruction>(this);
return fusion != nullptr && fusion->IsMultiOutputFusion();
}
HloInstruction::FusionKind HloInstruction::fusion_kind() const {
return Cast<HloFusionInstruction>(this)->fusion_kind();
}
void HloInstruction::set_fusion_kind(FusionKind kind) {
return Cast<HloFusionInstruction>(this)->set_fusion_kind(kind);
}
RandomDistribution HloInstruction::random_distribution() const {
return Cast<HloRngInstruction>(this)->random_distribution();
}
int64_t HloInstruction::parameter_number() const {
return Cast<HloParameterInstruction>(this)->parameter_number();
}
void HloInstruction::set_parameter_replicated_at_leaf_buffers(
absl::Span<const bool> parameter_replicated_at_leaf_buffers) {
return Cast<HloParameterInstruction>(this)
->set_parameter_replicated_at_leaf_buffers(
parameter_replicated_at_leaf_buffers);
}
void HloInstruction::set_parameter_replicated_at_leaf_buffers(
const std::vector<bool>& parameter_replicated_at_leaf_buffers) {
return Cast<HloParameterInstruction>(this)
->set_parameter_replicated_at_leaf_buffers(
parameter_replicated_at_leaf_buffers);
}
const std::optional<std::vector<bool>>&
HloInstruction::parameter_replicated_at_leaf_buffers() const {
return Cast<HloParameterInstruction>(this)
->parameter_replicated_at_leaf_buffers();
}
int64_t HloInstruction::tuple_index() const {
return Cast<HloGetTupleElementInstruction>(this)->tuple_index();
}
void HloInstruction::set_tuple_index(int64_t new_tuple_index) {
return Cast<HloGetTupleElementInstruction>(this)->set_tuple_index(
new_tuple_index);
}
int32_t HloInstruction::exponent_bits() const {
return Cast<HloReducePrecisionInstruction>(this)->exponent_bits();
}
int32_t HloInstruction::mantissa_bits() const {
return Cast<HloReducePrecisionInstruction>(this)->mantissa_bits();
}
std::string HloInstruction::infeed_config() const {
return Cast<HloInfeedInstruction>(this)->infeed_config();
}
void HloInstruction::set_infeed_config(const std::string& config) {
return Cast<HloInfeedInstruction>(this)->set_infeed_config(config);
}
const Shape& HloInstruction::outfeed_shape() const {
return Cast<HloOutfeedInstruction>(this)->outfeed_shape();
}
Shape* HloInstruction::mutable_outfeed_shape() {
return Cast<HloOutfeedInstruction>(this)->mutable_outfeed_shape();
}
const std::string& HloInstruction::outfeed_config() const {
return Cast<HloOutfeedInstruction>(this)->outfeed_config();
}
void HloInstruction::set_outfeed_config(const std::string& config) {
return Cast<HloOutfeedInstruction>(this)->set_outfeed_config(config);
}
const std::vector<ReplicaGroup>& HloInstruction::replica_groups() const {
return Cast<HloCollectiveInstruction>(this)->replica_groups();
}
const CollectiveDeviceList& HloInstruction::device_list() const {
return Cast<HloCollectiveInstruction>(this)->device_list();
}
const std::vector<std::pair<int64_t, int64_t>>&
HloInstruction::source_target_pairs() const {
return Cast<HloCollectivePermuteInstruction>(this)->source_target_pairs();
}
std::optional<int64_t> HloInstruction::channel_id() const {
return Cast<HloChannelInstruction>(this)->channel_id();
}
void HloInstruction::set_channel_id(const std::optional<int64_t>& channel_id) {
return Cast<HloChannelInstruction>(this)->set_channel_id(channel_id);
}
const ConvolutionDimensionNumbers&
HloInstruction::convolution_dimension_numbers() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->convolution_dimension_numbers();
}
if (auto custom_call = DynCast<HloCustomCallInstruction>(this)) {
return custom_call->convolution_dimension_numbers();
}
LOG(FATAL) << "Unimplemented method.";
}
void HloInstruction::set_convolution_dimension_numbers(
const ConvolutionDimensionNumbers& dnums) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
convolution->set_convolution_dimension_numbers(dnums);
} else if (auto custom_call = DynCast<HloCustomCallInstruction>(this)) {
custom_call->set_convolution_dimension_numbers(dnums);
} else {
LOG(FATAL) << "Unimplemented method.";
}
}
int64_t HloInstruction::feature_group_count() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->feature_group_count();
}
return Cast<HloCustomCallInstruction>(this)->feature_group_count();
}
void HloInstruction::set_feature_group_count(int64_t feature_group_count) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->set_feature_group_count(feature_group_count);
}
Cast<HloCustomCallInstruction>(this)->set_feature_group_count(
feature_group_count);
}
int64_t HloInstruction::batch_group_count() const {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->batch_group_count();
}
return Cast<HloCustomCallInstruction>(this)->batch_group_count();
}
void HloInstruction::set_batch_group_count(int64_t batch_group_count) {
if (auto convolution = DynCast<HloConvolutionInstruction>(this)) {
return convolution->set_batch_group_count(batch_group_count);
}
Cast<HloCustomCallInstruction>(this)->set_batch_group_count(
batch_group_count);
}
HloComputation* HloInstruction::select() const {
return Cast<HloSelectAndScatterInstruction>(this)->select();
}
HloComputation* HloInstruction::scatter() const {
return Cast<HloSelectAndScatterInstruction>(this)->scatter();
}
void HloInstruction::set_select(HloComputation* computation) {
return Cast<HloSelectAndScatterInstruction>(this)->set_select(computation);
}
void HloInstruction::set_scatter(HloComputation* computation) {
return Cast<HloSelectAndScatterInstruction>(this)->set_scatter(computation);
}
const std::string& HloInstruction::custom_call_target() const {
return Cast<HloCustomCallInstruction>(this)->custom_call_target();
}
void HloInstruction::set_custom_call_target(absl::string_view target) {
Cast<HloCustomCallInstruction>(this)->set_custom_call_target(target);
}
const PaddingConfig& HloInstruction::padding_config() const {
return Cast<HloPadInstruction>(this)->padding_config();
}
PaddingType HloInstruction::padding_type() const {
return Cast<HloCustomCallInstruction>(this)->padding_type();
}
PaddingConfig* HloInstruction::mutable_padding_config() {
return Cast<HloPadInstruction>(this)->mutable_padding_config();
}
int64_t HloInstruction::slice_sizes(int64_t dimension) const {
return Cast<HloDynamicSliceInstruction>(this)->slice_sizes(dimension);
}
const std::vector<int64_t>& HloInstruction::dynamic_slice_sizes() const {
return Cast<HloDynamicSliceInstruction>(this)->dynamic_slice_sizes();
}
const std::vector<std::vector<int64_t>>&
HloInstruction::dynamic_slice_sizes_list() const {
return Cast<HloCollectivePermuteInstruction>(this)
->dynamic_slice_sizes_list();
}
const GatherDimensionNumbers& HloInstruction::gather_dimension_numbers() const {
return Cast<HloGatherInstruction>(this)->gather_dimension_numbers();
}
absl::Span<const int64_t> HloInstruction::gather_slice_sizes() const {
return Cast<HloGatherInstruction>(this)->gather_slice_sizes();
}
const ScatterDimensionNumbers& HloInstruction::scatter_dimension_numbers()
const {
return Cast<HloScatterInstruction>(this)->scatter_dimension_numbers();
}
const DotDimensionNumbers& HloInstruction::dot_dimension_numbers() const {
return Cast<HloDotInstruction>(this)->dot_dimension_numbers();
}
const DomainMetadata& HloInstruction::operand_side_metadata() const {
return Cast<HloDomainInstruction>(this)->operand_side_metadata();
}
const DomainMetadata& HloInstruction::user_side_metadata() const {
return Cast<HloDomainInstruction>(this)->user_side_metadata();
}
bool HloInstruction::IsAsynchronous() const {
return HloOpcodeIsAsync(opcode());
}
HloInstruction* HloInstruction::async_chain_start() const {
return Cast<HloAsyncInstruction>(this)->async_chain_start();
}
HloInstruction* HloInstruction::async_chain_done() const {
return Cast<HloAsyncInstruction>(this)->async_chain_done();
}
HloComputation* HloInstruction::async_wrapped_computation() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_computation();
}
HloInstruction* HloInstruction::async_wrapped_instruction() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_instruction();
}
HloOpcode HloInstruction::async_wrapped_opcode() const {
return Cast<HloAsyncInstruction>(this)->async_wrapped_opcode();
}
absl::string_view HloInstruction::async_execution_thread() const {
return Cast<HloAsyncInstruction>(this)->async_execution_thread();
}
void HloInstruction::set_async_execution_thread(
absl::string_view async_execution_thread) {
Cast<HloAsyncInstruction>(this)->set_async_execution_thread(
async_execution_thread);
}
void HloInstruction::set_called_computations_execution_thread(
absl::string_view async_execution_thread,
bool skip_async_execution_thread_overwrite) {
Cast<HloCallableInstruction>(this)->RecursivelySetComputationsThreadName(
async_execution_thread, skip_async_execution_thread_overwrite);
}
std::optional<int> HloInstruction::cross_program_prefetch_index() const {
return Cast<HloCopyStartInstruction>(this)->cross_program_prefetch_index();
}
ComparisonDirection HloInstruction::comparison_direction() const {
return Cast<HloCompareInstruction>(this)->direction();
}
ComparisonOrder HloInstruction::comparison_order() const {
return Cast<HloCompareInstruction>(this)->order();
}
const TriangularSolveOptions& HloInstruction::triangular_solve_options() const {
return Cast<HloTriangularSolveInstruction>(this)->triangular_solve_options();
}
const CholeskyOptions& HloInstruction::cholesky_options() const {
return Cast<HloCholeskyInstruction>(this)->cholesky_options();
}
const std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>&
HloInstruction::output_operand_aliasing() const {
return Cast<HloCallableInstruction>(this)->output_to_operand_aliasing();
}
void HloInstruction::set_output_to_operand_aliasing(
std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>>
aliasing) {
Cast<HloCallableInstruction>(this)->set_output_to_operand_aliasing(
std::move(aliasing));
}
std::shared_ptr<OriginalValue> HloInstruction::original_value() const {
return original_value_;
}
void HloInstruction::set_original_value(
std::shared_ptr<OriginalValue> original_value) {
original_value_ = original_value;
}
} | #include "xla/hlo/ir/hlo_instruction.h"
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
class HloInstructionTest : public HloTestBase {
protected:
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
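// DFS visitor that records each node's operand and user counts and verifies
// that operands are visited before their users.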
class OpAndUserCollectingVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("not implemented %s",
HloOpcodeString(hlo_instruction->opcode()));
}
absl::Status HandleParameter(HloInstruction* parameter) override {
EXPECT_FALSE(count_.contains(parameter));
count_[parameter] = GetCountsForNode(parameter);
return absl::OkStatus();
}
absl::Status HandleConstant(HloInstruction* constant) override {
EXPECT_FALSE(count_.contains(constant));
count_[constant] = GetCountsForNode(constant);
return absl::OkStatus();
}
absl::Status HandleAdd(HloInstruction* add) override {
auto lhs = add->operand(0);
auto rhs = add->operand(1);
EXPECT_FALSE(count_.contains(add));
EXPECT_TRUE(count_.contains(lhs));
EXPECT_TRUE(count_.contains(rhs));
count_[add] = GetCountsForNode(add);
return absl::OkStatus();
}
absl::Status HandleNegate(HloInstruction* negate) override {
auto operand = negate->operand(0);
EXPECT_FALSE(count_.contains(negate));
EXPECT_TRUE(count_.contains(operand));
count_[negate] = GetCountsForNode(negate);
return absl::OkStatus();
}
absl::Status HandleMap(HloInstruction* map) override {
EXPECT_FALSE(count_.contains(map));
for (HloInstruction* arg : map->operands()) {
EXPECT_TRUE(count_.contains(arg));
}
count_[map] = GetCountsForNode(map);
return absl::OkStatus();
}
absl::Status HandleReduce(HloInstruction* reduce) override {
auto arg = reduce->operand(0);
auto init_value = reduce->operand(1);
EXPECT_FALSE(count_.contains(reduce));
EXPECT_TRUE(count_.contains(arg));
EXPECT_TRUE(count_.contains(init_value));
count_[reduce] = GetCountsForNode(reduce);
return absl::OkStatus();
}
int64_t NumOperands(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.operand_count;
}
int64_t NumUsers(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.user_count;
}
private:
struct NumOpsAndUsers {
int64_t operand_count;
int64_t user_count;
};
NumOpsAndUsers GetCountsForNode(const HloInstruction* node) {
NumOpsAndUsers counts{node->operand_count(), node->user_count()};
return counts;
}
absl::flat_hash_map<const HloInstruction*, NumOpsAndUsers> count_;
};
TEST_F(HloInstructionTest, BasicProperties) {
auto parameter = HloInstruction::CreateParameter(1, r0f32_, "foo");
EXPECT_EQ(HloOpcode::kParameter, parameter->opcode());
EXPECT_TRUE(ShapeUtil::IsScalarWithElementType(parameter->shape(), F32));
EXPECT_FALSE(ShapeUtil::IsScalarWithElementType(parameter->shape(), S32));
EXPECT_FALSE(parameter->operand_count());
}
TEST_F(HloInstructionTest, UserWithTwoOperands) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(add->operands(), UnorderedElementsAre(foo, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add));
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(0, visitor.NumUsers(add));
EXPECT_EQ(1, visitor.NumUsers(foo));
EXPECT_EQ(1, visitor.NumUsers(bar));
}
TEST_F(HloInstructionTest, MultipleUsers) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(0, exp1->user_count());
EXPECT_EQ(0, exp2->user_count());
EXPECT_EQ(0, add->user_count());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(3, visitor.NumUsers(foo));
}
TEST_F(HloInstructionTest, RepeatedUser) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, add->operand_count());
}
TEST_F(HloInstructionTest, MultipleUsersAndOperands) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, c0));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c0, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(addtotal->Accept(&visitor));
EXPECT_EQ(2, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
}
TEST_F(HloInstructionTest, MultipleUsersAndOperandsWithUnaryOps) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto neg1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, c0));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, neg1));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, neg1, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto neg2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, addtotal));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(neg2->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumUsers(neg1));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
EXPECT_EQ(1, visitor.NumOperands(neg2));
EXPECT_EQ(0, visitor.NumUsers(neg2));
}
TEST_F(HloInstructionTest, TrivialMap) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto module = CreateNewVerifiedModule();
auto embedded_builder = HloComputation::Builder("f32+1");
auto param = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto value = embedded_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, value));
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10, {param0}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(map->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(0, visitor.NumUsers(map));
EXPECT_EQ(1, visitor.NumOperands(map));
}
TEST_F(HloInstructionTest, TrivialReduce) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32v100 = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto embedded_builder = HloComputation::Builder("f32+f32");
auto paramx = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto paramy = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "y"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, paramx, paramy));
auto module = CreateNewVerifiedModule();
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
  auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
      f32v100, param0, const0, /*dimensions_to_reduce=*/{1}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(reduce->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(1, visitor.NumUsers(const0));
EXPECT_EQ(0, visitor.NumUsers(reduce));
EXPECT_EQ(2, visitor.NumOperands(reduce));
}
TEST_F(HloInstructionTest, ReplaceUseInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(add_foofoo, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(add_foofoo->operands(), ElementsAre(bar, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInVariadicOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto baz =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0f32_, "baz"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({foo, bar, baz, foo}));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(tuple, add_foobar));
ASSERT_IS_OK(foo->ReplaceUseWith(tuple, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(tuple->operands(), ElementsAre(bar, bar, baz, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInUnaryOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(exp, log));
EXPECT_EQ(0, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(exp, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(log));
EXPECT_THAT(log->operands(), ElementsAre(foo));
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(*bar->users().begin(), exp);
EXPECT_EQ(1, exp->operands().size());
EXPECT_EQ(*exp->operands().begin(), bar);
}
TEST_F(HloInstructionTest, ReplaceAllUsesWithInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
}
TEST_F(HloInstructionTest, ReplaceAllUsesInMultipleOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({foo, bar}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(2, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(3, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, exp, tuple));
}
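// Collects nodes in both visit order and post-process order so tests can
// verify that every visited node is post-processed exactly once.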
class NodeCollectorAndPostProcessor : public DfsHloVisitorWithDefault {
public:
NodeCollectorAndPostProcessor() {}
absl::Status Postprocess(HloInstruction* hlo) override {
post_processed_nodes_.push_back(hlo);
return absl::OkStatus();
}
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
visited_nodes_.push_back(hlo_instruction);
return absl::OkStatus();
}
const std::vector<const HloInstruction*>& visited_nodes() {
return visited_nodes_;
}
const std::vector<const HloInstruction*>& post_processed_nodes() {
return post_processed_nodes_;
}
private:
std::vector<const HloInstruction*> visited_nodes_;
std::vector<const HloInstruction*> post_processed_nodes_;
};
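// Returns true iff all pointers in `vec` are distinct.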
bool Distinct(const std::vector<const HloInstruction*>& vec) {
std::set<const HloInstruction*> distinct_nodes(vec.begin(), vec.end());
return distinct_nodes.size() == vec.size();
}
TEST_F(HloInstructionTest, PostProcessAllVisitedNodes) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp, log));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
NodeCollectorAndPostProcessor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
TEST_F(HloInstructionTest, PostProcessAllVisitedNodesMultiComputation) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
c.3 = f32[] add(c.1, c.2)
c.4 = f32[] constant(4)
ROOT ret = f32[] multiply(c.4, c.3)
}
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* add1 = FindInstruction(module.get(), "add.1");
EXPECT_EQ(add1, module->entry_computation()->root_instruction());
NodeCollectorAndPostProcessor visitor;
  ASSERT_IS_OK(add1->Accept(&visitor, /*call_finish_visit=*/true,
                            /*ignore_control_predecessors=*/false,
                            /*cross_computation=*/true));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
TEST_F(HloInstructionTest, SingletonFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, BinaryFusionOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{add}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(fusion));
EXPECT_THAT(constant2->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, ChainFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp3, exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
OpMetadata metadata;
metadata.set_op_name("tf_op");
exp1->set_metadata(metadata);
exp2->set_metadata(metadata);
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->operand(0)->metadata()));
std::string new_name = "foobarfoo";
auto cloned = fusion->CloneWithNewOperands(fusion->shape(), {}, new_name);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
size_t index = cloned->name().rfind(new_name);
EXPECT_TRUE(index != std::string::npos);
}
TEST_F(HloInstructionTest, BinaryCallOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({add});
EXPECT_THAT(call->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(call));
EXPECT_THAT(constant2->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, ChainCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, MultiOutputCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto exp4 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp3, exp4));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
call->AppendInstructionIntoCalledComputation(exp4, true);
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_EQ(add->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(0)->operands(), ElementsAre(call));
EXPECT_EQ(add->operand(1)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(1)->operands(), ElementsAre(call));
}
TEST_F(HloInstructionTest, AsyncOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, "parallel_thread"));
auto* async_start = async_done->operand(0);
EXPECT_EQ(async_start->shape().tuple_shapes_size(), 3);
EXPECT_EQ(async_start->async_execution_thread(), "parallel_thread");
EXPECT_EQ(async_done->async_execution_thread(), "parallel_thread");
EXPECT_TRUE(ShapeUtil::Equal(async_start->shape().tuple_shapes(2),
ShapeUtil::MakeScalarShape(U32)));
EXPECT_EQ(async_start->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_EQ(async_done->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_THAT(async_start->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(async_start));
EXPECT_THAT(constant2->users(), ElementsAre(async_start));
EXPECT_EQ(computation->root_instruction(), async_done);
}
TEST_F(HloInstructionTest, AsyncOpWithDeps) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant3, constant4));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
TF_ASSERT_OK(add1->AddControlDependencyTo(add));
TF_ASSERT_OK(add->AddControlDependencyTo(add2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, "parallel_thread"));
auto* async_start = async_done->operand(0);
EXPECT_EQ(async_start->control_predecessors().size(), 1);
EXPECT_EQ(async_start->control_predecessors()[0], add1);
EXPECT_EQ(async_done->control_successors().size(), 1);
EXPECT_EQ(async_done->control_successors()[0], add2);
EXPECT_EQ(async_start->shape().tuple_shapes_size(), 3);
EXPECT_EQ(async_start->async_execution_thread(), "parallel_thread");
EXPECT_EQ(async_done->async_execution_thread(), "parallel_thread");
EXPECT_TRUE(ShapeUtil::Equal(async_start->shape().tuple_shapes(2),
ShapeUtil::MakeScalarShape(U32)));
EXPECT_EQ(async_start->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_EQ(async_done->async_wrapped_computation()->execution_thread(),
"parallel_thread");
EXPECT_THAT(async_start->operands(), ElementsAre(constant1, constant2));
}
TEST_F(HloInstructionTest, PreserveOutfeedShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto shape10 = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {1, 0});
auto shape01 = ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 2}, {0, 1});
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto outfeed10 = builder.AddInstruction(
HloInstruction::CreateOutfeed(shape10, constant, token, ""));
auto outfeed01 = builder.AddInstruction(
HloInstruction::CreateOutfeed(shape01, constant, token, ""));
auto clone01 = builder.AddInstruction(outfeed01->Clone());
auto clone10 = builder.AddInstruction(outfeed10->Clone());
EXPECT_TRUE(ShapeUtil::Equal(clone01->outfeed_shape(), shape01));
EXPECT_TRUE(ShapeUtil::Equal(clone10->outfeed_shape(), shape10));
}
TEST_F(HloInstructionTest, PreserveTupleShapeThroughClone) {
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
*ShapeUtil::GetMutableSubshape(tuple->mutable_shape(), {0})
->mutable_layout() = LayoutUtil::MakeLayout({0, 1});
*ShapeUtil::GetMutableSubshape(tuple->mutable_shape(), {1})
->mutable_layout() = LayoutUtil::MakeLayout({1, 0});
auto tuple_clone = tuple->Clone();
EXPECT_TRUE(ShapeUtil::Equal(tuple_clone->shape(), tuple->shape()));
}
TEST_F(HloInstructionTest, PreserveShardingThroughCompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
HloSharding tuple_sharding =
HloSharding::SingleTuple(tuple->shape(), sharding);
tuple->set_sharding(tuple_sharding);
auto clone_shape = ShapeUtil::MakeShape(F32, {3, 3});
clone_shape = ShapeUtil::MakeTupleShape({clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_EQ(tuple_clone->sharding(), tuple_sharding);
}
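// Sharding is dropped when the clone's tuple tree shape is incompatible.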
TEST_F(HloInstructionTest,
DoNotPreserveShardingThroughTupleTreeIncompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
tuple->set_sharding(HloSharding::SingleTuple(tuple->shape(), sharding));
auto clone_shape = ShapeUtil::MakeShape(F32, {2, 2});
clone_shape =
ShapeUtil::MakeTupleShape({clone_shape, clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_FALSE(tuple_clone->has_sharding());
}
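// Sharding is also dropped when a leaf shape's rank changes.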
TEST_F(HloInstructionTest,
DoNotPreserveShardingThroughLeafRankIncompatibleClone) {
HloSharding sharding = HloSharding::AssignDevice(5);
HloComputation::Builder builder(TestName());
auto* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR2<float>({
{1, 2},
{3, 4},
})));
auto* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
tuple->set_sharding(HloSharding::SingleTuple(tuple->shape(), sharding));
auto clone_shape = ShapeUtil::MakeShape(F32, {1, 2, 3});
clone_shape = ShapeUtil::MakeTupleShape({clone_shape, clone_shape});
auto tuple_clone = tuple->CloneWithNewOperands(clone_shape, {});
EXPECT_FALSE(tuple_clone->has_sharding());
}
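// Fusing maps must leave called_computations() containing only the fused
// computation, even as more callers are fused in.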
TEST_F(HloInstructionTest, FusionOpWithCalledComputations) {
const Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto module = CreateNewVerifiedModule();
auto make_map_computation = [&]() {
auto builder = HloComputation::Builder("FusionMap");
builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "param"));
return module->AddEmbeddedComputation(builder.Build());
};
HloComputation* computation_x = make_map_computation();
HloComputation* computation_y = make_map_computation();
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto map_1_x = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {constant}, computation_x));
auto map_2_x = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {map_1_x}, computation_x));
auto map_3_y = builder.AddInstruction(
HloInstruction::CreateMap(scalar_shape, {map_2_x}, computation_y));
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{map_3_y}, HloInstruction::FusionKind::kLoop);
auto* fused_computation = fusion->fused_instructions_computation();
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
fusion->FuseInstruction(map_2_x);
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
fusion->FuseInstruction(map_1_x);
EXPECT_THAT(fusion->called_computations(), ElementsAre(fused_computation));
}
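// Fusing a DAG with shared values deduplicates the fusion's external
// operands and rewires users to the fusion.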
TEST_F(HloInstructionTest, ComplexFusionOp) {
HloComputation::Builder builder(TestName());
auto c1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto c2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.1f)));
auto c3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(9.0f)));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c1, c2));
auto clamp = builder.AddInstruction(
HloInstruction::CreateTernary(r0f32_, HloOpcode::kClamp, c2, add, add));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, add));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kMultiply, exp, c3));
auto sub = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kSubtract, mul, clamp));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({sub, sub, mul, c1}));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{tuple, sub, mul, exp, clamp, add}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(c1, c3, c2));
EXPECT_THAT(c1->users(), ElementsAre(fusion));
}
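// Returns instruction1.Identical(instruction2) after checking that
// Identical() is reflexive and symmetric for the pair.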
static bool Identical(const HloInstruction& instruction1,
const HloInstruction& instruction2) {
EXPECT_TRUE(instruction1.Identical(instruction1));
EXPECT_TRUE(instruction2.Identical(instruction2));
bool is_equal = instruction1.Identical(instruction2);
EXPECT_EQ(is_equal, instruction2.Identical(instruction1));
return is_equal;
}
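// Like Identical(), but compares operands by shape and called computations
// by value, i.e. structural equality.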
static bool StructuralEqual(const HloInstruction& instruction1,
const HloInstruction& instruction2) {
auto eq_operand_shapes = [](const HloInstruction* a,
const HloInstruction* b) {
return ShapeUtil::Equal(a->shape(), b->shape());
};
auto eq_computations = [](const HloComputation* a, const HloComputation* b) {
return *a == *b;
};
EXPECT_TRUE(
instruction1.Identical(instruction1, eq_operand_shapes, eq_computations));
EXPECT_TRUE(
instruction2.Identical(instruction2, eq_operand_shapes, eq_computations));
bool is_equal =
instruction1.Identical(instruction2, eq_operand_shapes, eq_computations);
EXPECT_EQ(is_equal, instruction2.Identical(instruction1, eq_operand_shapes,
eq_computations));
return is_equal;
}
TEST_F(HloInstructionTest, IdenticalInstructions) {
auto operand1 = HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}));
auto operand2 = HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{10.0, 20.0}, {30.0, 40.0}}));
auto vector_operand = HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({42.0, 123.0}));
Shape shape = operand1->shape();
HloInstruction* op1 = operand1.get();
HloInstruction* op2 = operand2.get();
EXPECT_TRUE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1)));
EXPECT_FALSE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op2)));
EXPECT_FALSE(
Identical(*HloInstruction::CreateUnary(shape, HloOpcode::kCopy, op1),
*HloInstruction::CreateUnary(shape, HloOpcode::kNegate, op1)));
EXPECT_TRUE(Identical(*HloInstruction::CreateTuple({op1, op2}),
*HloInstruction::CreateTuple({op1, op2})));
EXPECT_FALSE(Identical(*HloInstruction::CreateTuple({op1, op2}),
*HloInstruction::CreateTuple({op2, op1})));
EXPECT_TRUE(Identical(*HloInstruction::CreateBroadcast(shape, op1, {0, 1}),
*HloInstruction::CreateBroadcast(shape, op1, {0, 1})));
EXPECT_FALSE(Identical(*HloInstruction::CreateBroadcast(shape, op1, {0, 1}),
*HloInstruction::CreateBroadcast(shape, op1, {1, 0})));
Shape bcast_shape1 = ShapeUtil::MakeShape(F32, {2, 2, 42});
Shape bcast_shape2 = ShapeUtil::MakeShape(F32, {2, 2, 123});
EXPECT_FALSE(
Identical(*HloInstruction::CreateBroadcast(bcast_shape1, op1, {0, 1}),
*HloInstruction::CreateBroadcast(bcast_shape2, op1, {0, 1})));
EXPECT_TRUE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2)));
EXPECT_FALSE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kDivide, op2, op1)));
EXPECT_FALSE(Identical(
*HloInstruction::CreateBinary(shape, HloOpcode::kAdd, op1, op2),
*HloInstruction::CreateBinary(shape, HloOpcode::kDivide, op1, op2)));
}
TEST_F(HloInstructionTest, IdenticalCallInstructions) {
const char* const hlo_string = R"(
HloModule Module
subcomp1 (x: f32[]) -> f32[] {
x = f32[] parameter(0)
ROOT n = f32[] sine(x)
}
subcomp2 (x: f32[]) -> f32[] {
x = f32[] parameter(0)
ROOT n = f32[] cosine(x)
}
ENTRY entry (param: f32[]) -> (f32[], f32[], f32[]) {
p = f32[] parameter(0)
t1 = f32[] call(p), to_apply=subcomp1
t2 = f32[] call(p), to_apply=subcomp1
t3 = f32[] call(p), to_apply=subcomp2
ROOT t = (f32[], f32[], f32[]) tuple(t1, t2, t3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto* root = module->entry_computation()->root_instruction();
auto* t1 = root->operand(0);
auto* t2 = root->operand(1);
auto* t3 = root->operand(2);
EXPECT_TRUE(StructuralEqual(*t1, *t2));
EXPECT_FALSE(StructuralEqual(*t1, *t3));
}
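// FunctionVisitor must visit each reachable instruction exactly once, with
// operands visited before their users.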
TEST_F(HloInstructionTest, FunctionVisitor) {
const Shape f32 = ShapeUtil::MakeShape(F32, {});
HloComputation::Builder builder(TestName());
auto param =
builder.AddInstruction(HloInstruction::CreateParameter(0, f32, "0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(f32, HloOpcode::kNegate, param));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(f32, HloOpcode::kExp, param));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32, HloOpcode::kAdd, negate, exp));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
int visit_num = 0;
absl::flat_hash_map<HloInstruction*, int> visit_order;
FunctionVisitor visitor([&visit_num, &visit_order](HloInstruction* inst) {
EXPECT_FALSE(visit_order.contains(inst));
visit_order[inst] = visit_num;
visit_num++;
return absl::OkStatus();
});
EXPECT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(0, visit_order.at(param));
EXPECT_TRUE(visit_order.at(exp) == 1 || visit_order.at(exp) == 2);
EXPECT_TRUE(visit_order.at(negate) == 1 || visit_order.at(negate) == 2);
EXPECT_NE(visit_order.at(exp), visit_order.at(negate));
EXPECT_EQ(3, visit_order.at(add));
}
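// An elementwise binary op is elementwise on every operand.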
TEST_F(HloInstructionTest, FullyElementwise) {
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
HloComputation::Builder builder(TestName());
auto x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r1f32, "x"));
auto y =
builder.AddInstruction(HloInstruction::CreateParameter(1, r1f32, "y"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kAdd, x, y));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(add->IsElementwise());
for (int i = 0; i < add->operand_count(); ++i) {
EXPECT_TRUE(add->IsElementwiseOnOperand(i));
}
}
TEST_F(HloInstructionTest, MapIsElementwise) {
auto module = CreateNewVerifiedModule();
const Shape r2f32 =
ShapeUtil::MakeShapeWithDenseLayout(F32, {10, 10}, {1, 0});
HloComputation::Builder builder(TestName());
HloComputation::Builder map_builder("id");
map_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p0"));
auto map_computation = module->AddEmbeddedComputation(map_builder.Build());
auto x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r2f32, "x"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(r2f32, {x}, map_computation));
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(map->IsElementwise());
}
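// A fusion containing a broadcast is not elementwise overall, but is
// elementwise on every operand except the broadcast's input.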
TEST_F(HloInstructionTest, PartiallyElementwise) {
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
const Shape r2f32 = ShapeUtil::MakeShape(F32, {3, 5});
HloComputation::Builder builder("PartiallyElementwise");
HloInstruction* p0 =
builder.AddInstruction(HloInstruction::CreateParameter(0, r2f32, "p0"));
HloInstruction* p1 =
builder.AddInstruction(HloInstruction::CreateParameter(1, r2f32, "p1"));
HloInstruction* p2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r2f32, "p2"));
HloInstruction* p3 =
builder.AddInstruction(HloInstruction::CreateParameter(3, r1f32, "p3"));
HloInstruction* mul = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMultiply, p0, p1));
HloInstruction* div = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kDivide, mul, p2));
HloInstruction* broadcast =
builder.AddInstruction(HloInstruction::CreateBroadcast(r2f32, p3, {1}));
HloInstruction* max = builder.AddInstruction(
HloInstruction::CreateBinary(r2f32, HloOpcode::kMaximum, div, broadcast));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{max, broadcast, div, mul}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(fusion->IsElementwise());
for (int64_t operand_idx = 0; operand_idx < fusion->operand_count();
++operand_idx) {
const HloInstruction* operand = fusion->operand(operand_idx);
if (operand == p3) {
EXPECT_FALSE(fusion->IsElementwiseOnOperand(operand_idx));
} else {
EXPECT_TRUE(fusion->IsElementwiseOnOperand(operand_idx));
}
}
}
TEST_F(HloInstructionTest, PartiallyElementwiseWithReuse) {
const Shape r0f32 = ShapeUtil::MakeShape(F32, {});
const Shape r1f32 = ShapeUtil::MakeShape(F32, {5});
HloComputation::Builder builder("PartiallyElementwiseWithReuse");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, r1f32, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32, "y"));
HloInstruction* broadcast =
builder.AddInstruction(HloInstruction::CreateBroadcast(r1f32, y, {}));
HloInstruction* min = builder.AddInstruction(
HloInstruction::CreateBinary(r1f32, HloOpcode::kMinimum, x, broadcast));
HloInstruction* sub = builder.AddInstruction(HloInstruction::CreateBinary(
r1f32, HloOpcode::kSubtract, min, broadcast));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{sub, broadcast, min}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(fusion->IsElementwise());
for (int64_t operand_idx = 0; operand_idx < fusion->operand_count();
++operand_idx) {
if (fusion->operand(operand_idx) == y) {
EXPECT_FALSE(fusion->IsElementwiseOnOperand(operand_idx));
} else {
EXPECT_TRUE(fusion->IsElementwiseOnOperand(operand_idx));
}
}
}
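// Cloning a fusion must preserve shapes throughout the fused expression,
// not just at the root.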
TEST_F(HloInstructionTest, CloneOfFusionPreservesShape) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
auto fusion2 = fusion->Clone();
const HloInstruction* root = fusion->fused_expression_root();
const HloInstruction* root2 = fusion2->fused_expression_root();
EXPECT_TRUE(ShapeUtil::Equal(root->shape(), root2->shape()));
EXPECT_TRUE(
ShapeUtil::Equal(root->operand(0)->shape(), root2->operand(0)->shape()));
EXPECT_TRUE(
ShapeUtil::Equal(root->operand(1)->shape(), root2->operand(1)->shape()));
EXPECT_TRUE(ShapeUtil::Equal(root->operand(1)->operand(0)->shape(),
root2->operand(1)->operand(0)->shape()));
EXPECT_TRUE(StructuralEqual(*fusion, *fusion2));
}
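// FuseInstruction absorbs the instruction into the fusion body while the
// original stays in the parent computation with no remaining users.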
TEST_F(HloInstructionTest, FuseInstructionKeepsInstruction) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT add = f32[32,32]{1,0} add(p0, p1)
}
ENTRY reduce {
p2 = f32[32,32]{1,0} parameter(0)
p3 = f32[32,32]{1,0} parameter(1)
c1 = f32[] constant(1)
broadcast = f32[32,32]{1,0} broadcast(c1), dimensions={}
mul = f32[32,32]{1,0} multiply(p2, p3)
ROOT add = f32[32,32]{1,0} fusion(mul, broadcast), kind=kLoop, calls=fused_add
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* fused_add = module->entry_computation()->root_instruction();
HloInstruction* mul = fused_add->mutable_operand(0);
EXPECT_EQ(1, mul->user_count());
fused_add->FuseInstruction(mul);
EXPECT_EQ(0, mul->user_count());
EXPECT_EQ(fused_add->parent(), mul->parent());
}
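// Same as above for multi-output fusion: the fused instruction loses its
// users but remains in the parent computation.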
TEST_F(HloInstructionTest, FuseInstructionIntoMultiOutputKeepsInstruction) {
constexpr char kHloString[] = R"(
HloModule test_module
fused_add {
p0 = f32[32,32]{1,0} parameter(0)
p1 = f32[32,32]{1,0} parameter(1)
ROOT add = f32[32,32]{1,0} add(p0, p1)
}
ENTRY reduce {
p2 = f32[32,32]{1,0} parameter(0)
p3 = f32[32,32]{1,0} parameter(1)
c1 = f32[] constant(1)
mul = f32[32,32]{1,0} multiply(p2, p3)
broadcast = f32[32,32]{1,0} broadcast(c1), dimensions={}
add = f32[32,32]{1,0} fusion(mul, broadcast), kind=kLoop, calls=fused_add
ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(mul, add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* root = module->entry_computation()->root_instruction();
HloInstruction* mul = root->mutable_operand(0);
HloInstruction* fused_add = root->mutable_operand(1);
EXPECT_EQ(2, mul->user_count());
fused_add->FuseInstructionIntoMultiOutput(mul);
EXPECT_EQ(0, mul->user_count());
EXPECT_EQ(root->parent(), mul->parent());
}
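// Replacing x with y outside the fusion must deduplicate the fusion's
// operands down to a single parameter.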
TEST_F(HloInstructionTest, NoRedundantFusionOperandsAfterReplacingUse) {
const Shape s = ShapeUtil::MakeShape(F32, {10, 10});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
s, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(x->ReplaceAllUsesWith(y).ok());
EXPECT_THAT(fusion->operands(), UnorderedElementsAre(y));
EXPECT_EQ(fusion->fused_instructions_computation()->num_parameters(), 1);
}
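// Fusions with different bodies are structurally unequal; a clone is equal.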
TEST_F(HloInstructionTest, FusionEquality) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
auto parameter =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, parameter));
auto neg = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, parameter));
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp}, HloInstruction::FusionKind::kLoop);
auto* fusion2 = computation->CreateFusionInstruction(
{neg}, HloInstruction::FusionKind::kLoop);
EXPECT_FALSE(StructuralEqual(*fusion, *fusion2));
auto clone = fusion->Clone();
EXPECT_TRUE(StructuralEqual(*fusion, *clone));
}
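// Structural (in)equality must also hold through nested fusions.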
TEST_F(HloInstructionTest, NestedFusionEquality) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape data_shape = ShapeUtil::MakeShape(F32, {2, 2});
auto a = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.0, 0.0}, {0.0, 1.0}})));
auto b = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{2.0, 2.0}, {2.0, 2.0}})));
auto b_t = builder.AddInstruction(
HloInstruction::CreateTranspose(data_shape, b, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = builder.AddInstruction(HloInstruction::CreateDot(
data_shape, a, b_t, dot_dnums, DefaultPrecisionConfig(2)));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
auto add_operand = builder.AddInstruction(
HloInstruction::CreateBroadcast(data_shape, one, {}));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kAdd, dot, add_operand));
auto sub = builder.AddInstruction(HloInstruction::CreateBinary(
data_shape, HloOpcode::kSubtract, dot, add_operand));
builder.AddInstruction(
HloInstruction::CreateBinary(data_shape, HloOpcode::kMultiply, add, sub));
auto computation = module->AddEntryComputation(builder.Build());
auto nested_fusion = computation->CreateFusionInstruction(
{dot, b_t}, HloInstruction::FusionKind::kLoop);
auto fusion = computation->CreateFusionInstruction(
{add, nested_fusion}, HloInstruction::FusionKind::kOutput);
auto fusion2 = computation->CreateFusionInstruction(
{sub, nested_fusion}, HloInstruction::FusionKind::kOutput);
auto clone = fusion->Clone();
EXPECT_TRUE(StructuralEqual(*fusion, *clone));
EXPECT_FALSE(StructuralEqual(*fusion, *fusion2));
}
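// Clone() appends ".clone" and then increments existing numeric suffixes.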
TEST_F(HloInstructionTest, CloneSuffixNames) {
auto foo =
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "foo");
EXPECT_EQ(foo->Clone()->name(), "foo.clone");
EXPECT_EQ(foo->Clone()->Clone()->name(), "foo.clone2");
EXPECT_EQ(foo->Clone()->Clone()->Clone()->name(), "foo.clone3");
EXPECT_EQ(foo->Clone("bar")->name(), "foo.bar");
EXPECT_EQ(foo->Clone("bar")->Clone("bar")->name(), "foo.bar2");
EXPECT_EQ(foo->Clone("bar")->Clone("bar")->Clone()->name(), "foo.bar2.clone");
auto foo_baz = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.baz");
EXPECT_EQ(foo_baz->Clone()->name(), "foo.baz.clone");
auto foo_clone234 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clone234");
EXPECT_EQ(foo_clone234->Clone()->name(), "foo.clone235");
auto foo_clonexyz = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clonexyz");
EXPECT_EQ(foo_clonexyz->Clone()->name(), "foo.clonexyz.clone");
auto foo_clone_clone3 = HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "foo.clone.clone3");
EXPECT_EQ(foo_clone_clone3->Clone()->name(), "foo.clone.clone4");
}
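// Stringification of dot under default and layout-free print options.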
TEST_F(HloInstructionTest, StringifyDot) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto options = HloPrintOptions().set_print_metadata(false);
EXPECT_EQ(dot->ToString(options),
"%dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} "
"%transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}");
auto options2 = HloPrintOptions()
.set_print_metadata(false)
.set_print_operand_shape(false)
.set_print_percent(false)
.set_include_layout_in_shapes(false);
EXPECT_EQ(dot->ToString(options2),
"dot = f32[5,20] dot(x, transpose), "
"lhs_contracting_dims={1}, rhs_contracting_dims={0}");
}
TEST_F(HloInstructionTest, StringifySparseDot) {
HloComputation::Builder builder("SparseDot");
HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {5, 16}), "x"));
HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32, 20}), "y"));
  HloInstruction* meta = builder.AddInstruction(HloInstruction::CreateParameter(
      2, ShapeUtil::MakeShape(U16, {5, 2}), "meta"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
SparsityDescriptor sparsity_descriptor;
sparsity_descriptor.set_type(SparsityType::SPARSITY_STRUCTURED_N_M);
sparsity_descriptor.set_n(2);
sparsity_descriptor.set_m(4);
sparsity_descriptor.set_index(0);
sparsity_descriptor.set_dimension(1);
std::vector<HloInstruction*> meta_operands = {meta};
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
ShapeUtil::MakeShape(F32, {5, 20}), x, y, dot_dnums,
DefaultPrecisionConfig(2), {sparsity_descriptor}, meta_operands));
EXPECT_EQ(dot->ToString(),
"%dot = f32[5,20]{1,0} dot(f32[5,16]{1,0} %x, f32[32,20]{1,0} %y, "
"u16[5,2]{1,0} %meta), lhs_contracting_dims={1}, "
"rhs_contracting_dims={0}, sparsity=L.1@2:4");
}
TEST_F(HloInstructionTest, StringifyConditional) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
builder.AddInstruction(HloInstruction::CreateDot(sout, x, reshape, dot_dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
EXPECT_EQ(conditional->ToString(options),
"%conditional = f32[5,20]{1,0} conditional(pred[] %constant, "
"f32[5,10]{1,0} %x, f32[5,10]{1,0} %x), "
"true_computation=%TransposeDot, false_computation=%TransposeDot");
}
TEST_F(HloInstructionTest, StringifyWhile) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
builder.AddInstruction(HloInstruction::CreateDot(sout, x, reshape, dot_dnums,
DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto options = HloPrintOptions().set_print_metadata(false);
HloInstruction* loop = builder.AddInstruction(
HloInstruction::CreateWhile(sout, computation, computation, x));
EXPECT_EQ(loop->ToString(options),
"%while = f32[5,20]{1,0} while(f32[5,10]{1,0} %x), "
"condition=%TransposeDot, body=%TransposeDot");
}
TEST_F(HloInstructionTest, GetSetStatisticsViz) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 10});
HloComputation::Builder builder(TestName());
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
StatisticsViz statistics_viz;
statistics_viz.set_stat_index_to_visualize(-1);
x->set_statistics_viz(statistics_viz);
EXPECT_FALSE(x->has_statistics());
EXPECT_EQ(x->statistics_viz().stat_index_to_visualize(), -1);
Statistic statistic;
statistic.set_stat_name("stat-1");
statistic.set_stat_val(30.0);
x->add_single_statistic(statistic);
x->set_stat_index_to_visualize(0);
EXPECT_TRUE(x->has_statistics());
EXPECT_TRUE(
protobuf_util::ProtobufEquals(x->statistic_to_visualize(), statistic));
statistic.set_stat_val(40.0);
*statistics_viz.add_statistics() = statistic;
x->set_statistics_viz(statistics_viz);
EXPECT_TRUE(
protobuf_util::ProtobufEquals(x->statistics_viz(), statistics_viz));
}
TEST_F(HloInstructionTest, StringifyStatisticsViz) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 10});
HloComputation::Builder builder(TestName());
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, shape, "y"));
HloInstruction* add = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, x, y));
add->set_statistics_viz({});
EXPECT_EQ(add->ToString(),
"%add = f32[5,10]{1,0} add(f32[5,10]{1,0} %x, f32[5,10]{1,0} %y)");
auto CreateStatisticsVizWithStatistics =
[](int64_t stat_index_to_visualize,
std::initializer_list<std::pair<absl::string_view, double>> statistics)
-> StatisticsViz {
StatisticsViz statistics_viz;
statistics_viz.set_stat_index_to_visualize(stat_index_to_visualize);
auto create_statistic = [](absl::string_view statistic_name,
double statistic_value) {
Statistic statistic;
statistic.set_stat_name(std::string(statistic_name));
statistic.set_stat_val(statistic_value);
return statistic;
};
for (const auto& [statistic_name, statistic_value] : statistics) {
*statistics_viz.add_statistics() =
create_statistic(statistic_name, statistic_value);
}
return statistics_viz;
};
add->set_statistics_viz(CreateStatisticsVizWithStatistics(
1, {{"stat-1", 33.0}, {"stat-2", 44.0}}));
EXPECT_EQ(add->ToString(),
"%add = f32[5,10]{1,0} add(f32[5,10]{1,0} %x, f32[5,10]{1,0} %y), "
"statistics={visualizing_index=1,stat-1=33,stat-2=44}");
}
TEST_F(HloInstructionTest, StringifyGather_0) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape start_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 8, 7, 5});
Shape gather_result_shape =
ShapeUtil::MakeShape(F32, {10, 9, 8, 7, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Gather");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* start_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, start_indices_tensor_shape, "start_indices"));
  HloInstruction* gather_instruction = builder.AddInstruction(
      HloInstruction::CreateGather(gather_result_shape, input, start_indices,
                                   HloGatherInstruction::MakeGatherDimNumbers(
                                       /*offset_dims=*/{4, 5, 6, 7, 8},
                                       /*collapsed_slice_dims=*/{},
                                       /*start_index_map=*/{0, 1, 2, 3, 4},
                                       /*index_vector_dim=*/4),
                                   /*slice_sizes=*/{30, 29, 28, 27, 26},
                                   /*indices_are_sorted=*/false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(gather_instruction->ToString(),
"%gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} "
"gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,8,7,5]{4,3,2,1,0} %start_indices), "
"offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, "
"start_index_map={0,1,2,3,4}, "
"index_vector_dim=4, slice_sizes={30,29,28,27,26}");
}
TEST_F(HloInstructionTest, StringifyGather_1) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape start_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 5, 7, 6});
Shape gather_result_shape =
ShapeUtil::MakeShape(F32, {10, 9, 7, 6, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Gather");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* start_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, start_indices_tensor_shape, "start_indices"));
  HloInstruction* gather_instruction = builder.AddInstruction(
      HloInstruction::CreateGather(gather_result_shape, input, start_indices,
                                   HloGatherInstruction::MakeGatherDimNumbers(
                                       /*offset_dims=*/{4, 5, 6, 7, 8},
                                       /*collapsed_slice_dims=*/{},
                                       /*start_index_map=*/{0, 1, 2, 3, 4},
                                       /*index_vector_dim=*/2),
                                   /*slice_sizes=*/{30, 29, 28, 27, 26},
                                   /*indices_are_sorted=*/false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(gather_instruction->ToString(),
"%gather = f32[10,9,7,6,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} "
"gather(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,5,7,6]{4,3,2,1,0} %start_indices), "
"offset_dims={4,5,6,7,8}, collapsed_slice_dims={}, "
"start_index_map={0,1,2,3,4}, "
"index_vector_dim=2, slice_sizes={30,29,28,27,26}");
}
TEST_F(HloInstructionTest, StringifyScatter) {
Shape input_tensor_shape = ShapeUtil::MakeShape(F32, {50, 49, 48, 47, 46});
Shape scatter_indices_tensor_shape =
ShapeUtil::MakeShape(S64, {10, 9, 5, 7, 6});
Shape scatter_updates_shape =
ShapeUtil::MakeShape(F32, {10, 9, 7, 6, 30, 29, 28, 27, 26});
HloComputation::Builder builder("Scatter");
HloInstruction* input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_tensor_shape, "input_tensor"));
HloInstruction* scatter_indices =
builder.AddInstruction(HloInstruction::CreateParameter(
1, scatter_indices_tensor_shape, "scatter_indices"));
HloInstruction* scatter_updates =
builder.AddInstruction(HloInstruction::CreateParameter(
2, scatter_updates_shape, "scatter_updates"));
HloComputation::Builder update_builder("Scatter.update");
update_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "p1"));
update_builder.AddInstruction(
HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "p2"));
auto module = CreateNewVerifiedModule();
auto* update_computation =
module->AddEmbeddedComputation(update_builder.Build());
  HloInstruction* scatter_instruction =
      builder.AddInstruction(HloInstruction::CreateScatter(
          input_tensor_shape, input, scatter_indices, scatter_updates,
          update_computation,
          HloScatterInstruction::MakeScatterDimNumbers(
              /*update_window_dims=*/{4, 5, 6, 7, 8},
              /*inserted_window_dims=*/{},
              /*scatter_dims_to_operand_dims=*/{0, 1, 2, 3, 4},
              /*index_vector_dim=*/2),
          /*indices_are_sorted=*/false,
          /*unique_indices=*/false));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
scatter_instruction->ToString(),
"%scatter = f32[50,49,48,47,46]{4,3,2,1,0} "
"scatter(f32[50,49,48,47,46]{4,3,2,1,0} %input_tensor, "
"s64[10,9,5,7,6]{4,3,2,1,0} %scatter_indices, "
"f32[10,9,7,6,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} %scatter_updates), "
"update_window_dims={4,5,6,7,8}, inserted_window_dims={}, "
"scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=2, "
"to_apply=%Scatter.update");
}
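// Async ops wrapping a custom-call print as custom-call-start/update/done
// syntax sugar by default, and as async-start/update/done without it.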
TEST_F(HloInstructionTest, StringifyAsyncOps) {
const Shape s1 = ShapeUtil::MakeShape(F32, {10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20});
const Shape s_tuple = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({s1}), s2, ShapeUtil::MakeShape(S32, {})});
HloComputation::Builder async_builder("AsyncOp");
HloInstruction* param = async_builder.AddInstruction(
HloInstruction::CreateParameter(0, s1, "p0"));
  async_builder.AddInstruction(HloInstruction::CreateCustomCall(
      s2, {param}, /*custom_call_target=*/"foo"));
std::unique_ptr<HloComputation> async_computation = async_builder.Build();
HloComputation::Builder entry_builder("Entry");
HloInstruction* entry_param = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, s1, "p0"));
HloInstruction* async_start =
entry_builder.AddInstruction(HloInstruction::CreateAsyncStart(
          s_tuple, {entry_param}, async_computation.get(),
          /*async_execution_thread=*/"parallel_thread"));
HloInstruction* async_update = entry_builder.AddInstruction(
HloInstruction::CreateAsyncUpdate(s_tuple, async_start));
entry_builder.AddInstruction(
HloInstruction::CreateAsyncDone(s2, async_update));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(entry_builder.Build());
module->AddEmbeddedComputation(std::move(async_computation));
const std::string expected_with_syntax_sugar =
R"(HloModule StringifyAsyncOps, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%custom-call-start = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", custom_call_target="foo"
%custom-call-update = ((f32[10]{0}), f32[20]{0}, s32[]) custom-call-update(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-start)
ROOT %custom-call-done = f32[20]{0} custom-call-done(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-update)
}
)";
EXPECT_EQ(module->ToString(), expected_with_syntax_sugar);
const std::string expected_without_syntax_sugar =
R"(HloModule StringifyAsyncOps, entry_computation_layout={(f32[10]{0})->f32[20]{0}}
%AsyncOp (p0.1: f32[10]) -> f32[20] {
%p0.1 = f32[10]{0} parameter(0)
ROOT %custom-call = f32[20]{0} custom-call(f32[10]{0} %p0.1), custom_call_target="foo"
}, execution_thread="parallel_thread"
ENTRY %Entry (p0: f32[10]) -> f32[20] {
%p0 = f32[10]{0} parameter(0)
%custom-call-start = ((f32[10]{0}), f32[20]{0}, s32[]) async-start(f32[10]{0} %p0), async_execution_thread="parallel_thread", calls=%AsyncOp
%custom-call-update = ((f32[10]{0}), f32[20]{0}, s32[]) async-update(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-start)
ROOT %custom-call-done = f32[20]{0} async-done(((f32[10]{0}), f32[20]{0}, s32[]) %custom-call-update)
}
)";
auto options = HloPrintOptions().set_syntax_sugar_async_ops(false);
EXPECT_EQ(module->ToString(options), expected_without_syntax_sugar);
}
TEST_F(HloInstructionTest, StringifyAsyncOpsWithReduceScatter) {
const Shape rs_input_shape = ShapeUtil::MakeShape(F32, {20});
const Shape rs_output_shape = ShapeUtil::MakeShape(F32, {10});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> async_computation;
{
HloComputation::Builder async_builder("AsyncOp");
HloInstruction* param = async_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "pasync"));
    async_builder.AddInstruction(HloInstruction::CreateReduceScatter(
        rs_output_shape, {param}, add_computation.get(),
        CollectiveDeviceList(), /*constrain_layout=*/false,
        /*channel_id=*/std::nullopt, /*use_global_device_ids=*/false,
        /*scatter_dimension=*/0));
async_computation = async_builder.Build();
}
const Shape async_start_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeTupleShape({rs_input_shape}), rs_output_shape});
HloComputation::Builder entry_builder("Entry");
HloInstruction* entry_param = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "pentry"));
HloInstruction* async_start =
entry_builder.AddInstruction(HloInstruction::CreateAsyncStart(
        async_start_shape, {entry_param}, async_computation.get(),
        /*async_execution_thread=*/"parallel_thread"));
HloInstruction* async_update = entry_builder.AddInstruction(
HloInstruction::CreateAsyncUpdate(async_start_shape, async_start));
entry_builder.AddInstruction(
HloInstruction::CreateAsyncDone(rs_output_shape, async_update));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(entry_builder.Build());
module->AddEmbeddedComputation(std::move(async_computation));
module->AddEmbeddedComputation(std::move(add_computation));
const std::string expected_with_syntax_sugar =
R"(HloModule StringifyAsyncOpsWithReduceScatter, entry_computation_layout={(f32[20]{0})->f32[10]{0}}
%add (p0: f32[], p1: f32[]) -> f32[] {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %p0, f32[] %p1)
}, execution_thread="parallel_thread"
ENTRY %Entry (pentry: f32[20]) -> f32[10] {
%pentry = f32[20]{0} parameter(0)
%reduce-scatter-start = ((f32[20]{0}), f32[10]{0}) reduce-scatter-start(f32[20]{0} %pentry), async_execution_thread="parallel_thread", replica_groups={}, dimensions={0}, to_apply=%add
%reduce-scatter-update = ((f32[20]{0}), f32[10]{0}) reduce-scatter-update(((f32[20]{0}), f32[10]{0}) %reduce-scatter-start)
ROOT %reduce-scatter-done = f32[10]{0} reduce-scatter-done(((f32[20]{0}), f32[10]{0}) %reduce-scatter-update)
}
)";
EXPECT_EQ(module->ToString(), expected_with_syntax_sugar);
const std::string expected_without_syntax_sugar =
R"(HloModule StringifyAsyncOpsWithReduceScatter, entry_computation_layout={(f32[20]{0})->f32[10]{0}}
%add (p0: f32[], p1: f32[]) -> f32[] {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %p0, f32[] %p1)
}, execution_thread="parallel_thread"
%AsyncOp (pasync: f32[20]) -> f32[10] {
%pasync = f32[20]{0} parameter(0)
ROOT %reduce-scatter = f32[10]{0} reduce-scatter(f32[20]{0} %pasync), replica_groups={}, dimensions={0}, to_apply=%add
}, execution_thread="parallel_thread"
ENTRY %Entry (pentry: f32[20]) -> f32[10] {
%pentry = f32[20]{0} parameter(0)
%reduce-scatter-start = ((f32[20]{0}), f32[10]{0}) async-start(f32[20]{0} %pentry), async_execution_thread="parallel_thread", calls=%AsyncOp
%reduce-scatter-update = ((f32[20]{0}), f32[10]{0}) async-update(((f32[20]{0}), f32[10]{0}) %reduce-scatter-start)
ROOT %reduce-scatter-done = f32[10]{0} async-done(((f32[20]{0}), f32[10]{0}) %reduce-scatter-update)
}
)";
auto options = HloPrintOptions().set_syntax_sugar_async_ops(false);
EXPECT_EQ(module->ToString(options), expected_without_syntax_sugar);
}
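// Canonical printing renames instructions to tmp_N and inlines the bodies
// of called computations.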
TEST_F(HloInstructionTest, CanonicalStringificationFusion) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto options = HloPrintOptions().Canonical();
EXPECT_EQ(dot->ToString(options),
"f32[5,20]{1,0} dot(f32[5,10]{1,0}, f32[10,20]{1,0}), "
"lhs_contracting_dims={1}, rhs_contracting_dims={0}");
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
constexpr char kParallelThreadName[] = "parallel_thread";
computation->SetExecutionThread(kParallelThreadName);
HloInstruction* fusion = computation->CreateFusionInstruction(
{dot, reshape}, HloInstruction::FusionKind::kLoop);
  fusion->set_called_computations_execution_thread(
      kParallelThreadName,
      /*skip_async_execution_thread_overwrite=*/false);
const std::string expected_fusion =
R"(f32[5,20]{1,0} fusion(f32[5,10]{1,0}, f32[20,10]{1,0}), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="parallel_thread")";
EXPECT_EQ(fusion->ToString(options), expected_fusion);
}
TEST_F(HloInstructionTest, CanonicalStringificationWhile) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({dot, reshape},
HloInstruction::FusionKind::kLoop);
HloInstruction* loop = builder.AddInstruction(
HloInstruction::CreateWhile(sout, computation, computation, x));
auto options = HloPrintOptions().Canonical();
const std::string expected_loop =
R"(f32[5,20]{1,0} while(f32[5,10]{1,0}), condition=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, body=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
EXPECT_EQ(loop->ToString(options), expected_loop);
}
TEST_F(HloInstructionTest, CanonicalStringificationConditional) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
HloInstruction* dot = builder.AddInstruction(HloInstruction::CreateDot(
sout, x, reshape, dot_dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->CreateFusionInstruction({dot, reshape},
HloInstruction::FusionKind::kLoop);
auto pred = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* conditional =
builder.AddInstruction(HloInstruction::CreateConditional(
sout, pred, x, computation, x, computation));
auto options = HloPrintOptions().Canonical();
const std::string expected_conditional =
R"(f32[5,20]{1,0} conditional(pred[], f32[5,10]{1,0}, f32[5,10]{1,0}), true_computation=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
}, false_computation=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
ROOT tmp_2 = f32[5,20]{1,0} fusion(f32[5,10]{1,0} tmp_0, f32[20,10]{1,0} tmp_1), kind=kLoop, calls=
{
tmp_0 = f32[5,10]{1,0} parameter(0)
tmp_1 = f32[20,10]{1,0} parameter(1)
tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
})";
EXPECT_EQ(conditional->ToString(options), expected_conditional);
}
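// Module cloning must be deep: every computation and instruction in the
// clone reports the clone as its parent module.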
TEST_F(HloInstructionTest, CheckDeepClone) {
const char* const hlo_string = R"(
HloModule Module
addy (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT zadd = s32[] add(lhs, rhs)
}
calla (x: s32[]) -> s32[] {
x = s32[] parameter(0)
reduce = s32[] reduce-window(x, x), to_apply=addy
ROOT xadd = s32[] add(x, reduce)
}
body (bparam: s32[]) -> s32[] {
constant = s32[] constant(1)
bparam = s32[] parameter(0)
v = s32[] call(bparam), to_apply=calla
ROOT add = s32[] add(constant, bparam)
}
condition (cparam: s32[]) -> pred[] {
xconstant = s32[] constant(5)
cparam = s32[] parameter(0)
ROOT greater-than = pred[] compare(xconstant, cparam), direction=GT
}
ENTRY entry (param: s32[]) -> s32[] {
eparam = s32[] parameter(0)
ROOT while = s32[] while(eparam), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> clone = module->Clone();
for (HloComputation* computation : clone->computations()) {
EXPECT_EQ(computation->parent(), clone.get());
for (HloInstruction* instruction : computation->instructions()) {
EXPECT_EQ(instruction->GetModule(), clone.get());
}
}
}
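// Identical() must take the raw backend config string into account.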
TEST_F(HloInstructionTest, IdenticalAccountsForBackendConfig) {
const Shape shape = ShapeUtil::MakeShape(F32, {42});
HloComputation::Builder builder("test");
HloInstruction* p =
builder.AddInstruction(HloInstruction::CreateParameter(0, shape, "p"));
HloInstruction* add1 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p, p));
HloInstruction* add2 = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p, p));
EXPECT_TRUE(add1->Identical(*add2));
add1->set_raw_backend_config_string("abc");
EXPECT_FALSE(add1->Identical(*add2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallWindow) {
  auto instr1 = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
Window w = window_util::MakeWindow({1, 2, 3});
instr1->set_window(w);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallDnums) {
  auto instr1 = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
ConvolutionDimensionNumbers dnums;
dnums.set_output_batch_dimension(42);
instr1->set_convolution_dimension_numbers(dnums);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, IdenticalAccountsForCustomCallHasSideEffect) {
  auto instr1 = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
auto instr2 = instr1->Clone();
EXPECT_TRUE(instr1->Identical(*instr2));
auto custom_call_instr1 = Cast<HloCustomCallInstruction>(instr1.get());
custom_call_instr1->set_custom_call_has_side_effect(true);
EXPECT_FALSE(instr1->Identical(*instr2));
}
TEST_F(HloInstructionTest, CloneWindowOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
Window w = window_util::MakeWindow({1, 2, 3});
instr->set_window(w);
auto clone = instr->Clone();
EXPECT_TRUE(protobuf_util::ProtobufEquals(clone->window(), w))
<< clone->window().DebugString();
}
TEST_F(HloInstructionTest, CloneDnumsOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
ConvolutionDimensionNumbers dnums;
dnums.set_output_batch_dimension(42);
instr->set_convolution_dimension_numbers(dnums);
auto clone = instr->Clone();
EXPECT_TRUE(protobuf_util::ProtobufEquals(
clone->convolution_dimension_numbers(), dnums))
<< clone->convolution_dimension_numbers().DebugString();
}
TEST_F(HloInstructionTest, CloneHasSideEffectOnCustomCall) {
  auto instr = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
auto custom_call_instr = Cast<HloCustomCallInstruction>(instr.get());
EXPECT_FALSE(custom_call_instr->custom_call_has_side_effect());
custom_call_instr->set_custom_call_has_side_effect(true);
EXPECT_TRUE(custom_call_instr->custom_call_has_side_effect());
auto clone = instr->Clone();
auto custom_call_clone = Cast<HloCustomCallInstruction>(clone.get());
EXPECT_TRUE(custom_call_clone->custom_call_has_side_effect());
}
TEST_F(HloInstructionTest, CustomCallHasSideEffect) {
  auto instr = HloInstruction::CreateCustomCall(
      ShapeUtil::MakeShape(F32, {}), /*operands=*/{},
      /*custom_call_target=*/"foo");
auto custom_call_instr = Cast<HloCustomCallInstruction>(instr.get());
EXPECT_FALSE(instr->HasSideEffect());
custom_call_instr->set_custom_call_has_side_effect(true);
EXPECT_TRUE(instr->HasSideEffect());
}
TEST_F(HloInstructionTest, PreserveOperandPrecisionOnCloneConv) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
arg0 = f32[1,2,1] parameter(0)
arg1 = f32[1,1,1] parameter(1)
ROOT conv = f32[1,2,1] convolution(arg0, arg1), window={size=1},
dim_labels=b0f_0io->b0f, operand_precision={high,default}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
auto* conv = module->entry_computation()->root_instruction();
auto clone = conv->Clone();
EXPECT_THAT(
clone->precision_config().operand_precision(),
::testing::ElementsAre(PrecisionConfig::HIGH, PrecisionConfig::DEFAULT));
}
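// A reshape of the fusion parameter, even when consumed twice, does not
// count as reusing the parameter's elements.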
TEST_F(HloInstructionTest, ReuseReshapeOfFusionParameter) {
constexpr char kHloString[] = R"(
HloModule test_module
f {
p = f32[3,2] parameter(0)
r = f32[2,3] reshape(p)
x = f32[2,3] multiply(r, r)
y = f32[2,3] add(r, r)
ROOT sum = f32[2,3] add(x, y)
}
ENTRY test {
p = f32[3,2] parameter(0)
ROOT fusion = f32[2,3] fusion(p), calls=f, kind=kLoop
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
}
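// Two distinct reshapes of the same parameter do count as element reuse.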
TEST_F(HloInstructionTest, ReuseMultipleReshapesOfFusionParameter) {
constexpr char kHloString[] = R"(
HloModule test_module
f {
p = f32[3,2] parameter(0)
r1 = f32[2,3] reshape(p)
r2 = f32[6,1] reshape(p)
ROOT result = (f32[2,3], f32[6,1]) tuple(r1, r2)
}
ENTRY test {
p = f32[3,2] parameter(0)
ROOT fusion = (f32[2,3], f32[6,1]) fusion(p), calls=f, kind=kLoop
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(root->ReusesOperandElements(0));
}
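// Bitcast is layout-only and reuses no operand elements.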
TEST_F(HloInstructionTest, BitcastDoesNotReuseElements) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
p = f32[3,2]{0,1} parameter(0)
ROOT bitcast = f32[6] bitcast(p)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
}
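// Gather reports no element reuse for either operand.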
TEST_F(HloInstructionTest, GatherDoesNotReuseElements) {
constexpr char kHloString[] = R"(
HloModule test_module
ENTRY test {
input = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
idx = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
ROOT gather = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0}
gather(input, idx), offset_dims={4,5,6,7,8}, collapsed_slice_dims={},
start_index_map={0,1,2,3,4}, index_vector_dim=4,
slice_sizes={30,29,28,27,26}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(root->ReusesOperandElements(0));
EXPECT_FALSE(root->ReusesOperandElements(1));
}
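// Backend configs are stored in serialized form; infinities and NaNs must
// survive the set_backend_config/backend_config round trip.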
TEST_F(HloInstructionTest, BackendConfigCanContainNonFiniteFloats) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
auto dot = b.AddInstruction(HloInstruction::CreateDot(
shape, p0, p0, dot_dnums, DefaultPrecisionConfig(2)));
gpu::GpuBackendConfig gpu_config;
gpu::GemmBackendConfig& orig_config =
*gpu_config.mutable_gemm_backend_config();
orig_config.set_alpha_real(std::numeric_limits<double>::infinity());
orig_config.set_alpha_imag(std::numeric_limits<double>::quiet_NaN());
TF_ASSERT_OK(dot->set_backend_config(gpu_config));
TF_ASSERT_OK_AND_ASSIGN(auto new_gpu_config,
dot->backend_config<gpu::GpuBackendConfig>());
EXPECT_GT(new_gpu_config.gemm_backend_config().alpha_real(),
std::numeric_limits<double>::max());
EXPECT_NE(new_gpu_config.gemm_backend_config().alpha_imag(),
new_gpu_config.gemm_backend_config().alpha_imag());
}
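// The to_apply computation of a reduce-scatter must identify the collective
// as its caller.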
TEST_F(HloInstructionTest, VerifyToApplyRegionPointsToReduceScatter) {
const Shape rs_input_shape = ShapeUtil::MakeShape(F32, {20});
const Shape rs_output_shape = ShapeUtil::MakeShape(F32, {10});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, rs_input_shape, "input"));
main_builder.AddInstruction(HloInstruction::CreateReduceScatter(
rs_output_shape, {param}, add_computation.get(), CollectiveDeviceList(),
false, std::nullopt, false, 0));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(main_builder.Build());
module->AddEmbeddedComputation(std::move(add_computation));
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (!comp->IsEntryComputation()) {
EXPECT_TRUE(comp->IsCollectiveCalledComputation());
EXPECT_EQ(comp->CollectiveCallInstruction(),
module->entry_computation()->root_instruction());
}
}
}
TEST_F(HloInstructionTest, VerifyToApplyRegionPointsToAllReduce) {
const Shape ar_input_shape = ShapeUtil::MakeShape(F32, {20});
std::unique_ptr<HloComputation> add_computation;
{
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder add_builder("add");
HloInstruction* param0 = add_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* param1 = add_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "p1"));
add_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param0, param1));
add_computation = add_builder.Build();
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, ar_input_shape, "input"));
main_builder.AddInstruction(HloInstruction::CreateAllReduce(
ar_input_shape, {param}, add_computation.get(), CollectiveDeviceList(),
false, std::nullopt, false));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(main_builder.Build());
module->AddEmbeddedComputation(std::move(add_computation));
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (!comp->IsEntryComputation()) {
EXPECT_TRUE(comp->IsCollectiveCalledComputation());
EXPECT_EQ(comp->CollectiveCallInstruction(),
module->entry_computation()->root_instruction());
}
}
}
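// `send` already lists `recv` as a control predecessor, so the extra
// send-done -> recv control edge added below closes a cycle; Accept() must
// fail and the error message must print the cycle path.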
TEST_F(HloInstructionTest, PrintCycle) {
constexpr char kHloString[] = R"(
ENTRY main {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all),
channel_id=2, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}}"
}, control-predecessors={recv}
send-done = token[] send-done(send), channel_id=2
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
HloInstruction* recv = FindInstruction(module.get(), "recv");
HloInstruction* send_done = FindInstruction(module.get(), "send-done");
ASSERT_IS_OK(send_done->AddControlDependencyTo(recv));
HloInstruction* root = FindInstruction(module.get(), "recv-data");
NodeCollectorAndPostProcessor visitor;
auto status = root->Accept(&visitor);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
::testing::HasSubstr("recv\n send\n send-done\n recv"));
ASSERT_IS_OK(send_done->DropAllControlDeps());
}
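// Checks the bidirectional link between a kWhile instruction and its body:
// the body computation knows its caller (WhileCallInstruction) and the
// caller's while_body() points back at that computation.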
TEST_F(HloInstructionTest, VerifyBodyComputationPointsToWhile) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder cond_builder("cond");
{
HloInstruction* param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param,
constant, ComparisonDirection::kLt));
}
auto cond_computation = module->AddEmbeddedComputation(cond_builder.Build());
HloComputation::Builder body_builder("body");
{
HloInstruction* param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
}
auto body_computation = module->AddEmbeddedComputation(body_builder.Build());
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "input"));
main_builder.AddInstruction(HloInstruction::CreateWhile(
scalar_shape, cond_computation, body_computation, param));
module->AddEntryComputation(main_builder.Build());
int num_while_body_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsWhileBodyComputation()) {
num_while_body_comp += 1;
EXPECT_EQ(comp->WhileCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_while_body_comp, 1);
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kWhile) {
HloComputation* while_body = instruction->while_body();
EXPECT_TRUE(while_body->IsWhileBodyComputation());
HloInstruction* while_back_ref = while_body->WhileCallInstruction();
EXPECT_EQ(while_back_ref->while_body(), while_body);
}
}
}
TEST_F(HloInstructionTest,
VerifyBranchComputationPointsToConditonal_TrueFalseConstructor) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
HloComputation::Builder branch_0_builder("branch_0");
{
HloInstruction* param = branch_0_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = branch_0_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
branch_0_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param, constant));
}
auto branch_0_computation =
module->AddEmbeddedComputation(branch_0_builder.Build());
HloComputation::Builder branch_1_builder("branch_1");
{
HloInstruction* param = branch_1_builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
branch_1_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
}
auto branch_1_computation =
module->AddEmbeddedComputation(branch_1_builder.Build());
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* pred_param =
main_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(PRED, {}), "pred_param"));
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "input"));
main_builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape, pred_param, param,
branch_0_computation,
param,
branch_1_computation));
module->AddEntryComputation(main_builder.Build());
int num_conditional_branch_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsConditionalBranchComputation()) {
num_conditional_branch_comp += 1;
EXPECT_EQ(comp->ConditionalCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_conditional_branch_comp, 2);
}
TEST_F(HloInstructionTest,
VerifyBranchComputationPointsToConditonal_BranchIndexConstructor) {
auto module = CreateNewVerifiedModule();
const Shape scalar_shape = ShapeUtil::MakeScalarShape(F32);
std::vector<HloComputation*> branch_computations;
{
HloComputation::Builder builder("branch_0");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1024.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, param, constant));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
{
HloComputation::Builder builder("branch_1");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMultiply, param, param));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
{
HloComputation::Builder builder("branch_2");
HloInstruction* param = builder.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "p0"));
builder.AddInstruction(
HloInstruction::CreateUnary(scalar_shape, HloOpcode::kLog, param));
branch_computations.push_back(
module->AddEmbeddedComputation(builder.Build()));
}
std::unique_ptr<HloComputation> main_computation;
HloComputation::Builder main_builder("Entry");
HloInstruction* branch_index =
main_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(S32), "branch_index_param"));
HloInstruction* param = main_builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "input"));
std::vector<HloInstruction*> branch_computation_args(
branch_computations.size(), param);
main_builder.AddInstruction(HloInstruction::CreateConditional(
scalar_shape, branch_index, branch_computations,
branch_computation_args));
module->AddEntryComputation(main_builder.Build());
int num_conditional_branch_comp = 0;
for (HloComputation* comp : module->MakeComputationPostOrder()) {
if (comp->IsConditionalBranchComputation()) {
num_conditional_branch_comp += 1;
EXPECT_EQ(comp->ConditionalCallInstruction(),
module->entry_computation()->root_instruction());
}
}
EXPECT_EQ(num_conditional_branch_comp, branch_computations.size());
}
TEST_F(HloInstructionTest, BackendConfigCopiedToDerived) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p1"));
auto add = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
gpu::GpuBackendConfig gpu_config;
gpu_config.set_operation_queue_id(2);
TF_ASSERT_OK(add->set_backend_config(gpu_config));
auto add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p0));
add->SetupDerivedInstruction(add2);
auto backend_config = add2->backend_config<gpu::GpuBackendConfig>();
EXPECT_TRUE(backend_config.ok());
EXPECT_EQ(backend_config->operation_queue_id(), 2);
}
TEST_F(HloInstructionTest, BackendConfigNotCopiedToDerivedWithDiffOpcode) {
HloComputation::Builder b(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {2, 2});
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p1"));
auto or1 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kOr, p0, p1));
gpu::GpuBackendConfig gpu_config;
gpu_config.set_operation_queue_id(2);
TF_ASSERT_OK(or1->set_backend_config(gpu_config));
auto add2 = b.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kAdd, p0, p1));
or1->SetupDerivedInstruction(add2);
EXPECT_FALSE(add2->has_backend_config());
}
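// Merging the producer into the consumer rewires producer outputs that were
// plain parameter passthroughs (gte0/gte3) to the entry parameters, while
// `add` remains an extra fusion output because the root tuple still uses
// gte1.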
TEST_F(HloInstructionTest,
MergeMultiOutputProducerFusionIntoMultiOutputFusion) {
const std::string& hlo_string = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
param2.0 = f32[10]{0} parameter(2)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_producer
gte0 = f32[10]{0} get-tuple-element(producer), index=0
gte1 = f32[10]{0} get-tuple-element(producer), index=1
gte2 = f32[10]{0} get-tuple-element(producer), index=2
gte3 = f32[10]{0} get-tuple-element(producer), index=3
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer
gte4 = f32[10]{0} get-tuple-element(consumer), index=0
gte5 = f32[10]{0} get-tuple-element(consumer), index=1
gte6 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte0, gte1, gte3, gte4, gte5, gte6)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* producer = FindInstruction(module.get(), "producer");
HloInstruction* consumer = FindInstruction(module.get(), "consumer");
consumer->MergeFusionInstructionIntoMultiOutput(producer);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Parameter(1), m::GetTupleElement(m::Fusion(&fusion), 3),
m::Parameter(0), m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1),
m::GetTupleElement(m::Fusion(), 2))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloInstructionTest,
MergeMultiOutputProducerFusionIntoMultiOutputFusionAvoidDuplicateRoots) {
const std::string& hlo_string = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(add, sub)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param0.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
producer = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_producer
gte1 = f32[10]{0} get-tuple-element(producer), index=0
gte2 = f32[10]{0} get-tuple-element(producer), index=1
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2), kind=kLoop, calls=mof_consumer
gte3 = f32[10]{0} get-tuple-element(consumer), index=0
gte4 = f32[10]{0} get-tuple-element(consumer), index=1
gte5 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte1, gte3, gte4, gte5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* producer = FindInstruction(module.get(), "producer");
HloInstruction* consumer = FindInstruction(module.get(), "consumer");
consumer->MergeFusionInstructionIntoMultiOutput(producer);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion), 2),
m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1),
m::GetTupleElement(m::Fusion(), 2))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloInstructionTest,
MergeMultiOutputSiblingFusionsAvoidDuplicateFusionParameters) {
const std::string& hlo_string = R"(
HloModule mof
mof_sibling1 {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
mof_sibling2 {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
mul = f32[10]{0} multiply(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(mul, param1.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
sibling1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_sibling1
gte0 = f32[10]{0} get-tuple-element(sibling1), index=0
gte1 = f32[10]{0} get-tuple-element(sibling1), index=1
sibling2 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=mof_sibling2
gte2 = f32[10]{0} get-tuple-element(sibling2), index=0
gte3 = f32[10]{0} get-tuple-element(sibling2), index=1
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(gte0, gte1, gte2, gte3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* sibling1 = FindInstruction(module.get(), "sibling1");
HloInstruction* sibling2 = FindInstruction(module.get(), "sibling2");
sibling2->MergeFusionInstructionIntoMultiOutput(sibling1);
HloInstruction* fusion = nullptr;
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Parameter(1),
m::GetTupleElement(m::Fusion(&fusion), 2),
m::GetTupleElement(m::Fusion(), 0),
m::GetTupleElement(m::Fusion(), 1))));
EXPECT_THAT(fusion->fused_instructions_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Multiply(m::Parameter(0), m::Parameter(1)),
m::Parameter(1),
m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloInstructionTest, UnfuseInstruction) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused, GmockMatch(m::Add(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloInstructionTest, UnfuseInstruction2) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
add = f32[10]{0} add(param0, param1)
add2 = f32[10]{0} add(add, param1)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add2)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add2 = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
HloInstruction* add = add2->mutable_operand(0);
EXPECT_FALSE(fusion->UnfuseInstruction(add2).ok());
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused, GmockMatch(m::Add(m::Parameter(0), m::Parameter(1))));
}
TEST_F(HloInstructionTest, UnfuseInstructionWithConstantOperand) {
const std::string& hlo_string = R"(
HloModule mof
fusion_comp {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
const = f32[] constant(1.0)
broadcast = f32[10]{0} broadcast(const), dimensions={}
add = f32[10]{0} add(param0, broadcast)
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(param1, add)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
fusion.1 = (f32[10]{0}, f32[10]{0}) fusion(p0, p1), kind=kLoop, calls=fusion_comp
gte0 = f32[10]{0} get-tuple-element(fusion.1), index=0
gte1 = f32[10]{0} get-tuple-element(fusion.1), index=1
ROOT res = (f32[10]{0}, f32[10]{0}) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
HloInstruction* add = fusion->fused_instructions_computation()
->root_instruction()
->mutable_operand(1);
TF_ASSERT_OK_AND_ASSIGN(auto unfused, fusion->UnfuseInstruction(add));
EXPECT_THAT(unfused,
GmockMatch(m::Add(m::Parameter(0), m::Broadcast(m::Constant()))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_instruction.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_instruction_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0fdc034f-4003-4eb4-82c7-e682af795f59 | cpp | tensorflow/tensorflow | c_api_debug | tensorflow/c/eager/c_api_debug.cc | tensorflow/c/eager/c_api_debug_test.cc | #include <vector>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/eager/c_api.h"
#include "tensorflow/c/eager/tfe_tensor_debug_info_internal.h"
#include "tensorflow/c/eager/tfe_tensorhandle_internal.h"
#include "tensorflow/c/tf_status_internal.h"
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/platform/status.h"
using tensorflow::string;
namespace {
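// Returns the on-device dimensions of `handle`. On failure, sets *status and
// returns the (possibly partial) shape collected so far.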
std::vector<int64_t> TensorShapeAsVector(const tensorflow::TensorHandle& handle,
tensorflow::Status* status) {
std::vector<int64_t> shape;
int rank = -1;
*status = handle.NumDims(&rank);
if (!status->ok()) {
return shape;
}
shape.reserve(rank);
for (int i = 0; i < rank; ++i) {
int64_t dim;
*status = handle.Dim(i, &dim);
if (!status->ok()) {
return shape;
}
shape.push_back(dim);
}
return shape;
}
}
extern "C" {
TF_CAPI_EXPORT extern TFE_TensorDebugInfo* TFE_TensorHandleTensorDebugInfo(
TFE_TensorHandle* h, TF_Status* status) {
tensorflow::TensorHandle* handle =
TensorHandleFromInterface(tensorflow::unwrap(h));
const tensorflow::Tensor* tensor;
status->status = handle->Tensor(&tensor);
if (!status->status.ok()) {
return nullptr;
}
std::vector<int64_t> dev_dims = TensorShapeAsVector(*handle, &status->status);
if (!status->status.ok()) {
return nullptr;
}
return new TFE_TensorDebugInfo(dev_dims);
}
TF_CAPI_EXPORT extern void TFE_DeleteTensorDebugInfo(
TFE_TensorDebugInfo* debug_info) {
delete debug_info;
}
TF_CAPI_EXPORT extern int TFE_TensorDebugInfoOnDeviceNumDims(
TFE_TensorDebugInfo* debug_info) {
return debug_info->dev_dims.size();
}
TF_CAPI_EXPORT extern int64_t TFE_TensorDebugInfoOnDeviceDim(
TFE_TensorDebugInfo* debug_info, int dim_index) {
return debug_info->dev_dims[dim_index];
}
} | #include "tensorflow/c/eager/c_api.h"
#include <string.h>
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
TEST(CApiDebug, ScalarCPU) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestScalarTensorHandle(ctx, 1.0f);
TFE_TensorDebugInfo* debug_info = TFE_TensorHandleTensorDebugInfo(h, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(0, TFE_TensorDebugInfoOnDeviceNumDims(debug_info));
TFE_DeleteTensorDebugInfo(debug_info);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
}
TEST(CApiDebug, 2DCPU) {
TF_Status* status = TF_NewStatus();
TFE_ContextOptions* opts = TFE_NewContextOptions();
TFE_Context* ctx = TFE_NewContext(opts, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
TFE_DeleteContextOptions(opts);
TFE_TensorHandle* h = TestMatrixTensorHandle3X2(ctx);
TFE_TensorDebugInfo* debug_info = TFE_TensorHandleTensorDebugInfo(h, status);
CHECK_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
ASSERT_EQ(2, TFE_TensorDebugInfoOnDeviceNumDims(debug_info));
EXPECT_EQ(3, TFE_TensorDebugInfoOnDeviceDim(debug_info, 0));
EXPECT_EQ(2, TFE_TensorDebugInfoOnDeviceDim(debug_info, 1));
TFE_DeleteTensorDebugInfo(debug_info);
TFE_DeleteTensorHandle(h);
TFE_DeleteContext(ctx);
TF_DeleteStatus(status);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_debug.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/eager/c_api_debug_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3a54a4f-5588-4272-a712-c729944c2433 | cpp | google/tensorstore | sha256 | tensorstore/internal/digest/sha256.cc | tensorstore/internal/digest/sha256_test.cc | #include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include "absl/strings/cord.h"
namespace tensorstore {
namespace internal {
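// A Cord may be stored in multiple chunks; hashing each chunk in order
// yields the same digest as hashing the flattened bytes.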
void SHA256Digester::Write(const absl::Cord& cord) {
for (std::string_view chunk : cord.Chunks()) {
Write(chunk);
}
}
}
} | #include "tensorstore/internal/digest/sha256.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/escaping.h"
using ::tensorstore::internal::SHA256Digester;
namespace {
TEST(Sha256Digest, Basic) {
auto digest = [](auto input) {
SHA256Digester digester;
digester.Write(input);
auto digest = digester.Digest();
return absl::BytesToHexString(std::string_view(
reinterpret_cast<char*>(digest.data()), digest.size()));
};
EXPECT_THAT(
digest(std::string_view("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
EXPECT_THAT(
digest(absl::Cord("abc")),
testing::Eq(
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/digest/sha256.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/digest/sha256_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
03aad0e0-543f-4086-af9c-38f3b63304f7 | cpp | tensorflow/tensorflow | op_resolver | tensorflow/lite/core/api/op_resolver.cc | tensorflow/lite/core/api/op_resolver_test.cc | #include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
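// Resolves a flatbuffer OperatorCode to a TfLiteRegistration. Builtin ops go
// through FindOp(BuiltinOperator, version) and CUSTOM ops through
// FindOp(name, version). An unknown custom op fails without logging; the
// other failure paths report through `error_reporter`.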
TfLiteStatus GetRegistrationFromOpCode(
const OperatorCode* opcode, const OpResolver& op_resolver,
ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
TfLiteStatus status = kTfLiteOk;
*registration = nullptr;
auto builtin_code = GetBuiltinCode(opcode);
int version = opcode->version();
if (builtin_code > BuiltinOperator_MAX) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Op builtin_code out of range: %d. Are you using old TFLite binary "
"with newer model?",
builtin_code);
status = kTfLiteError;
} else if (builtin_code != BuiltinOperator_CUSTOM) {
*registration = op_resolver.FindOp(builtin_code, version);
if (*registration == nullptr) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Didn't find op for builtin opcode '%s' version '%d'. "
"An older version of this builtin might be supported. "
"Are you using an old TFLite binary with a newer model?\n",
EnumNameBuiltinOperator(builtin_code), version);
status = kTfLiteError;
}
} else if (!opcode->custom_code()) {
TF_LITE_REPORT_ERROR(
error_reporter,
"Operator with CUSTOM builtin_code has no custom_code.\n");
status = kTfLiteError;
} else {
const char* name = opcode->custom_code()->c_str();
*registration = op_resolver.FindOp(name, version);
if (*registration == nullptr) {
status = kTfLiteError;
}
}
return status;
}
} | #include "tensorflow/lite/core/api/op_resolver.h"
#include <cstring>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
void* MockInit(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void MockFree(TfLiteContext* context, void* buffer) {
}
TfLiteStatus MockPrepare(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
TfLiteStatus MockInvoke(TfLiteContext* context, TfLiteNode* node) {
return kTfLiteOk;
}
class MockOpResolver : public OpResolver {
public:
const TfLiteRegistration* FindOp(BuiltinOperator op,
int version) const override {
if (op == BuiltinOperator_CONV_2D) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
const TfLiteRegistration* FindOp(const char* op, int version) const override {
if (strcmp(op, "mock_custom") == 0) {
static TfLiteRegistration r = {MockInit, MockFree, MockPrepare,
MockInvoke};
return &r;
} else {
return nullptr;
}
}
};
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() : buffer_size_(0) {}
int Report(const char* format, va_list args) override {
buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
return buffer_size_;
}
char* GetBuffer() { return buffer_; }
int GetBufferSize() { return buffer_size_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
int buffer_size_;
};
}
TEST(OpResolver, TestResolver) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
const TfLiteRegistration* registration =
resolver->FindOp(BuiltinOperator_CONV_2D, 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp(BuiltinOperator_CAST, 0);
EXPECT_EQ(nullptr, registration);
registration = resolver->FindOp("mock_custom", 0);
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
registration = resolver->FindOp("nonexistent_custom", 0);
EXPECT_EQ(nullptr, registration);
}
TEST(OpResolver, TestGetRegistrationFromOpCodeConv) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset =
CreateOperatorCodeDirect(builder, BuiltinOperator_CONV_2D, nullptr, 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(conv_code, *resolver, reporter,
®istration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCast) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset =
CreateOperatorCodeDirect(builder, BuiltinOperator_CAST, nullptr, 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(conv_code, *resolver,
reporter, ®istration));
EXPECT_EQ(nullptr, registration);
EXPECT_NE(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset = CreateOperatorCodeDirect(
builder, BuiltinOperator_CUSTOM, "mock_custom", 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteOk, GetRegistrationFromOpCode(conv_code, *resolver, reporter,
®istration));
EXPECT_NE(nullptr, registration);
EXPECT_EQ(nullptr, registration->init(nullptr, nullptr, 0));
EXPECT_EQ(kTfLiteOk, registration->prepare(nullptr, nullptr));
EXPECT_EQ(kTfLiteOk, registration->invoke(nullptr, nullptr));
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
TEST(OpResolver, TestGetRegistrationFromOpCodeNonexistentCustom) {
MockOpResolver mock_resolver;
OpResolver* resolver = &mock_resolver;
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
flatbuffers::FlatBufferBuilder builder;
flatbuffers::Offset<OperatorCode> conv_offset = CreateOperatorCodeDirect(
builder, BuiltinOperator_CUSTOM, "nonexistent_custom", 0);
builder.Finish(conv_offset);
void* conv_pointer = builder.GetBufferPointer();
const OperatorCode* conv_code =
flatbuffers::GetRoot<OperatorCode>(conv_pointer);
const TfLiteRegistration* registration = nullptr;
EXPECT_EQ(kTfLiteError, GetRegistrationFromOpCode(conv_code, *resolver,
reporter, ®istration));
EXPECT_EQ(nullptr, registration);
EXPECT_EQ(0, mock_reporter.GetBufferSize());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/api/op_resolver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c9a4925f-1dc5-4ac0-9aa6-484ddcff8724 | cpp | tensorflow/tensorflow | composable_splitter | tensorflow/tools/proto_splitter/cc/composable_splitter.h | tensorflow/tools/proto_splitter/cc/composable_splitter_test.cc | #ifndef TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_COMPOSABLE_SPLITTER_H_
#define TENSORFLOW_TOOLS_PROTO_SPLITTER_CC_COMPOSABLE_SPLITTER_H_
#include <vector>
#include "tensorflow/tools/proto_splitter/cc/composable_splitter_base.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tools::proto_splitter {
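// Thin adapter over ComposableSplitterBase that records the message being
// split; subclasses implement BuildChunks() to do the actual chunking.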
class ComposableSplitter : public ComposableSplitterBase {
public:
explicit ComposableSplitter(tsl::protobuf::Message* message)
: ComposableSplitterBase(message), message_(message) {}
explicit ComposableSplitter(tsl::protobuf::Message* message,
ComposableSplitterBase* parent_splitter,
std::vector<FieldType>* fields_in_parent)
: ComposableSplitterBase(message, parent_splitter, fields_in_parent),
message_(message) {}
protected:
tsl::protobuf::Message* message() { return message_; }
private:
tsl::protobuf::Message* message_;
};
}
}
#endif | #include "tensorflow/tools/proto_splitter/cc/composable_splitter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/maker.h"
#include "riegeli/bytes/cord_reader.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/string_reader.h"
#include "riegeli/records/record_reader.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/tools/proto_splitter/cc/test_util.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#define IS_OSS true
namespace tensorflow {
namespace tools::proto_splitter {
namespace {
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter_testdata::RepeatedRepeatedString;
using ::tensorflow::proto_splitter_testdata::RepeatedString;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using tsl::testing::StatusIs;
using namespace std::string_literals;
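// Test splitter that emits one chunk per `strings` entry, each tagged with
// its index in the repeated field; an empty message becomes one base chunk.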
class RepeatedStringSplitter : public ComposableSplitter {
friend class ComposableSplitter;
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override {
RepeatedString* repeated_string =
tsl::protobuf::DynamicCastToGenerated<RepeatedString>(message());
auto strings = repeated_string->strings();
if (strings.empty()) {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
return absl::OkStatus();
}
for (int i = 0; i < strings.size(); i++) {
auto s = std::make_unique<MessageBytes>(strings[i]);
std::vector<FieldType> fields = {"strings"s, i};
TF_RETURN_IF_ERROR(AddChunk(std::move(s), &fields));
}
return absl::OkStatus();
}
};
RepeatedString SetUpRepeatedString(std::vector<string> strings) {
RepeatedString message;
*message.mutable_strings() = {strings.begin(), strings.end()};
return message;
}
TEST(RepeatedStringSplitterTest, TestSplitChunks) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
for (int i = 0; i < chunks->size(); i++) {
MessageBytes chunk = (*chunks)[i];
EXPECT_THAT(chunk, ::testing::VariantWith<std::string>(strings[i]));
}
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunked_fields {
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 0 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
})pb"));
TF_ASSERT_OK_AND_ASSIGN(auto ret2, splitter.Split());
std::vector<MessageBytes>* chunks2 = ret2.chunks;
ChunkedMessage* chunked_message2 = ret2.chunked_message;
EXPECT_EQ(chunks2, chunks);
EXPECT_EQ(chunked_message2, chunked_message);
}
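// Reads the ChunkMetadata record stored last in the riegeli stream, then
// seeks back to each chunk's offset and checks its payload and the recorded
// chunked-field layout.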
static void CheckChunks(riegeli::RecordReaderBase& reader,
std::vector<string>& strings) {
ChunkMetadata chunk_metadata;
reader.Seek(reader.Size().value());
reader.SeekBack();
reader.ReadRecord(chunk_metadata);
auto& chunk_info = chunk_metadata.chunks();
EXPECT_EQ(chunk_info.size(), strings.size());
for (int i = 0; i < chunk_info.size(); i++) {
reader.Seek(chunk_info[i].offset());
absl::string_view chunk;
reader.ReadRecord(chunk);
EXPECT_EQ(strings[i], std::string(chunk));
}
EXPECT_THAT(chunk_metadata.message(),
EqualsProto(R"pb(chunked_fields {
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 0 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 1 }
field_tag { index: 2 }
message { chunk_index: 2 }
})pb"));
}
TEST(RepeatedStringSplitterTest, TestWrite) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
std::string output_prefix = tensorflow::io::GetTempFilename("");
TF_ASSERT_OK(splitter.Write(output_prefix));
std::string expected_file = absl::StrCat(output_prefix, ".cpb");
TF_ASSERT_OK_AND_ASSIGN(auto exists,
internal::FileExists(Env::Default(), expected_file));
EXPECT_TRUE(exists);
riegeli::RecordReader file_reader(
riegeli::Maker<riegeli::FdReader>(std::move(expected_file)));
CheckChunks(file_reader, strings);
}
TEST(RepeatedStringSplitterTest, TestWriteToString) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
auto string_output_results = splitter.WriteToString();
TF_EXPECT_OK(string_output_results.status());
std::string string_output = std::get<0>(string_output_results.value());
bool is_chunked = std::get<1>(string_output_results.value());
EXPECT_TRUE(is_chunked);
riegeli::RecordReader string_reader(
riegeli::Maker<riegeli::StringReader>(string_output));
CheckChunks(string_reader, strings);
}
#if !IS_OSS
TEST(RepeatedStringSplitterTest, TestWriteToCord) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
auto cord_output_results = splitter.WriteToCord();
TF_EXPECT_OK(cord_output_results.status());
absl::Cord cord_output = std::get<0>(cord_output_results.value());
bool is_chunked = std::get<1>(cord_output_results.value());
EXPECT_TRUE(is_chunked);
riegeli::RecordReader cord_reader(
riegeli::Maker<riegeli::CordReader>(&cord_output));
CheckChunks(cord_reader, strings);
}
#endif
TEST(RepeatedStringSplitterTest, TestNoSplit) {
RepeatedString message;
RepeatedStringSplitter splitter = RepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
EXPECT_THAT(*chunks, SizeIs(1));
EXPECT_THAT(*std::get<tsl::protobuf::Message*>((*chunks)[0]),
EqualsProto(""));
EXPECT_THAT(*chunked_message, EqualsProto(R"pb(chunk_index: 0)pb"));
}
class RepeatedRepeatedStringSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override {
TF_RETURN_IF_ERROR(SetMessageAsBaseChunk());
RepeatedRepeatedString* msg =
tsl::protobuf::DynamicCastToGenerated<RepeatedRepeatedString>(
message());
auto repeated_strings = msg->rs();
for (int i = 0; i < repeated_strings.size(); i++) {
std::vector<FieldType> fields = {"rs"s, i};
auto splitter =
RepeatedStringSplitter(&repeated_strings[i], this, &fields);
TF_RETURN_IF_ERROR(splitter.BuildChunks());
}
return absl::OkStatus();
}
};
TEST(ComposableTest, RepeatedRepeatedStringTest) {
std::vector<string> strings1 = {"piece-1", "piece-2", "piece-3"};
auto rs1 = SetUpRepeatedString(strings1);
std::vector<string> strings2 = {"new-strings-1"};
auto rs2 = SetUpRepeatedString(strings2);
std::vector<string> strings3 = {"foo-1", "foo-2"};
auto rs3 = SetUpRepeatedString(strings3);
std::vector<RepeatedString> rs = {rs1, rs2, rs3};
RepeatedRepeatedString message;
message.mutable_rs()->Add(rs.begin(), rs.end());
RepeatedRepeatedStringSplitter splitter =
RepeatedRepeatedStringSplitter(&message);
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
ChunkedMessage* chunked_message = ret.chunked_message;
ASSERT_NE(chunked_message, nullptr);
std::vector<string> expected_chunks = {"piece-1", "piece-2", "piece-3",
"new-strings-1", "foo-1", "foo-2"};
EXPECT_THAT(*chunks, SizeIs(7));
EXPECT_THAT(*std::get<tsl::protobuf::Message*>((*chunks)[0]),
EqualsProto(message));
for (int i = 1; i < chunks->size(); i++) {
MessageBytes chunk = (*chunks)[i];
EXPECT_THAT(chunk,
::testing::VariantWith<std::string>(expected_chunks[i - 1]));
}
EXPECT_THAT(chunked_message->chunked_fields()[4],
EqualsProto(R"pb(field_tag { field: 2 }
field_tag { index: 2 }
field_tag { field: 1 }
field_tag { index: 0 }
message { chunk_index: 5 })pb"));
}
TEST(ComposableTest, ChildSplitterTest) {
std::vector<string> strings1 = {"piece-1", "piece-2", "piece-3"};
auto message1 = SetUpRepeatedString(strings1);
RepeatedStringSplitter splitter(&message1);
std::vector<FieldType> fields = {};
std::vector<string> strings2 = {"s1", "s2"};
auto message2 = SetUpRepeatedString(strings2);
RepeatedStringSplitter child(&message2, &splitter, &fields);
TF_EXPECT_OK(child.BuildChunks());
TF_ASSERT_OK_AND_ASSIGN(auto ret, splitter.Split());
std::vector<MessageBytes>* chunks = ret.chunks;
ASSERT_NE(chunks, nullptr);
EXPECT_THAT(*chunks, SizeIs(5));
}
TEST(ComposableTest, ChildSplitterUnimplementedTest) {
RepeatedString message;
RepeatedStringSplitter splitter(&message);
std::vector<FieldType> fields = {};
RepeatedStringSplitter child(&message, &splitter, &fields);
EXPECT_THAT(child.Split(), StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("`Split` function behavior")));
EXPECT_THAT(child.Write("str"),
StatusIs(absl::StatusCode::kUnimplemented,
HasSubstr("`Write` function behavior")));
}
class NoOpSplitter : public ComposableSplitter {
public:
using ComposableSplitter::ComposableSplitter;
absl::Status BuildChunks() override { return absl::OkStatus(); }
};
TEST(NoOpSplitterTest, TestWrite) {
std::vector<string> strings = {"piece-1", "piece-2", "piece-3"};
auto message = SetUpRepeatedString(strings);
NoOpSplitter splitter(&message);
std::string output_prefix = tensorflow::io::GetTempFilename("");
TF_ASSERT_OK(splitter.Write(output_prefix));
std::string expected_file = absl::StrCat(output_prefix, ".pb");
TF_ASSERT_OK_AND_ASSIGN(auto exists,
internal::FileExists(Env::Default(), expected_file));
EXPECT_TRUE(exists);
RepeatedString read_message;
auto status = tensorflow::ReadBinaryProto(tensorflow::Env::Default(),
expected_file, &read_message);
EXPECT_THAT(read_message, EqualsProto(message));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/composable_splitter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/proto_splitter/cc/composable_splitter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b2e3dc4c-9f53-49f1-8f40-d936b4c2f102 | cpp | google/tensorstore | s3_metadata | tensorstore/kvstore/s3/s3_metadata.cc | tensorstore/kvstore/s3/s3_metadata_test.cc | #include "tensorstore/kvstore/s3/s3_metadata.h"
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <initializer_list>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "re2/re2.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/internal/source_location.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tinyxml2.h"
using ::tensorstore::internal_http::HttpResponse;
namespace tensorstore {
namespace internal_kvstore_s3 {
namespace {
static constexpr char kEtag[] = "etag";
static constexpr char kLt[] = "&lt;";
static constexpr char kGt[] = "&gt;";
static constexpr char kQuot[] = "&quot;";
static constexpr char kApos[] = "&apos;";
static constexpr char kAmp[] = "&amp;";
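// Replaces the five predefined XML entities in `data` with their literal
// characters. A first pass computes the unescaped length so the result is
// built with a single allocation.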
std::string UnescapeXml(std::string_view data) {
  static LazyRE2 kSpecialXmlSymbols = {"(&gt;|&lt;|&quot;|&apos;|&amp;)"};
std::string_view search = data;
std::string_view symbol;
size_t result_len = data.length();
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
result_len -= symbol.length() - 1;
}
if (result_len == data.length()) {
return std::string(data);
}
search = data;
size_t pos = 0;
size_t res_pos = 0;
auto result = std::string(result_len, '0');
while (RE2::FindAndConsume(&search, *kSpecialXmlSymbols, &symbol)) {
size_t next = data.length() - search.length();
for (size_t i = pos; i < next - symbol.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
if (symbol == kGt) {
result[res_pos++] = '>';
} else if (symbol == kLt) {
result[res_pos++] = '<';
} else if (symbol == kQuot) {
result[res_pos++] = '"';
} else if (symbol == kApos) {
      result[res_pos++] = '\'';
} else if (symbol == kAmp) {
result[res_pos++] = '&';
} else {
assert(false);
}
pos = next;
}
for (size_t i = pos; i < data.length(); ++i, ++res_pos) {
result[res_pos] = data[i];
}
return result;
}
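// HTTP status codes treated as retryable; besides the usual 408/429/5xx this
// includes several non-standard throttling/timeout codes (419, 440, 509,
// 598, 599) seen from AWS-style endpoints.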
bool IsRetryableAwsStatusCode(int32_t status_code) {
switch (status_code) {
case 408:
case 419:
case 429:
case 440:
case 500:
case 502:
case 503:
case 504:
case 509:
case 598:
case 599:
return true;
default:
return false;
}
}
bool IsRetryableAwsMessageCode(std::string_view code) {
static const absl::NoDestructor<absl::flat_hash_set<std::string_view>>
kRetryableMessages(absl::flat_hash_set<std::string_view>({
"InternalFailureException",
"InternalFailure",
"InternalServerError",
"InternalError",
"RequestExpiredException",
"RequestExpired",
"ServiceUnavailableException",
"ServiceUnavailableError",
"ServiceUnavailable",
"RequestThrottledException",
"RequestThrottled",
"ThrottlingException",
"ThrottledException",
"Throttling",
"SlowDownException",
"SlowDown",
"RequestTimeTooSkewedException",
"RequestTimeTooSkewed",
"RequestTimeoutException",
"RequestTimeout",
}));
return kRetryableMessages->contains(code);
}
}
std::optional<int64_t> GetNodeInt(tinyxml2::XMLNode* node) {
if (!node) {
return std::nullopt;
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
int64_t result;
if (absl::SimpleAtoi(printer.CStr(), &result)) {
return result;
}
return std::nullopt;
}
std::optional<absl::Time> GetNodeTimestamp(tinyxml2::XMLNode* node) {
if (!node) {
return std::nullopt;
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
absl::Time result;
if (absl::ParseTime(absl::RFC3339_full, printer.CStr(), absl::UTCTimeZone(),
&result, nullptr)) {
return result;
}
return std::nullopt;
}
std::string GetNodeText(tinyxml2::XMLNode* node) {
if (!node) {
return "";
}
tinyxml2::XMLPrinter printer;
for (auto* child = node->FirstChild(); child != nullptr;
child = child->NextSibling()) {
child->Accept(&printer);
}
return UnescapeXml(printer.CStr());
}
Result<StorageGeneration> StorageGenerationFromHeaders(
const absl::btree_multimap<std::string, std::string>& headers) {
if (auto it = headers.find(kEtag); it != headers.end()) {
return StorageGeneration::FromString(it->second);
}
return absl::NotFoundError("etag not found in response headers");
}
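// Converts a failed AWS HTTP response into an absl::Status. The error type
// comes from the x-amzn-errortype header or, failing that, the XML
// <Error><Code> element; `retryable` is decided by the error type when
// present, otherwise by the HTTP status code. The request id and a prefix of
// the body are attached as status payloads.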
absl::Status AwsHttpResponseToStatus(const HttpResponse& response,
bool& retryable, SourceLocation loc) {
auto absl_status_code = internal_http::HttpResponseCodeToStatusCode(response);
if (absl_status_code == absl::StatusCode::kOk) {
return absl::OkStatus();
}
std::string error_type;
if (auto error_header = response.headers.find("x-amzn-errortype");
error_header != response.headers.end()) {
error_type = error_header->second;
}
absl::Cord request_id;
if (auto request_id_header = response.headers.find("x-amzn-requestid");
request_id_header != response.headers.end()) {
request_id = request_id_header->second;
}
std::string message;
auto payload = response.payload;
auto payload_str = payload.Flatten();
[&]() {
if (payload.empty()) return;
tinyxml2::XMLDocument xmlDocument;
if (int xmlcode = xmlDocument.Parse(payload_str.data(), payload_str.size());
xmlcode != tinyxml2::XML_SUCCESS) {
return;
}
auto* root_node = xmlDocument.FirstChildElement("Error");
if (root_node == nullptr) return;
if (error_type.empty()) {
error_type = GetNodeText(root_node->FirstChildElement("Code"));
}
if (request_id.empty()) {
request_id = GetNodeText(root_node->FirstChildElement("RequestId"));
}
message = GetNodeText(root_node->FirstChildElement("Message"));
}();
retryable = error_type.empty()
? IsRetryableAwsStatusCode(response.status_code)
: IsRetryableAwsMessageCode(error_type);
if (error_type.empty()) {
error_type = "Unknown";
}
absl::Status status(absl_status_code,
absl::StrFormat("%s%s%s", error_type,
message.empty() ? "" : ": ", message));
status.SetPayload("http_response_code",
absl::Cord(absl::StrFormat("%d", response.status_code)));
if (!payload_str.empty()) {
status.SetPayload(
"http_response_body",
payload.Subcord(0,
payload_str.size() < 256 ? payload_str.size() : 256));
}
if (!request_id.empty()) {
status.SetPayload("x-amzn-requestid", request_id);
}
MaybeAddSourceLocation(status, loc);
return status;
}
}
} | #include "tensorstore/kvstore/s3/s3_metadata.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/http/http_response.h"
#include "tensorstore/util/status_testutil.h"
#include "tinyxml2.h"
namespace {
using ::tensorstore::StatusIs;
using ::tensorstore::internal_http::HttpResponse;
using ::tensorstore::internal_kvstore_s3::AwsHttpResponseToStatus;
using ::tensorstore::internal_kvstore_s3::GetNodeInt;
using ::tensorstore::internal_kvstore_s3::GetNodeText;
using ::tensorstore::internal_kvstore_s3::GetNodeTimestamp;
static constexpr char kListXml[] =
R"(<ListBucketResult xmlns="http:
R"(<Name>i-dont-exist</Name>)"
R"(<Prefix>tensorstore/test/</Prefix>)"
R"(<KeyCount>3</KeyCount>)"
R"(<MaxKeys>1000</MaxKeys>)"
R"(<IsTruncated>false</IsTruncated>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/abc</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"900150983cd24fb0d6963f7d28e17f72"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>3</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/ab>cd</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"e2fc714c4727ee9395f324cd2e7f331f"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>4</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(<Contents>)"
R"(<Key>tensorstore/test/abcde</Key>)"
R"(<LastModified>2023-07-08T15:26:55.000Z</LastModified>)"
R"(<ETag>"ab56b4d92b40713acc5af89985d4b786"</ETag>)"
R"(<ChecksumAlgorithm>SHA256</ChecksumAlgorithm>)"
R"(<Size>5</Size>)"
R"(<StorageClass>STANDARD</StorageClass>)"
R"(</Contents>)"
R"(</ListBucketResult>)";
TEST(XmlSearchTest, GetNodeValues) {
tinyxml2::XMLDocument xmlDocument;
ASSERT_EQ(xmlDocument.Parse(kListXml), tinyxml2::XML_SUCCESS);
auto* root = xmlDocument.FirstChildElement("ListBucketResult");
ASSERT_NE(root, nullptr);
EXPECT_EQ("i-dont-exist", GetNodeText(root->FirstChildElement("Name")));
auto* contents = root->FirstChildElement("Contents");
ASSERT_NE(contents, nullptr);
EXPECT_EQ(R"("900150983cd24fb0d6963f7d28e17f72")",
GetNodeText(contents->FirstChildElement("ETag")));
EXPECT_THAT(GetNodeInt(contents->FirstChildElement("Size")),
::testing::Optional(::testing::Eq(3)));
EXPECT_THAT(
GetNodeTimestamp(contents->FirstChildElement("LastModified")),
::testing::Optional(::testing::Eq(absl::FromUnixSeconds(1688830015))));
}
TEST(S3MetadataTest, AwsHttpResponseToStatus) {
HttpResponse response;
{
response.status_code = 404;
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_FALSE(retryable);
}
{
response.status_code = 429;
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kUnavailable));
EXPECT_TRUE(retryable);
}
{
response.status_code = 400;
response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>UnknownError</Code>
<Message>Unknown message</Message>
<Resource>/mybucket/myfoto.jpg</Resource>
<RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_FALSE(retryable);
}
{
response.status_code = 400;
response.payload = absl::Cord(R"(<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ThrottledException</Code>
<Message>Throttled message</Message>
<Resource>/mybucket/myfoto.jpg</Resource>
<RequestId>4442587FB7D0A2F9</RequestId>
</Error>
)");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_TRUE(retryable);
}
{
response.status_code = 400;
response.headers.emplace("x-amzn-errortype", "UnknownError");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_FALSE(retryable);
}
{
response.status_code = 400;
response.headers.clear();
response.headers.emplace("x-amzn-errortype", "ThrottledException");
bool retryable = false;
EXPECT_THAT(AwsHttpResponseToStatus(response, retryable),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_TRUE(retryable);
}
}
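// Taken together, the 400-status cases above pin down the retryability
// contract from both sources the implementation consults: the XML <Code>
// element in the response body and the "x-amzn-errortype" response header.
// In either form, "ThrottledException" is retryable and "UnknownError" is
// not, while the mapped status code (kInvalidArgument for HTTP 400) is
// independent of retryability.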
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_metadata.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/s3/s3_metadata_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
b89a5ac0-a504-4299-9d79-cc34ac6e6151 | cpp | google/quiche | tls_server_handshaker | quiche/quic/core/tls_server_handshaker.cc | quiche/quic/core/tls_server_handshaker_test.cc | #include "quiche/quic/core/tls_server_handshaker.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "absl/types/variant.h"
#include "openssl/base.h"
#include "openssl/bytestring.h"
#include "openssl/ssl.h"
#include "openssl/tls1.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_message_parser.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/crypto/proof_verifier.h"
#include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/http/http_frames.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_connection_stats.h"
#include "quiche/quic/core/quic_crypto_server_stream_base.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_time_accumulator.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_handshaker.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_hostname_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_server_stats.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#define RECORD_LATENCY_IN_US(stat_name, latency, comment) \
do { \
const int64_t latency_in_us = (latency).ToMicroseconds(); \
QUIC_DVLOG(1) << "Recording " stat_name ": " << latency_in_us; \
QUIC_SERVER_HISTOGRAM_COUNTS(stat_name, latency_in_us, 1, 10000000, 50, \
comment); \
} while (0)
namespace quic {
namespace {
uint16_t kDefaultPort = 443;
}
TlsServerHandshaker::DefaultProofSourceHandle::DefaultProofSourceHandle(
TlsServerHandshaker* handshaker, ProofSource* proof_source)
: handshaker_(handshaker), proof_source_(proof_source) {}
TlsServerHandshaker::DefaultProofSourceHandle::~DefaultProofSourceHandle() {
CloseHandle();
}
void TlsServerHandshaker::DefaultProofSourceHandle::CloseHandle() {
QUIC_DVLOG(1) << "CloseHandle. is_signature_pending="
<< (signature_callback_ != nullptr);
if (signature_callback_) {
signature_callback_->Cancel();
signature_callback_ = nullptr;
}
}
QuicAsyncStatus
TlsServerHandshaker::DefaultProofSourceHandle::SelectCertificate(
const QuicSocketAddress& server_address,
const QuicSocketAddress& client_address,
const QuicConnectionId& ,
absl::string_view , const std::string& hostname,
absl::string_view , const std::string& ,
std::optional<std::string> ,
const std::vector<uint8_t>& ,
const std::optional<std::vector<uint8_t>>& ,
const QuicSSLConfig& ) {
if (!handshaker_ || !proof_source_) {
QUIC_BUG(quic_bug_10341_1)
<< "SelectCertificate called on a detached handle";
return QUIC_FAILURE;
}
bool cert_matched_sni;
quiche::QuicheReferenceCountedPointer<ProofSource::Chain> chain =
proof_source_->GetCertChain(server_address, client_address, hostname,
&cert_matched_sni);
handshaker_->OnSelectCertificateDone(
true, true,
ProofSourceHandleCallback::LocalSSLConfig{chain.get(),
QuicDelayedSSLConfig()},
absl::string_view(), cert_matched_sni);
if (!handshaker_->select_cert_status().has_value()) {
QUIC_BUG(quic_bug_12423_1)
<< "select_cert_status() has no value after a synchronous select cert";
return QUIC_SUCCESS;
}
return *handshaker_->select_cert_status();
}
QuicAsyncStatus TlsServerHandshaker::DefaultProofSourceHandle::ComputeSignature(
const QuicSocketAddress& server_address,
const QuicSocketAddress& client_address, const std::string& hostname,
uint16_t signature_algorithm, absl::string_view in,
size_t max_signature_size) {
if (!handshaker_ || !proof_source_) {
QUIC_BUG(quic_bug_10341_2)
<< "ComputeSignature called on a detached handle";
return QUIC_FAILURE;
}
if (signature_callback_) {
QUIC_BUG(quic_bug_10341_3) << "ComputeSignature called while pending";
return QUIC_FAILURE;
}
signature_callback_ = new DefaultSignatureCallback(this);
proof_source_->ComputeTlsSignature(
server_address, client_address, hostname, signature_algorithm, in,
std::unique_ptr<DefaultSignatureCallback>(signature_callback_));
if (signature_callback_) {
QUIC_DVLOG(1) << "ComputeTlsSignature is pending";
signature_callback_->set_is_sync(false);
return QUIC_PENDING;
}
bool success = handshaker_->HasValidSignature(max_signature_size);
QUIC_DVLOG(1) << "ComputeTlsSignature completed synchronously. success:"
<< success;
return success ? QUIC_SUCCESS : QUIC_FAILURE;
}
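// A note on the completion-detection idiom above, which relies on an assumed
// contract of DefaultSignatureCallback: ComputeTlsSignature takes ownership
// of |signature_callback_|, and a callback that runs synchronously clears
// the pointer before ComputeTlsSignature returns. A still-non-null pointer
// afterwards therefore signals a genuinely pending operation, and
// QUIC_PENDING is returned.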
TlsServerHandshaker::DecryptCallback::DecryptCallback(
TlsServerHandshaker* handshaker)
: handshaker_(handshaker) {}
void TlsServerHandshaker::DecryptCallback::Run(std::vector<uint8_t> plaintext) {
if (handshaker_ == nullptr) {
return;
}
TlsServerHandshaker* handshaker = handshaker_;
handshaker_ = nullptr;
handshaker->decrypted_session_ticket_ = std::move(plaintext);
const bool is_async =
(handshaker->expected_ssl_error() == SSL_ERROR_PENDING_TICKET);
std::optional<QuicConnectionContextSwitcher> context_switcher;
if (is_async) {
context_switcher.emplace(handshaker->connection_context());
}
QUIC_TRACESTRING(
absl::StrCat("TLS ticket decryption done. len(decrypted_ticket):",
handshaker->decrypted_session_ticket_.size()));
if (is_async) {
handshaker->AdvanceHandshakeFromCallback();
}
handshaker->ticket_decryption_callback_ = nullptr;
}
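// DecryptCallback::Run distinguishes synchronous from asynchronous
// completion via the handshaker's expected SSL error:
// SSL_ERROR_PENDING_TICKET means the decrypt went asynchronous, so the
// connection context is restored and the handshake is re-advanced from the
// callback; a synchronous completion just stores the plaintext and lets the
// caller proceed.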
void TlsServerHandshaker::DecryptCallback::Cancel() {
QUICHE_DCHECK(handshaker_);
handshaker_ = nullptr;
}
TlsServerHandshaker::TlsServerHandshaker(
QuicSession* session, const QuicCryptoServerConfig* crypto_config)
: TlsHandshaker(this, session),
QuicCryptoServerStreamBase(session),
proof_source_(crypto_config->proof_source()),
pre_shared_key_(crypto_config->pre_shared_key()),
crypto_negotiated_params_(new QuicCryptoNegotiatedParameters),
tls_connection_(crypto_config->ssl_ctx(), this, session->GetSSLConfig()),
crypto_config_(crypto_config) {
QUIC_DVLOG(1) << "TlsServerHandshaker: client_cert_mode initial value: "
<< client_cert_mode();
QUICHE_DCHECK_EQ(PROTOCOL_TLS1_3,
session->connection()->version().handshake_protocol);
SSL_set_accept_state(ssl());
int use_legacy_extension = 0;
if (session->version().UsesLegacyTlsExtension()) {
use_legacy_extension = 1;
}
SSL_set_quic_use_legacy_codepoint(ssl(), use_legacy_extension);
if (session->connection()->context()->tracer) {
tls_connection_.EnableInfoCallback();
}
#if BORINGSSL_API_VERSION >= 22
if (!crypto_config->preferred_groups().empty()) {
SSL_set1_group_ids(ssl(), crypto_config->preferred_groups().data(),
crypto_config->preferred_groups().size());
}
#endif
}
TlsServerHandshaker::~TlsServerHandshaker() { CancelOutstandingCallbacks(); }
void TlsServerHandshaker::CancelOutstandingCallbacks() {
if (proof_source_handle_) {
proof_source_handle_->CloseHandle();
}
if (ticket_decryption_callback_) {
ticket_decryption_callback_->Cancel();
ticket_decryption_callback_ = nullptr;
}
}
void TlsServerHandshaker::InfoCallback(int type, int value) {
QuicConnectionTracer* tracer =
session()->connection()->context()->tracer.get();
if (tracer == nullptr) {
return;
}
if (type & SSL_CB_LOOP) {
tracer->PrintString(
absl::StrCat("SSL:ACCEPT_LOOP:", SSL_state_string_long(ssl())));
} else if (type & SSL_CB_ALERT) {
const char* prefix =
(type & SSL_CB_READ) ? "SSL:READ_ALERT:" : "SSL:WRITE_ALERT:";
tracer->PrintString(absl::StrCat(prefix, SSL_alert_type_string_long(value),
":", SSL_alert_desc_string_long(value)));
} else if (type & SSL_CB_EXIT) {
const char* prefix =
(value == 1) ? "SSL:ACCEPT_EXIT_OK:" : "SSL:ACCEPT_EXIT_FAIL:";
tracer->PrintString(absl::StrCat(prefix, SSL_state_string_long(ssl())));
} else if (type & SSL_CB_HANDSHAKE_START) {
tracer->PrintString(
absl::StrCat("SSL:HANDSHAKE_START:", SSL_state_string_long(ssl())));
} else if (type & SSL_CB_HANDSHAKE_DONE) {
tracer->PrintString(
absl::StrCat("SSL:HANDSHAKE_DONE:", SSL_state_string_long(ssl())));
} else {
QUIC_DLOG(INFO) << "Unknown event type " << type << ": "
<< SSL_state_string_long(ssl());
tracer->PrintString(
absl::StrCat("SSL:unknown:", value, ":", SSL_state_string_long(ssl())));
}
}
std::unique_ptr<ProofSourceHandle>
TlsServerHandshaker::MaybeCreateProofSourceHandle() {
return std::make_unique<DefaultProofSourceHandle>(this, proof_source_);
}
bool TlsServerHandshaker::GetBase64SHA256ClientChannelID(
std::string* ) const {
return false;
}
void TlsServerHandshaker::SendServerConfigUpdate(
const CachedNetworkParameters* ) {
}
bool TlsServerHandshaker::DisableResumption() {
if (!can_disable_resumption_ || !session()->connection()->connected()) {
return false;
}
tls_connection_.DisableTicketSupport();
return true;
}
bool TlsServerHandshaker::IsZeroRtt() const {
return SSL_early_data_accepted(ssl());
}
bool TlsServerHandshaker::IsResumption() const {
return SSL_session_reused(ssl());
}
bool TlsServerHandshaker::ResumptionAttempted() const {
return ticket_received_;
}
bool TlsServerHandshaker::EarlyDataAttempted() const {
QUIC_BUG_IF(quic_tls_early_data_attempted_too_early,
!select_cert_status_.has_value())
<< "EarlyDataAttempted must be called after EarlySelectCertCallback is "
"started";
return early_data_attempted_;
}
int TlsServerHandshaker::NumServerConfigUpdateMessagesSent() const {
return 0;
}
const CachedNetworkParameters*
TlsServerHandshaker::PreviousCachedNetworkParams() const {
return last_received_cached_network_params_.get();
}
void TlsServerHandshaker::SetPreviousCachedNetworkParams(
CachedNetworkParameters cached_network_params) {
last_received_cached_network_params_ =
std::make_unique<CachedNetworkParameters>(cached_network_params);
}
void TlsServerHandshaker::OnPacketDecrypted(EncryptionLevel level) {
if (level == ENCRYPTION_HANDSHAKE && state_ < HANDSHAKE_PROCESSED) {
state_ = HANDSHAKE_PROCESSED;
handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_INITIAL);
}
}
void TlsServerHandshaker::OnHandshakeDoneReceived() { QUICHE_DCHECK(false); }
void TlsServerHandshaker::OnNewTokenReceived(absl::string_view ) {
QUICHE_DCHECK(false);
}
std::string TlsServerHandshaker::GetAddressToken(
const CachedNetworkParameters* cached_network_params) const {
SourceAddressTokens empty_previous_tokens;
const QuicConnection* connection = session()->connection();
return crypto_config_->NewSourceAddressToken(
crypto_config_->source_address_token_boxer(), empty_previous_tokens,
connection->effective_peer_address().host(),
connection->random_generator(), connection->clock()->WallNow(),
cached_network_params);
}
bool TlsServerHandshaker::ValidateAddressToken(absl::string_view token) const {
SourceAddressTokens tokens;
HandshakeFailureReason reason = crypto_config_->ParseSourceAddressToken(
crypto_config_->source_address_token_boxer(), token, tokens);
if (reason != HANDSHAKE_OK) {
QUIC_DLOG(WARNING) << "Failed to parse source address token: "
<< CryptoUtils::HandshakeFailureReasonToString(reason);
return false;
}
auto cached_network_params = std::make_unique<CachedNetworkParameters>();
reason = crypto_config_->ValidateSourceAddressTokens(
tokens, session()->connection()->effective_peer_address().host(),
session()->connection()->clock()->WallNow(), cached_network_params.get());
if (reason != HANDSHAKE_OK) {
QUIC_DLOG(WARNING) << "Failed to validate source address token: "
<< CryptoUtils::HandshakeFailureReasonToString(reason);
return false;
}
last_received_cached_network_params_ = std::move(cached_network_params);
return true;
}
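// Illustrative round trip through the two methods above (hypothetical
// caller, not code from this file):
//
//   std::string token = handshaker->GetAddressToken(nullptr);
//   // ... the token travels to the client and returns on a later
//   // connection attempt ...
//   if (handshaker->ValidateAddressToken(token)) {
//     // On success, any CachedNetworkParameters carried in the token are
//     // available via PreviousCachedNetworkParams().
//   }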
bool TlsServerHandshaker::ShouldSendExpectCTHeader() const { return false; }
bool TlsServerHandshaker::DidCertMatchSni() const { return cert_matched_sni_; }
const ProofSource::Details* TlsServerHandshaker::ProofSourceDetails() const {
return proof_source_details_.get();
}
bool TlsServerHandshaker::ExportKeyingMaterial(absl::string_view label,
absl::string_view context,
size_t result_len,
std::string* result) {
return ExportKeyingMaterialForLabel(label, context, result_len, result);
}
void TlsServerHandshaker::OnConnectionClosed(
const QuicConnectionCloseFrame& frame, ConnectionCloseSource source) {
TlsHandshaker::OnConnectionClosed(frame.quic_error_code, source);
}
ssl_early_data_reason_t TlsServerHandshaker::EarlyDataReason() const {
return TlsHandshaker::EarlyDataReason();
}
bool TlsServerHandshaker::encryption_established() const {
return encryption_established_;
}
bool TlsServerHandshaker::one_rtt_keys_available() const {
return state_ == HANDSHAKE_CONFIRMED;
}
const QuicCryptoNegotiatedParameters&
TlsServerHandshaker::crypto_negotiated_params() const {
return *crypto_negotiated_params_;
}
CryptoMessageParser* TlsServerHandshaker::crypto_message_parser() {
return TlsHandshaker::crypto_message_parser();
}
HandshakeState TlsServerHandshaker::GetHandshakeState() const { return state_; }
void TlsServerHandshaker::SetServerApplicationStateForResumption(
std::unique_ptr<ApplicationState> state) {
application_state_ = std::move(state);
}
size_t TlsServerHandshaker::BufferSizeLimitForLevel(
EncryptionLevel level) const {
return TlsHandshaker::BufferSizeLimitForLevel(level);
}
std::unique_ptr<QuicDecrypter>
TlsServerHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
return TlsHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter();
}
std::unique_ptr<QuicEncrypter>
TlsServerHandshaker::CreateCurrentOneRttEncrypter() {
return TlsHandshaker::CreateCurrentOneRttEncrypter();
}
void TlsServerHandshaker::OverrideQuicConfigDefaults(QuicConfig* ) {}
void TlsServerHandshaker::AdvanceHandshakeFromCallback() {
QuicConnection::ScopedPacketFlusher flusher(session()->connection());
AdvanceHandshake();
if (!is_connection_closed()) {
handshaker_delegate()->OnHandshakeCallbackDone();
}
}
bool TlsServerHandshaker::ProcessTransportParameters(
const SSL_CLIENT_HELLO* client_hello, std::string* error_details) {
TransportParameters client_params;
const uint8_t* client_params_bytes;
size_t params_bytes_len;
uint16_t extension_type = TLSEXT_TYPE_quic_transport_parameters_standard;
if (session()->version().UsesLegacyTlsExtension()) {
extension_type = TLSEXT_TYPE_quic_transport_parameters_legacy;
}
if (!SSL_early_callback_ctx_extension_get(client_hello, extension_type,
&client_params_bytes,
¶ms_bytes_len)) {
params_bytes_len = 0;
}
if (params_bytes_len == 0) {
*error_details = "Client's transport parameters are missing";
return false;
}
std::string parse_error_details;
if (!ParseTransportParameters(session()->connection()->version(),
Perspective::IS_CLIENT, client_params_bytes,
params_bytes_len, &client_params,
&parse_error_details)) {
QUICHE_DCHECK(!parse_error_details.empty());
*error_details =
"Unable to parse client's transport parameters: " + parse_error_details;
return false;
}
session()->connection()->OnTransportParametersReceived(client_params);
if (client_params.legacy_version_information.has_value() &&
CryptoUtils::ValidateClientHelloVersion(
client_params.legacy_version_information->version,
session()->connection()->version(), session()->supported_versions(),
error_details) != QUIC_NO_ERROR) {
return false;
}
if (client_params.version_information.has_value() &&
!CryptoUtils::ValidateChosenVersion(
client_params.version_information->chosen_version,
session()->version(), error_details)) {
QUICHE_DCHECK(!error_details->empty());
return false;
}
if (handshaker_delegate()->ProcessTransportParameters(
client_params, false, error_details) !=
QUIC_NO_ERROR) {
return false;
}
if (!ProcessAdditionalTransportParameters(client_params)) {
*error_details = "Failed to process additional transport parameters";
return false;
}
return true;
}
TlsServerHandshaker::SetTransportParametersResult
TlsServerHandshaker::SetTransportParameters() {
SetTransportParametersResult result;
QUICHE_DCHECK(!result.success);
server_params_.perspective = Perspective::IS_SERVER;
server_params_.legacy_version_information =
TransportParameters::LegacyVersionInformation();
server_params_.legacy_version_information->supported_versions =
CreateQuicVersionLabelVector(session()->supported_versions());
server_params_.legacy_version_information->version =
CreateQuicVersionLabel(session()->connection()->version());
server_params_.version_information =
TransportParameters::VersionInformation();
server_params_.version_information->chosen_version =
CreateQuicVersionLabel(session()->version());
server_params_.version_information->other_versions =
CreateQuicVersionLabelVector(session()->supported_versions());
if (!handshaker_delegate()->FillTransportParameters(&server_params_)) {
return result;
}
session()->connection()->OnTransportParametersSent(server_params_);
{
std::vector<uint8_t> server_params_bytes;
if (!SerializeTransportParameters(server_params_, &server_params_bytes) ||
SSL_set_quic_transport_params(ssl(), server_params_bytes.data(),
server_params_bytes.size()) != 1) {
return result;
}
result.quic_transport_params = std::move(server_params_bytes);
}
if (application_state_) {
std::vector<uint8_t> early_data_context;
if (!SerializeTransportParametersForTicket(
server_params_, *application_state_, &early_data_context)) {
QUIC_BUG(quic_bug_10341_4)
<< "Failed to serialize Transport Parameters for ticket.";
result.early_data_context = std::vector<uint8_t>();
return result;
}
SSL_set_quic_early_data_context(ssl(), early_data_context.data(),
early_data_context.size());
result.early_data_context = std::move(early_data_context);
application_state_.reset(nullptr);
}
result.success = true;
return result;
}
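// The result assembled above carries three things: |success|, the serialized
// server transport parameters (also installed on the SSL object via
// SSL_set_quic_transport_params), and, when an application state was
// registered for resumption, the early data context that determines which
// session tickets remain valid for 0-RTT on later connections.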
bool TlsServerHandshaker::TransportParametersMatch(
absl::Span<const uint8_t> serialized_params) const {
TransportParameters params;
std::string error_details;
bool parse_ok = ParseTransportParameters(
session()->version(), Perspective::IS_SERVER, serialized_params.data(),
serialized_params.size(), ¶ms, &error_details);
if (!parse_ok) {
return false;
}
DegreaseTransportParameters(params);
return params == server_params_;
}
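// The parsed copy is degreased before comparison, presumably because GREASE
// entries are injected at serialization time: without degreasing, a
// round-tripped serialization of |server_params_| would never compare equal
// to the original.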
void TlsServerHandshaker::SetWriteSecret(
EncryptionLevel level, const SSL_CIPHER* cipher,
absl::Span<const uint8_t> write_secret) {
if (is_connection_closed()) {
return;
}
if (level == ENCRYPTION_FORWARD_SECURE) {
encryption_established_ = true;
const SSL_CIPHER* ssl_cipher = SSL_get_current_cipher(ssl());
if (ssl_cipher) {
crypto_negotiated_params_->cipher_suite =
SSL_CIPHER_get_protocol_id(ssl_cipher);
}
crypto_negotiated_params_->key_exchange_group = SSL_get_curve_id(ssl());
crypto_negotiated_params_->encrypted_client_hello = SSL_ech_accepted(ssl());
}
TlsHandshaker::SetWriteSecret(level, cipher, write_secret);
}
std::string TlsServerHandshaker::GetAcceptChValueForHostname(
const std::string& ) const {
return {};
}
bool TlsServerHandshaker::UseAlpsNewCodepoint() const {
if (!select_cert_status_.has_value()) {
QUIC_BUG(quic_tls_check_alps_new_codepoint_too_early)
<< "UseAlpsNewCodepoint must be called after "
"EarlySelectCertCallback is started";
return false;
}
return alps_new_codepoint_received_;
}
void TlsServerHandshaker::FinishHandshake() {
QUICHE_DCHECK(!SSL_in_early_data(ssl()));
if (!valid_alpn_received_) {
QUIC_DLOG(ERROR)
<< "Server: handshake finished without receiving a known ALPN";
CloseConnection(QUIC_HANDSHAKE_FAILED,
"Server did not receive a known ALPN");
return;
}
ssl_early_data_reason_t reason_code = EarlyDataReason();
QUIC_DLOG(INFO) << "Server: handshake finished. Early data reason "
<< reason_code << " ("
<< CryptoUtils::EarlyDataReasonToString(reason_code) << ")";
state_ = HANDSHAKE_CONFIRMED;
handshaker_delegate()->OnTlsHandshakeComplete();
handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_HANDSHAKE);
handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_HANDSHAKE);
}
QuicAsyncStatus TlsServerHandshaker::VerifyCertChain(
const std::vector<std::string>& , std::string* ,
std::unique_ptr<ProofVerifyDetails>* , uint8_t* ,
std::unique_ptr<ProofVerifierCallback> ) {
QUIC_DVLOG(1) << "VerifyCertChain returning success";
return QUIC_SUCCESS;
}
void TlsServerHandshaker::OnProofVerifyDetailsAvailable(
const ProofVerifyDetails& ) {}
ssl_private_key_result_t TlsServerHandshaker::PrivateKeySign(
uint8_t* out, size_t* out_len, size_t max_out, uint16_t sig_alg,
absl::string_view in) {
QUICHE_DCHECK_EQ(expected_ssl_error(), SSL_ERROR_WANT_READ);
QuicAsyncStatus status = proof_source_handle_->ComputeSignature(
session()->connection()->self_address(),
session()->connection()->peer_address(), crypto_negotiated_params_->sni,
sig_alg, in, max_out);
if (status == QUIC_PENDING) {
set_expected_ssl_error(SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
if (async_op_timer_.has_value()) {
QUIC_CODE_COUNT(
quic_tls_server_computing_signature_while_another_op_pending);
}
async_op_timer_ = QuicTimeAccumulator();
async_op_timer_->Start(now());
}
return PrivateKeyComplete(out, out_len, max_out);
}
ssl_private_key_result_t TlsServerHandshaker::PrivateKeyComplete(
uint8_t* out, size_t* out_len, size_t max_out) {
if (expected_ssl_error() == SSL_ERROR_WANT_PRIVATE_KEY_OPERATION) {
return ssl_private_key_retry;
}
const bool success = HasValidSignature(max_out);
QuicConnectionStats::TlsServerOperationStats compute_signature_stats;
compute_signature_stats.success = success;
if (async_op_timer_.has_value()) {
async_op_timer_->Stop(now());
compute_signature_stats.async_latency =
async_op_timer_->GetTotalElapsedTime();
async_op_timer_.reset();
RECORD_LATENCY_IN_US("tls_server_async_compute_signature_latency_us",
compute_signature_stats.async_latency,
"Async compute signature latency in microseconds");
}
connection_stats().tls_server_compute_signature_stats =
std::move(compute_signature_stats);
if (!success) {
return ssl_private_key_failure;
}
*out_len = cert_verify_sig_.size();
memcpy(out, cert_verify_sig_.data(), *out_len);
cert_verify_sig_.clear();
cert_verify_sig_.shrink_to_fit();
return ssl_private_key_success;
}
void TlsServerHandshaker::OnComputeSignatureDone(
bool ok, bool is_sync, std::string signature,
std::unique_ptr<ProofSource::Details> details) {
QUIC_DVLOG(1) << "OnComputeSignatureDone. ok:" << ok
<< ", is_sync:" << is_sync
<< ", len(signature):" << signature.size();
std::optional<QuicConnectionContextSwitcher> context_switcher;
if (!is_sync) {
context_switcher.emplace(connection_context());
}
QUIC_TRACESTRING(absl::StrCat("TLS compute signature done. ok:", ok,
", len(signature):", signature.size()));
if (ok) {
cert_verify_sig_ = std::move(signature);
proof_source_details_ = std::move(details);
}
const int last_expected_ssl_error = expected_ssl_error();
set_expected_ssl_error(SSL_ERROR_WANT_READ);
if (!is_sync) {
QUICHE_DCHECK_EQ(last_expected_ssl_error,
SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
AdvanceHandshakeFromCallback();
}
}
bool TlsServerHandshaker::HasValidSignature(size_t max_signature_size) const {
return !cert_verify_sig_.empty() &&
cert_verify_sig_.size() <= max_signature_size;
}
size_t TlsServerHandshaker::SessionTicketMaxOverhead() {
QUICHE_DCHECK(proof_source_->GetTicketCrypter());
return proof_source_->GetTicketCrypter()->MaxOverhead();
}
int TlsServerHandshaker::SessionTicketSeal(uint8_t* out, size_t* out_len,
size_t max_out_len,
absl::string_view in) {
QUICHE_DCHECK(proof_source_->GetTicketCrypter());
std::vector<uint8_t> ticket =
proof_source_->GetTicketCrypter()->Encrypt(in, ticket_encryption_key_);
if (GetQuicReloadableFlag(
quic_send_placeholder_ticket_when_encrypt_ticket_fails) &&
ticket.empty()) {
QUIC_CODE_COUNT(quic_tls_server_handshaker_send_placeholder_ticket);
const absl::string_view kTicketFailurePlaceholder = "TICKET FAILURE";
const absl::string_view kTicketWithSizeLimit =
kTicketFailurePlaceholder.substr(0, max_out_len);
ticket.assign(kTicketWithSizeLimit.begin(), kTicketWithSizeLimit.end());
}
if (max_out_len < ticket.size()) {
QUIC_BUG(quic_bug_12423_2)
<< "TicketCrypter returned " << ticket.size()
<< " bytes of ciphertext, which is larger than its max overhead of "
<< max_out_len;
return 0;
}
*out_len = ticket.size();
memcpy(out, ticket.data(), ticket.size());
QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_sealed);
return 1;
}
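// A hedged note on the placeholder path above: when ticket encryption fails
// and the quic_send_placeholder_ticket_when_encrypt_ticket_fails flag is on,
// the server still emits "TICKET FAILURE" (clamped to |max_out_len|) rather
// than no ticket at all. The placeholder simply fails to decrypt on a future
// connection, letting resumption degrade softly instead of surfacing an
// error during this handshake.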
ssl_ticket_aead_result_t TlsServerHandshaker::SessionTicketOpen(
uint8_t* out, size_t* out_len, size_t max_out_len, absl::string_view in) {
QUICHE_DCHECK(proof_source_->GetTicketCrypter());
if (ignore_ticket_open_) {
QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_ignored_1);
return ssl_ticket_aead_ignore_ticket;
}
if (!ticket_decryption_callback_) {
ticket_decryption_callback_ = std::make_shared<DecryptCallback>(this);
proof_source_->GetTicketCrypter()->Decrypt(in, ticket_decryption_callback_);
if (ticket_decryption_callback_) {
QUICHE_DCHECK(!ticket_decryption_callback_->IsDone());
set_expected_ssl_error(SSL_ERROR_PENDING_TICKET);
if (async_op_timer_.has_value()) {
QUIC_CODE_COUNT(
quic_tls_server_decrypting_ticket_while_another_op_pending);
}
async_op_timer_ = QuicTimeAccumulator();
async_op_timer_->Start(now());
}
}
if (ticket_decryption_callback_ && !ticket_decryption_callback_->IsDone()) {
return ssl_ticket_aead_retry;
}
ssl_ticket_aead_result_t result =
FinalizeSessionTicketOpen(out, out_len, max_out_len);
QuicConnectionStats::TlsServerOperationStats decrypt_ticket_stats;
decrypt_ticket_stats.success = (result == ssl_ticket_aead_success);
if (async_op_timer_.has_value()) {
async_op_timer_->Stop(now());
decrypt_ticket_stats.async_latency = async_op_timer_->GetTotalElapsedTime();
async_op_timer_.reset();
RECORD_LATENCY_IN_US("tls_server_async_decrypt_ticket_latency_us",
decrypt_ticket_stats.async_latency,
"Async decrypt ticket latency in microseconds");
}
connection_stats().tls_server_decrypt_ticket_stats =
std::move(decrypt_ticket_stats);
return result;
}
ssl_ticket_aead_result_t TlsServerHandshaker::FinalizeSessionTicketOpen(
uint8_t* out, size_t* out_len, size_t max_out_len) {
ticket_decryption_callback_ = nullptr;
set_expected_ssl_error(SSL_ERROR_WANT_READ);
if (decrypted_session_ticket_.empty()) {
QUIC_DLOG(ERROR) << "Session ticket decryption failed; ignoring ticket";
QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_ignored_2);
return ssl_ticket_aead_ignore_ticket;
}
if (max_out_len < decrypted_session_ticket_.size()) {
return ssl_ticket_aead_error;
}
memcpy(out, decrypted_session_ticket_.data(),
decrypted_session_ticket_.size());
*out_len = decrypted_session_ticket_.size();
QUIC_CODE_COUNT(quic_tls_server_handshaker_tickets_opened);
return ssl_ticket_aead_success;
}
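// Result semantics above: an empty decryption result maps to
// ssl_ticket_aead_ignore_ticket (fall back to a full handshake), an
// undersized output buffer maps to ssl_ticket_aead_error, and only a
// successfully copied plaintext yields ssl_ticket_aead_success.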
ssl_select_cert_result_t TlsServerHandshaker::EarlySelectCertCallback(
const SSL_CLIENT_HELLO* client_hello) {
if (select_cert_status_.has_value()) {
QUIC_DVLOG(1) << "EarlySelectCertCallback called to continue handshake, "
"returning directly. success:"
<< (*select_cert_status_ == QUIC_SUCCESS);
return (*select_cert_status_ == QUIC_SUCCESS) ? ssl_select_cert_success
: ssl_select_cert_error;
}
select_cert_status_ = QUIC_PENDING;
proof_source_handle_ = MaybeCreateProofSourceHandle();
if (!pre_shared_key_.empty()) {
QUIC_BUG(quic_bug_10341_6)
<< "QUIC server pre-shared keys not yet supported with TLS";
set_extra_error_details("select_cert_error: pre-shared keys not supported");
return ssl_select_cert_error;
}
{
const uint8_t* unused_extension_bytes;
size_t unused_extension_len;
ticket_received_ = SSL_early_callback_ctx_extension_get(
client_hello, TLSEXT_TYPE_pre_shared_key, &unused_extension_bytes,
&unused_extension_len);
early_data_attempted_ = SSL_early_callback_ctx_extension_get(
client_hello, TLSEXT_TYPE_early_data, &unused_extension_bytes,
&unused_extension_len);
int use_alps_new_codepoint = 0;
#if BORINGSSL_API_VERSION >= 27
if (GetQuicReloadableFlag(quic_gfe_allow_alps_new_codepoint)) {
QUIC_RELOADABLE_FLAG_COUNT(quic_gfe_allow_alps_new_codepoint);
alps_new_codepoint_received_ = SSL_early_callback_ctx_extension_get(
client_hello, TLSEXT_TYPE_application_settings,
&unused_extension_bytes, &unused_extension_len);
if (alps_new_codepoint_received_) {
QUIC_CODE_COUNT(quic_gfe_alps_use_new_codepoint);
use_alps_new_codepoint = 1;
}
QUIC_DLOG(INFO) << "ALPS use new codepoint: " << use_alps_new_codepoint;
SSL_set_alps_use_new_codepoint(ssl(), use_alps_new_codepoint);
}
#endif
if (use_alps_new_codepoint == 0) {
QUIC_CODE_COUNT(quic_gfe_alps_use_old_codepoint);
}
}
const char* hostname = SSL_get_servername(ssl(), TLSEXT_NAMETYPE_host_name);
if (hostname) {
crypto_negotiated_params_->sni =
QuicHostnameUtils::NormalizeHostname(hostname);
if (!ValidateHostname(hostname)) {
if (GetQuicReloadableFlag(quic_new_error_code_for_invalid_hostname)) {
QUIC_RELOADABLE_FLAG_COUNT(quic_new_error_code_for_invalid_hostname);
CloseConnection(QUIC_HANDSHAKE_FAILED_INVALID_HOSTNAME,
"invalid hostname");
} else {
set_extra_error_details("select_cert_error: invalid hostname");
}
return ssl_select_cert_error;
}
if (hostname != crypto_negotiated_params_->sni) {
QUIC_CODE_COUNT(quic_tls_server_hostname_diff);
QUIC_LOG_EVERY_N_SEC(WARNING, 300)
<< "Raw and normalized hostnames differ, but both are valid SNIs. "
"raw hostname:"
<< hostname << ", normalized:" << crypto_negotiated_params_->sni;
} else {
QUIC_CODE_COUNT(quic_tls_server_hostname_same);
}
} else {
QUIC_LOG(INFO) << "No hostname indicated in SNI";
}
std::string error_details;
if (!ProcessTransportParameters(client_hello, &error_details)) {
CloseConnection(QUIC_HANDSHAKE_FAILED, error_details);
return ssl_select_cert_error;
}
OverrideQuicConfigDefaults(session()->config());
session()->OnConfigNegotiated();
auto set_transport_params_result = SetTransportParameters();
if (!set_transport_params_result.success) {
set_extra_error_details("select_cert_error: set tp failure");
return ssl_select_cert_error;
}
bssl::UniquePtr<uint8_t> ssl_capabilities;
size_t ssl_capabilities_len = 0;
absl::string_view ssl_capabilities_view;
if (CryptoUtils::GetSSLCapabilities(ssl(), &ssl_capabilities,
&ssl_capabilities_len)) {
ssl_capabilities_view =
absl::string_view(reinterpret_cast<const char*>(ssl_capabilities.get()),
ssl_capabilities_len);
}
SetApplicationSettingsResult alps_result =
SetApplicationSettings(AlpnForVersion(session()->version()));
if (!alps_result.success) {
set_extra_error_details("select_cert_error: set alps failure");
return ssl_select_cert_error;
}
if (!session()->connection()->connected()) {
select_cert_status_ = QUIC_FAILURE;
return ssl_select_cert_error;
}
can_disable_resumption_ = false;
const QuicAsyncStatus status = proof_source_handle_->SelectCertificate(
session()->connection()->self_address().Normalized(),
session()->connection()->peer_address().Normalized(),
session()->connection()->GetOriginalDestinationConnectionId(),
ssl_capabilities_view, crypto_negotiated_params_->sni,
absl::string_view(
reinterpret_cast<const char*>(client_hello->client_hello),
client_hello->client_hello_len),
AlpnForVersion(session()->version()), std::move(alps_result.alps_buffer),
set_transport_params_result.quic_transport_params,
set_transport_params_result.early_data_context,
tls_connection_.ssl_config());
QUICHE_DCHECK_EQ(status, *select_cert_status());
if (status == QUIC_PENDING) {
set_expected_ssl_error(SSL_ERROR_PENDING_CERTIFICATE);
if (async_op_timer_.has_value()) {
QUIC_CODE_COUNT(quic_tls_server_selecting_cert_while_another_op_pending);
}
async_op_timer_ = QuicTimeAccumulator();
async_op_timer_->Start(now());
return ssl_select_cert_retry;
}
if (status == QUIC_FAILURE) {
set_extra_error_details("select_cert_error: proof_source_handle failure");
return ssl_select_cert_error;
}
return ssl_select_cert_success;
}
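// EarlySelectCertCallback is re-entrant by design: the first invocation
// records QUIC_PENDING in |select_cert_status_| and may return
// ssl_select_cert_retry while the proof source works asynchronously. Once
// OnSelectCertificateDone resolves the status, BoringSSL re-invokes the
// callback and the early return at the top translates the cached
// QUIC_SUCCESS or QUIC_FAILURE straight into the SSL result.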
void TlsServerHandshaker::OnSelectCertificateDone(
bool ok, bool is_sync, SSLConfig ssl_config,
absl::string_view ticket_encryption_key, bool cert_matched_sni) {
QUIC_DVLOG(1) << "OnSelectCertificateDone. ok:" << ok
<< ", is_sync:" << is_sync << ", len(ticket_encryption_key):"
<< ticket_encryption_key.size();
std::optional<QuicConnectionContextSwitcher> context_switcher;
if (!is_sync) {
context_switcher.emplace(connection_context());
}
QUIC_TRACESTRING(absl::StrCat(
"TLS select certificate done: ok:", ok,
", len(ticket_encryption_key):", ticket_encryption_key.size()));
ticket_encryption_key_ = std::string(ticket_encryption_key);
select_cert_status_ = QUIC_FAILURE;
cert_matched_sni_ = cert_matched_sni;
const QuicDelayedSSLConfig& delayed_ssl_config = absl::visit(
[](const auto& config) { return config.delayed_ssl_config; }, ssl_config);
if (delayed_ssl_config.quic_transport_parameters.has_value()) {
if (TransportParametersMatch(
absl::MakeSpan(*delayed_ssl_config.quic_transport_parameters))) {
if (SSL_set_quic_transport_params(
ssl(), delayed_ssl_config.quic_transport_parameters->data(),
delayed_ssl_config.quic_transport_parameters->size()) != 1) {
QUIC_DVLOG(1) << "SSL_set_quic_transport_params override failed";
}
} else {
QUIC_DVLOG(1)
<< "QUIC transport parameters mismatch with ProofSourceHandle";
}
}
if (delayed_ssl_config.client_cert_mode.has_value()) {
tls_connection_.SetClientCertMode(*delayed_ssl_config.client_cert_mode);
QUIC_DVLOG(1) << "client_cert_mode after cert selection: "
<< client_cert_mode();
}
if (ok) {
if (auto* local_config = absl::get_if<LocalSSLConfig>(&ssl_config);
local_config != nullptr) {
if (local_config->chain && !local_config->chain->certs.empty()) {
tls_connection_.SetCertChain(
local_config->chain->ToCryptoBuffers().value);
select_cert_status_ = QUIC_SUCCESS;
} else {
QUIC_DLOG(ERROR) << "No certs provided for host '"
<< crypto_negotiated_params_->sni
<< "', server_address:"
<< session()->connection()->self_address()
<< ", client_address:"
<< session()->connection()->peer_address();
}
} else if (auto* hints_config = absl::get_if<HintsSSLConfig>(&ssl_config);
hints_config != nullptr) {
if (hints_config->configure_ssl) {
if (const absl::Status status = tls_connection_.ConfigureSSL(
std::move(hints_config->configure_ssl));
!status.ok()) {
QUIC_CODE_COUNT(quic_tls_server_set_handshake_hints_failed);
QUIC_DVLOG(1) << "SSL_set_handshake_hints failed: " << status;
}
select_cert_status_ = QUIC_SUCCESS;
}
} else {
QUIC_DLOG(FATAL) << "Neither branch hit";
}
}
QuicConnectionStats::TlsServerOperationStats select_cert_stats;
select_cert_stats.success = (select_cert_status_ == QUIC_SUCCESS);
if (!select_cert_stats.success) {
set_extra_error_details(
"select_cert_error: proof_source_handle async failure");
}
QUICHE_DCHECK_NE(is_sync, async_op_timer_.has_value());
if (async_op_timer_.has_value()) {
async_op_timer_->Stop(now());
select_cert_stats.async_latency = async_op_timer_->GetTotalElapsedTime();
async_op_timer_.reset();
RECORD_LATENCY_IN_US("tls_server_async_select_cert_latency_us",
select_cert_stats.async_latency,
"Async select cert latency in microseconds");
}
connection_stats().tls_server_select_cert_stats =
std::move(select_cert_stats);
const int last_expected_ssl_error = expected_ssl_error();
set_expected_ssl_error(SSL_ERROR_WANT_READ);
if (!is_sync) {
QUICHE_DCHECK_EQ(last_expected_ssl_error, SSL_ERROR_PENDING_CERTIFICATE);
AdvanceHandshakeFromCallback();
}
}
bool TlsServerHandshaker::WillNotCallComputeSignature() const {
return SSL_can_release_private_key(ssl());
}
bool TlsServerHandshaker::ValidateHostname(const std::string& hostname) const {
if (!QuicHostnameUtils::IsValidSNI(hostname)) {
QUIC_DLOG(ERROR) << "Invalid SNI provided: \"" << hostname << "\"";
return false;
}
return true;
}
int TlsServerHandshaker::TlsExtServernameCallback(int* ) {
return SSL_TLSEXT_ERR_OK;
}
int TlsServerHandshaker::SelectAlpn(const uint8_t** out, uint8_t* out_len,
const uint8_t* in, unsigned in_len) {
*out_len = 0;
*out = nullptr;
if (in_len == 0) {
QUIC_DLOG(ERROR) << "No ALPN provided by client";
return SSL_TLSEXT_ERR_NOACK;
}
CBS all_alpns;
CBS_init(&all_alpns, in, in_len);
std::vector<absl::string_view> alpns;
while (CBS_len(&all_alpns) > 0) {
CBS alpn;
if (!CBS_get_u8_length_prefixed(&all_alpns, &alpn)) {
QUIC_DLOG(ERROR) << "Failed to parse ALPN length";
return SSL_TLSEXT_ERR_NOACK;
}
const size_t alpn_length = CBS_len(&alpn);
if (alpn_length == 0) {
QUIC_DLOG(ERROR) << "Received invalid zero-length ALPN";
return SSL_TLSEXT_ERR_NOACK;
}
alpns.emplace_back(reinterpret_cast<const char*>(CBS_data(&alpn)),
alpn_length);
}
auto selected_alpn = session()->SelectAlpn(alpns);
if (selected_alpn == alpns.end()) {
QUIC_DLOG(ERROR) << "No known ALPN provided by client";
return SSL_TLSEXT_ERR_NOACK;
}
session()->OnAlpnSelected(*selected_alpn);
valid_alpn_received_ = true;
*out_len = selected_alpn->size();
*out = reinterpret_cast<const uint8_t*>(selected_alpn->data());
return SSL_TLSEXT_ERR_OK;
}
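// The CBS loop above parses the ALPN extension's wire format: a sequence of
// u8-length-prefixed protocol names. For example (illustrative bytes only),
// a client offering "h3" and "h3-29" would send:
//
//   0x02 'h' '3'   0x05 'h' '3' '-' '2' '9'
//
// A zero-length entry or truncated length prefix is rejected with
// SSL_TLSEXT_ERR_NOACK, as is an offer list containing no ALPN the session
// recognizes.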
TlsServerHandshaker::SetApplicationSettingsResult
TlsServerHandshaker::SetApplicationSettings(absl::string_view alpn) {
TlsServerHandshaker::SetApplicationSettingsResult result;
const std::string& hostname = crypto_negotiated_params_->sni;
std::string accept_ch_value = GetAcceptChValueForHostname(hostname);
std::string origin = absl::StrCat("https:
uint16_t port = session()->self_address().port();
if (port != kDefaultPort) {
QUIC_CODE_COUNT(quic_server_alps_non_default_port);
absl::StrAppend(&origin, ":", port);
}
if (!accept_ch_value.empty()) {
AcceptChFrame frame{{{std::move(origin), std::move(accept_ch_value)}}};
result.alps_buffer = HttpEncoder::SerializeAcceptChFrame(frame);
}
const std::string& alps = result.alps_buffer;
if (SSL_add_application_settings(
ssl(), reinterpret_cast<const uint8_t*>(alpn.data()), alpn.size(),
reinterpret_cast<const uint8_t*>(alps.data()), alps.size()) != 1) {
QUIC_DLOG(ERROR) << "Failed to enable ALPS";
result.success = false;
} else {
result.success = true;
}
return result;
}
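// The origin assembled above follows the ACCEPT_CH convention: "https://"
// plus the SNI hostname, with ":<port>" appended only when the server is not
// on the default HTTPS port (443). The serialized frame, if any, rides to
// the client over TLS ALPS via SSL_add_application_settings.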
SSL* TlsServerHandshaker::GetSsl() const { return ssl(); }
bool TlsServerHandshaker::IsCryptoFrameExpectedForEncryptionLevel(
EncryptionLevel level) const {
return level != ENCRYPTION_ZERO_RTT;
}
EncryptionLevel TlsServerHandshaker::GetEncryptionLevelToSendCryptoDataOfSpace(
PacketNumberSpace space) const {
switch (space) {
case INITIAL_DATA:
return ENCRYPTION_INITIAL;
case HANDSHAKE_DATA:
return ENCRYPTION_HANDSHAKE;
case APPLICATION_DATA:
return ENCRYPTION_FORWARD_SECURE;
default:
QUICHE_DCHECK(false);
return NUM_ENCRYPTION_LEVELS;
}
}
} | #include "quiche/quic/core/tls_server_handshaker.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/certificate_util.h"
#include "quiche/quic/core/crypto/client_proof_source.h"
#include "quiche/quic/core/crypto/proof_source.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_crypto_client_stream.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_client_handshaker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/failing_proof_source.h"
#include "quiche/quic/test_tools/fake_proof_source.h"
#include "quiche/quic/test_tools/fake_proof_source_handle.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_session_cache.h"
#include "quiche/quic/test_tools/test_certificates.h"
#include "quiche/quic/test_tools/test_ticket_crypter.h"
namespace quic {
class QuicConnection;
class QuicStream;
}
using testing::_;
using testing::HasSubstr;
using testing::NiceMock;
using testing::Return;
namespace quic {
namespace test {
namespace {
const char kServerHostname[] = "test.example.com";
const uint16_t kServerPort = 443;
struct TestParams {
ParsedQuicVersion version;
bool disable_resumption;
};
std::string PrintToString(const TestParams& p) {
return absl::StrCat(
ParsedQuicVersionToString(p.version), "_",
(p.disable_resumption ? "ResumptionDisabled" : "ResumptionEnabled"));
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
for (const auto& version : AllSupportedVersionsWithTls()) {
for (bool disable_resumption : {false, true}) {
params.push_back(TestParams{version, disable_resumption});
}
}
return params;
}
class TestTlsServerHandshaker : public TlsServerHandshaker {
public:
static constexpr TransportParameters::TransportParameterId
kFailHandshakeParam{0xFFEACA};
TestTlsServerHandshaker(QuicSession* session,
const QuicCryptoServerConfig* crypto_config)
: TlsServerHandshaker(session, crypto_config),
proof_source_(crypto_config->proof_source()) {
ON_CALL(*this, MaybeCreateProofSourceHandle())
.WillByDefault(testing::Invoke(
this, &TestTlsServerHandshaker::RealMaybeCreateProofSourceHandle));
ON_CALL(*this, OverrideQuicConfigDefaults(_))
.WillByDefault(testing::Invoke(
this, &TestTlsServerHandshaker::RealOverrideQuicConfigDefaults));
}
MOCK_METHOD(std::unique_ptr<ProofSourceHandle>, MaybeCreateProofSourceHandle,
(), (override));
MOCK_METHOD(void, OverrideQuicConfigDefaults, (QuicConfig * config),
(override));
void SetupProofSourceHandle(
FakeProofSourceHandle::Action select_cert_action,
FakeProofSourceHandle::Action compute_signature_action,
      QuicDelayedSSLConfig delayed_ssl_config = QuicDelayedSSLConfig()) {
    EXPECT_CALL(*this, MaybeCreateProofSourceHandle())
        .WillOnce(
            testing::Invoke([this, select_cert_action, compute_signature_action,
                             delayed_ssl_config]() {
              auto handle = std::make_unique<FakeProofSourceHandle>(
                  proof_source_, this, select_cert_action,
                  compute_signature_action, delayed_ssl_config);
fake_proof_source_handle_ = handle.get();
return handle;
}));
}
FakeProofSourceHandle* fake_proof_source_handle() {
return fake_proof_source_handle_;
}
bool received_client_cert() const { return received_client_cert_; }
using TlsServerHandshaker::AdvanceHandshake;
using TlsServerHandshaker::expected_ssl_error;
protected:
QuicAsyncStatus VerifyCertChain(
const std::vector<std::string>& certs, std::string* error_details,
std::unique_ptr<ProofVerifyDetails>* details, uint8_t* out_alert,
std::unique_ptr<ProofVerifierCallback> callback) override {
received_client_cert_ = true;
return TlsServerHandshaker::VerifyCertChain(certs, error_details, details,
out_alert, std::move(callback));
}
bool ProcessAdditionalTransportParameters(
const TransportParameters& params) override {
return !params.custom_parameters.contains(kFailHandshakeParam);
}
private:
std::unique_ptr<ProofSourceHandle> RealMaybeCreateProofSourceHandle() {
return TlsServerHandshaker::MaybeCreateProofSourceHandle();
}
void RealOverrideQuicConfigDefaults(QuicConfig* config) {
return TlsServerHandshaker::OverrideQuicConfigDefaults(config);
}
FakeProofSourceHandle* fake_proof_source_handle_ = nullptr;
ProofSource* proof_source_ = nullptr;
bool received_client_cert_ = false;
};
class TlsServerHandshakerTestSession : public TestQuicSpdyServerSession {
public:
using TestQuicSpdyServerSession::TestQuicSpdyServerSession;
std::unique_ptr<QuicCryptoServerStreamBase> CreateQuicCryptoServerStream(
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* ) override {
if (connection()->version().handshake_protocol == PROTOCOL_TLS1_3) {
return std::make_unique<NiceMock<TestTlsServerHandshaker>>(this,
crypto_config);
}
QUICHE_CHECK(false) << "Unsupported handshake protocol: "
<< connection()->version().handshake_protocol;
return nullptr;
}
};
class TlsServerHandshakerTest : public QuicTestWithParam<TestParams> {
public:
TlsServerHandshakerTest()
: server_compressed_certs_cache_(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize),
server_id_(kServerHostname, kServerPort),
supported_versions_({GetParam().version}) {
SetQuicFlag(quic_disable_server_tls_resumption,
GetParam().disable_resumption);
client_crypto_config_ = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting(),
std::make_unique<test::SimpleSessionCache>());
InitializeServerConfig();
InitializeServer();
InitializeFakeClient();
}
~TlsServerHandshakerTest() override {
server_session_.reset();
client_session_.reset();
helpers_.clear();
alarm_factories_.clear();
}
void InitializeServerConfig() {
auto ticket_crypter = std::make_unique<TestTicketCrypter>();
ticket_crypter_ = ticket_crypter.get();
auto proof_source = std::make_unique<FakeProofSource>();
proof_source_ = proof_source.get();
proof_source_->SetTicketCrypter(std::move(ticket_crypter));
server_crypto_config_ = std::make_unique<QuicCryptoServerConfig>(
QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
std::move(proof_source), KeyExchangeSource::Default());
}
void InitializeServerConfigWithFailingProofSource() {
server_crypto_config_ = std::make_unique<QuicCryptoServerConfig>(
QuicCryptoServerConfig::TESTING, QuicRandom::GetInstance(),
std::make_unique<FailingProofSource>(), KeyExchangeSource::Default());
}
void CreateTlsServerHandshakerTestSession(MockQuicConnectionHelper* helper,
MockAlarmFactory* alarm_factory) {
server_connection_ = new PacketSavingConnection(
helper, alarm_factory, Perspective::IS_SERVER,
ParsedVersionOfIndex(supported_versions_, 0));
TlsServerHandshakerTestSession* server_session =
new TlsServerHandshakerTestSession(
server_connection_, DefaultQuicConfig(), supported_versions_,
server_crypto_config_.get(), &server_compressed_certs_cache_);
server_session->set_client_cert_mode(initial_client_cert_mode_);
server_session->Initialize();
server_connection_->AdvanceTime(QuicTime::Delta::FromSeconds(100000));
QUICHE_CHECK(server_session);
server_session_.reset(server_session);
}
void InitializeServerWithFakeProofSourceHandle() {
helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
CreateTlsServerHandshakerTestSession(helpers_.back().get(),
alarm_factories_.back().get());
server_handshaker_ = static_cast<NiceMock<TestTlsServerHandshaker>*>(
server_session_->GetMutableCryptoStream());
EXPECT_CALL(*server_session_->helper(), CanAcceptClientHello(_, _, _, _, _))
.Times(testing::AnyNumber());
EXPECT_CALL(*server_session_, SelectAlpn(_))
.WillRepeatedly([this](const std::vector<absl::string_view>& alpns) {
return std::find(
alpns.cbegin(), alpns.cend(),
AlpnForVersion(server_session_->connection()->version()));
});
crypto_test_utils::SetupCryptoServerConfigForTest(
server_connection_->clock(), server_connection_->random_generator(),
server_crypto_config_.get());
}
void InitializeServer() {
TestQuicSpdyServerSession* server_session = nullptr;
helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
CreateServerSessionForTest(
server_id_, QuicTime::Delta::FromSeconds(100000), supported_versions_,
helpers_.back().get(), alarm_factories_.back().get(),
server_crypto_config_.get(), &server_compressed_certs_cache_,
&server_connection_, &server_session);
QUICHE_CHECK(server_session);
server_session_.reset(server_session);
server_handshaker_ = nullptr;
EXPECT_CALL(*server_session_->helper(), CanAcceptClientHello(_, _, _, _, _))
.Times(testing::AnyNumber());
EXPECT_CALL(*server_session_, SelectAlpn(_))
.WillRepeatedly([this](const std::vector<absl::string_view>& alpns) {
return std::find(
alpns.cbegin(), alpns.cend(),
AlpnForVersion(server_session_->connection()->version()));
});
crypto_test_utils::SetupCryptoServerConfigForTest(
server_connection_->clock(), server_connection_->random_generator(),
server_crypto_config_.get());
}
QuicCryptoServerStreamBase* server_stream() {
return server_session_->GetMutableCryptoStream();
}
QuicCryptoClientStream* client_stream() {
return client_session_->GetMutableCryptoStream();
}
void InitializeFakeClient() {
TestQuicSpdyClientSession* client_session = nullptr;
helpers_.push_back(std::make_unique<NiceMock<MockQuicConnectionHelper>>());
alarm_factories_.push_back(std::make_unique<MockAlarmFactory>());
CreateClientSessionForTest(
server_id_, QuicTime::Delta::FromSeconds(100000), supported_versions_,
helpers_.back().get(), alarm_factories_.back().get(),
client_crypto_config_.get(), &client_connection_, &client_session);
const std::string default_alpn =
AlpnForVersion(client_connection_->version());
ON_CALL(*client_session, GetAlpnsToOffer())
.WillByDefault(Return(std::vector<std::string>({default_alpn})));
QUICHE_CHECK(client_session);
client_session_.reset(client_session);
moved_messages_counts_ = {0, 0};
}
void CompleteCryptoHandshake() {
while (!client_stream()->one_rtt_keys_available() ||
!server_stream()->one_rtt_keys_available()) {
auto previous_moved_messages_counts = moved_messages_counts_;
AdvanceHandshakeWithFakeClient();
ASSERT_NE(previous_moved_messages_counts, moved_messages_counts_);
}
}
void AdvanceHandshakeWithFakeClient() {
QUICHE_CHECK(server_connection_);
QUICHE_CHECK(client_session_ != nullptr);
EXPECT_CALL(*client_session_, OnProofValid(_)).Times(testing::AnyNumber());
EXPECT_CALL(*client_session_, OnProofVerifyDetailsAvailable(_))
.Times(testing::AnyNumber());
EXPECT_CALL(*client_connection_, OnCanWrite()).Times(testing::AnyNumber());
EXPECT_CALL(*server_connection_, OnCanWrite()).Times(testing::AnyNumber());
if (moved_messages_counts_.first == 0) {
client_stream()->CryptoConnect();
}
moved_messages_counts_ = crypto_test_utils::AdvanceHandshake(
client_connection_, client_stream(), moved_messages_counts_.first,
server_connection_, server_stream(), moved_messages_counts_.second);
}
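  // AdvanceHandshakeWithFakeClient moves one round of pending crypto
  // messages in each direction and records progress in
  // |moved_messages_counts_|; CompleteCryptoHandshake relies on that counter
  // strictly increasing between iterations to detect a stalled handshake
  // instead of looping forever.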
void ExpectHandshakeSuccessful() {
EXPECT_TRUE(client_stream()->one_rtt_keys_available());
EXPECT_TRUE(client_stream()->encryption_established());
EXPECT_TRUE(server_stream()->one_rtt_keys_available());
EXPECT_TRUE(server_stream()->encryption_established());
EXPECT_EQ(HANDSHAKE_COMPLETE, client_stream()->GetHandshakeState());
EXPECT_EQ(HANDSHAKE_CONFIRMED, server_stream()->GetHandshakeState());
const auto& client_crypto_params =
client_stream()->crypto_negotiated_params();
const auto& server_crypto_params =
server_stream()->crypto_negotiated_params();
EXPECT_NE(0, client_crypto_params.cipher_suite);
EXPECT_NE(0, client_crypto_params.key_exchange_group);
EXPECT_NE(0, client_crypto_params.peer_signature_algorithm);
EXPECT_EQ(client_crypto_params.cipher_suite,
server_crypto_params.cipher_suite);
EXPECT_EQ(client_crypto_params.key_exchange_group,
server_crypto_params.key_exchange_group);
EXPECT_EQ(0, server_crypto_params.peer_signature_algorithm);
}
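  // The asymmetric final check above is worth noting: the client records a
  // nonzero peer_signature_algorithm because the server signs the handshake,
  // while the server-side value stays 0, presumably because these handshakes
  // complete without verifying a client certificate signature.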
FakeProofSourceHandle::SelectCertArgs last_select_cert_args() const {
QUICHE_CHECK(server_handshaker_ &&
server_handshaker_->fake_proof_source_handle());
QUICHE_CHECK(!server_handshaker_->fake_proof_source_handle()
->all_select_cert_args()
.empty());
return server_handshaker_->fake_proof_source_handle()
->all_select_cert_args()
.back();
}
FakeProofSourceHandle::ComputeSignatureArgs last_compute_signature_args()
const {
QUICHE_CHECK(server_handshaker_ &&
server_handshaker_->fake_proof_source_handle());
QUICHE_CHECK(!server_handshaker_->fake_proof_source_handle()
->all_compute_signature_args()
.empty());
return server_handshaker_->fake_proof_source_handle()
->all_compute_signature_args()
.back();
}
protected:
bool SetupClientCert() {
auto client_proof_source = std::make_unique<DefaultClientProofSource>();
CertificatePrivateKey client_cert_key(
MakeKeyPairForSelfSignedCertificate());
CertificateOptions options;
options.subject = "CN=subject";
options.serial_number = 0x12345678;
options.validity_start = {2020, 1, 1, 0, 0, 0};
options.validity_end = {2049, 12, 31, 0, 0, 0};
std::string der_cert =
CreateSelfSignedCertificate(*client_cert_key.private_key(), options);
quiche::QuicheReferenceCountedPointer<ClientProofSource::Chain>
client_cert_chain(new ClientProofSource::Chain({der_cert}));
if (!client_proof_source->AddCertAndKey({"*"}, client_cert_chain,
std::move(client_cert_key))) {
return false;
}
client_crypto_config_->set_proof_source(std::move(client_proof_source));
return true;
}
std::vector<std::unique_ptr<MockQuicConnectionHelper>> helpers_;
std::vector<std::unique_ptr<MockAlarmFactory>> alarm_factories_;
PacketSavingConnection* server_connection_;
std::unique_ptr<TestQuicSpdyServerSession> server_session_;
NiceMock<TestTlsServerHandshaker>* server_handshaker_ = nullptr;
TestTicketCrypter* ticket_crypter_;
FakeProofSource* proof_source_;
std::unique_ptr<QuicCryptoServerConfig> server_crypto_config_;
QuicCompressedCertsCache server_compressed_certs_cache_;
QuicServerId server_id_;
ClientCertMode initial_client_cert_mode_ = ClientCertMode::kNone;
PacketSavingConnection* client_connection_;
std::unique_ptr<QuicCryptoClientConfig> client_crypto_config_;
std::unique_ptr<TestQuicSpdyClientSession> client_session_;
crypto_test_utils::FakeClientOptions client_options_;
std::pair<size_t, size_t> moved_messages_counts_ = {0, 0};
ParsedQuicVersionVector supported_versions_;
};
INSTANTIATE_TEST_SUITE_P(TlsServerHandshakerTests, TlsServerHandshakerTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
TEST_P(TlsServerHandshakerTest, NotInitiallyConnected) {
EXPECT_FALSE(server_stream()->encryption_established());
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
TEST_P(TlsServerHandshakerTest, ConnectedAfterTlsHandshake) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, server_stream()->handshake_protocol());
ExpectHandshakeSuccessful();
}
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertSuccess) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
}
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertFailure) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::FAIL_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
EXPECT_EQ(moved_messages_counts_.second, 0u);
EXPECT_EQ(server_handshaker_->extra_error_details(),
"select_cert_error: proof_source_handle async failure");
}
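// With both stages asynchronous, the handshaker blocks twice in sequence:
// first on certificate selection (SSL_ERROR_PENDING_CERTIFICATE), then on
// the private key operation (SSL_ERROR_WANT_PRIVATE_KEY_OPERATION).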
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSelectCertAndSignature) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_ASYNC);
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
EXPECT_EQ(server_handshaker_->expected_ssl_error(),
SSL_ERROR_PENDING_CERTIFICATE);
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
EXPECT_EQ(server_handshaker_->expected_ssl_error(),
SSL_ERROR_WANT_PRIVATE_KEY_OPERATION);
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
}
TEST_P(TlsServerHandshakerTest, HandshakeWithAsyncSignature) {
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
proof_source_->Activate();
AdvanceHandshakeWithFakeClient();
ASSERT_EQ(proof_source_->NumPendingCallbacks(), 1);
proof_source_->InvokePendingCallback(0);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
}
TEST_P(TlsServerHandshakerTest, CancelPendingSelectCert) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->CancelOutstandingCallbacks();
ASSERT_FALSE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
}
TEST_P(TlsServerHandshakerTest, CancelPendingSignature) {
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
proof_source_->Activate();
AdvanceHandshakeWithFakeClient();
ASSERT_EQ(proof_source_->NumPendingCallbacks(), 1);
server_session_ = nullptr;
proof_source_->InvokePendingCallback(0);
}
TEST_P(TlsServerHandshakerTest, ExtractSNI) {
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_EQ(server_stream()->crypto_negotiated_params().sni,
"test.example.com");
}
TEST_P(TlsServerHandshakerTest, ServerConnectionIdPassedToSelectCert) {
InitializeServerWithFakeProofSourceHandle();
server_session_->set_early_data_enabled(false);
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_EQ(last_select_cert_args().original_connection_id, TestConnectionId());
}
TEST_P(TlsServerHandshakerTest, HostnameForCertSelectionAndComputeSignature) {
server_id_ = QuicServerId("tEsT.EXAMPLE.CoM", kServerPort);
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_EQ(server_stream()->crypto_negotiated_params().sni,
"test.example.com");
EXPECT_EQ(last_select_cert_args().hostname, "test.example.com");
EXPECT_EQ(last_compute_signature_args().hostname, "test.example.com");
}
TEST_P(TlsServerHandshakerTest, SSLConfigForCertSelection) {
InitializeServerWithFakeProofSourceHandle();
server_session_->set_early_data_enabled(false);
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(last_select_cert_args().ssl_config.early_data_enabled);
}
TEST_P(TlsServerHandshakerTest, ConnectionClosedOnTlsError) {
EXPECT_CALL(*server_connection_,
CloseConnection(QUIC_HANDSHAKE_FAILED, _, _, _));
  char bogus_handshake_message[] = {1, 0, 0, 0};
QuicConnection::ScopedPacketFlusher flusher(server_connection_);
server_stream()->crypto_message_parser()->ProcessInput(
absl::string_view(bogus_handshake_message,
ABSL_ARRAYSIZE(bogus_handshake_message)),
ENCRYPTION_INITIAL);
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
TEST_P(TlsServerHandshakerTest, ClientSendingBadALPN) {
const std::string kTestBadClientAlpn = "bad-client-alpn";
EXPECT_CALL(*client_session_, GetAlpnsToOffer())
.WillOnce(Return(std::vector<std::string>({kTestBadClientAlpn})));
EXPECT_CALL(
*server_connection_,
CloseConnection(
QUIC_HANDSHAKE_FAILED,
static_cast<QuicIetfTransportErrorCodes>(CRYPTO_ERROR_FIRST + 120),
HasSubstr("TLS handshake failure (ENCRYPTION_INITIAL) 120: "
"no application protocol"),
_));
AdvanceHandshakeWithFakeClient();
EXPECT_FALSE(client_stream()->one_rtt_keys_available());
EXPECT_FALSE(client_stream()->encryption_established());
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
EXPECT_FALSE(server_stream()->encryption_established());
}
TEST_P(TlsServerHandshakerTest, CustomALPNNegotiation) {
EXPECT_CALL(*client_connection_, CloseConnection(_, _, _)).Times(0);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
const std::string kTestAlpn = "A Custom ALPN Value";
const std::vector<std::string> kTestAlpns(
{"foo", "bar", kTestAlpn, "something else"});
EXPECT_CALL(*client_session_, GetAlpnsToOffer())
.WillRepeatedly(Return(kTestAlpns));
EXPECT_CALL(*server_session_, SelectAlpn(_))
.WillOnce(
[kTestAlpn, kTestAlpns](const std::vector<absl::string_view>& alpns) {
EXPECT_THAT(alpns, testing::ElementsAreArray(kTestAlpns));
return std::find(alpns.cbegin(), alpns.cend(), kTestAlpn);
});
EXPECT_CALL(*client_session_, OnAlpnSelected(absl::string_view(kTestAlpn)));
EXPECT_CALL(*server_session_, OnAlpnSelected(absl::string_view(kTestAlpn)));
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
}
TEST_P(TlsServerHandshakerTest, RejectInvalidSNI) {
SetQuicFlag(quic_client_allow_invalid_sni_for_test, true);
server_id_ = QuicServerId("invalid!.example.com", kServerPort);
InitializeFakeClient();
AdvanceHandshakeWithFakeClient();
EXPECT_FALSE(server_stream()->encryption_established());
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
}
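// The resumption tests below run two full handshakes against the same
// crypto configs: the first issues a session ticket, the second tries to
// resume with it. Whether resumption actually succeeds is controlled by
// the disable_resumption test parameter.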
TEST_P(TlsServerHandshakerTest, Resumption) {
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsResumption());
EXPECT_FALSE(server_stream()->ResumptionAttempted());
InitializeServer();
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_NE(client_stream()->IsResumption(), GetParam().disable_resumption);
EXPECT_NE(server_stream()->IsResumption(), GetParam().disable_resumption);
EXPECT_NE(server_stream()->ResumptionAttempted(),
GetParam().disable_resumption);
}
TEST_P(TlsServerHandshakerTest, ResumptionWithAsyncDecryptCallback) {
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
ticket_crypter_->SetRunCallbacksAsync(true);
InitializeServer();
InitializeFakeClient();
AdvanceHandshakeWithFakeClient();
if (GetParam().disable_resumption) {
ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 0u);
return;
}
ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 1u);
ticket_crypter_->RunPendingCallback(0);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_TRUE(client_stream()->IsResumption());
EXPECT_TRUE(server_stream()->IsResumption());
EXPECT_TRUE(server_stream()->ResumptionAttempted());
}
TEST_P(TlsServerHandshakerTest, ResumptionWithPlaceholderTicket) {
InitializeFakeClient();
ticket_crypter_->set_fail_encrypt(true);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsResumption());
EXPECT_FALSE(server_stream()->ResumptionAttempted());
InitializeServer();
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsResumption());
EXPECT_NE(server_stream()->ResumptionAttempted(),
GetParam().disable_resumption);
}
TEST_P(TlsServerHandshakerTest, AdvanceHandshakeDuringAsyncDecryptCallback) {
if (GetParam().disable_resumption) {
return;
}
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
ticket_crypter_->SetRunCallbacksAsync(true);
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
InitializeFakeClient();
AdvanceHandshakeWithFakeClient();
ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 1u);
{
QuicConnection::ScopedPacketFlusher flusher(server_connection_);
server_handshaker_->AdvanceHandshake();
}
server_session_ = nullptr;
ticket_crypter_->RunPendingCallback(0);
}
TEST_P(TlsServerHandshakerTest, ResumptionWithFailingDecryptCallback) {
if (GetParam().disable_resumption) {
return;
}
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
ticket_crypter_->set_fail_decrypt(true);
InitializeServer();
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsResumption());
EXPECT_TRUE(server_stream()->ResumptionAttempted());
}
TEST_P(TlsServerHandshakerTest, ResumptionWithFailingAsyncDecryptCallback) {
if (GetParam().disable_resumption) {
return;
}
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
ticket_crypter_->set_fail_decrypt(true);
ticket_crypter_->SetRunCallbacksAsync(true);
InitializeServer();
InitializeFakeClient();
AdvanceHandshakeWithFakeClient();
ASSERT_EQ(ticket_crypter_->NumPendingCallbacks(), 1u);
ticket_crypter_->RunPendingCallback(0);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsResumption());
EXPECT_TRUE(server_stream()->ResumptionAttempted());
}
TEST_P(TlsServerHandshakerTest, HandshakeFailsWithFailingProofSource) {
InitializeServerConfigWithFailingProofSource();
InitializeServer();
InitializeFakeClient();
AdvanceHandshakeWithFakeClient();
EXPECT_EQ(moved_messages_counts_.second, 0u);
}
TEST_P(TlsServerHandshakerTest, ZeroRttResumption) {
std::vector<uint8_t> application_state = {0, 1, 2, 3};
server_stream()->SetServerApplicationStateForResumption(
std::make_unique<ApplicationState>(application_state));
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsZeroRtt());
InitializeServer();
server_stream()->SetServerApplicationStateForResumption(
std::make_unique<ApplicationState>(application_state));
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_NE(client_stream()->IsResumption(), GetParam().disable_resumption);
EXPECT_NE(server_stream()->IsZeroRtt(), GetParam().disable_resumption);
}
TEST_P(TlsServerHandshakerTest, ZeroRttRejectOnApplicationStateChange) {
std::vector<uint8_t> original_application_state = {1, 2};
std::vector<uint8_t> new_application_state = {3, 4};
server_stream()->SetServerApplicationStateForResumption(
std::make_unique<ApplicationState>(original_application_state));
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(client_stream()->IsResumption());
EXPECT_FALSE(server_stream()->IsZeroRtt());
InitializeServer();
server_stream()->SetServerApplicationStateForResumption(
std::make_unique<ApplicationState>(new_application_state));
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_NE(client_stream()->IsResumption(), GetParam().disable_resumption);
EXPECT_FALSE(server_stream()->IsZeroRtt());
}
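// Client-certificate tests: ClientCertMode::kRequest asks the client for a
// certificate but tolerates its absence, while kRequire closes the
// connection with QUIC_TLS_CERTIFICATE_REQUIRED if none is supplied.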
TEST_P(TlsServerHandshakerTest, RequestClientCert) {
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
initial_client_cert_mode_ = ClientCertMode::kRequest;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_TRUE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest,
SetInvalidServerTransportParamsByDelayedSslConfig) {
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
QuicDelayedSSLConfig delayed_ssl_config;
delayed_ssl_config.quic_transport_parameters = {1, 2, 3};
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
FakeProofSourceHandle::Action::DELEGATE_SYNC,
delayed_ssl_config);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(server_handshaker_->fake_proof_source_handle()
->all_compute_signature_args()
.empty());
}
TEST_P(TlsServerHandshakerTest,
SetValidServerTransportParamsByDelayedSslConfig) {
ParsedQuicVersion version = GetParam().version;
TransportParameters server_params;
std::string error_details;
server_params.perspective = quic::Perspective::IS_SERVER;
server_params.legacy_version_information =
TransportParameters::LegacyVersionInformation();
server_params.legacy_version_information.value().supported_versions =
quic::CreateQuicVersionLabelVector(
quic::ParsedQuicVersionVector{version});
server_params.legacy_version_information.value().version =
quic::CreateQuicVersionLabel(version);
server_params.version_information = TransportParameters::VersionInformation();
server_params.version_information.value().chosen_version =
quic::CreateQuicVersionLabel(version);
server_params.version_information.value().other_versions =
quic::CreateQuicVersionLabelVector(
quic::ParsedQuicVersionVector{version});
ASSERT_TRUE(server_params.AreValid(&error_details)) << error_details;
std::vector<uint8_t> server_params_bytes;
ASSERT_TRUE(
SerializeTransportParameters(server_params, &server_params_bytes));
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
QuicDelayedSSLConfig delayed_ssl_config;
delayed_ssl_config.quic_transport_parameters = server_params_bytes;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
FakeProofSourceHandle::Action::DELEGATE_SYNC,
delayed_ssl_config);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(server_handshaker_->fake_proof_source_handle()
->all_compute_signature_args()
.empty());
}
TEST_P(TlsServerHandshakerTest, RequestClientCertByDelayedSslConfig) {
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
QuicDelayedSSLConfig delayed_ssl_config;
delayed_ssl_config.client_cert_mode = ClientCertMode::kRequest;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
FakeProofSourceHandle::Action::DELEGATE_SYNC,
delayed_ssl_config);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_TRUE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest, RequestClientCert_NoCert) {
initial_client_cert_mode_ = ClientCertMode::kRequest;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_FALSE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest, RequestAndRequireClientCert) {
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
initial_client_cert_mode_ = ClientCertMode::kRequire;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_TRUE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest, RequestAndRequireClientCertByDelayedSslConfig) {
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
QuicDelayedSSLConfig delayed_ssl_config;
delayed_ssl_config.client_cert_mode = ClientCertMode::kRequire;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
FakeProofSourceHandle::Action::DELEGATE_SYNC,
delayed_ssl_config);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_TRUE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest, RequestAndRequireClientCert_NoCert) {
initial_client_cert_mode_ = ClientCertMode::kRequire;
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
EXPECT_CALL(*server_connection_,
CloseConnection(QUIC_TLS_CERTIFICATE_REQUIRED, _, _, _));
AdvanceHandshakeWithFakeClient();
AdvanceHandshakeWithFakeClient();
EXPECT_FALSE(server_handshaker_->received_client_cert());
}
TEST_P(TlsServerHandshakerTest, CloseConnectionBeforeSelectCert) {
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
      FakeProofSourceHandle::Action::FAIL_SYNC_DO_NOT_CHECK_CLOSED,
      FakeProofSourceHandle::Action::FAIL_SYNC_DO_NOT_CHECK_CLOSED);
EXPECT_CALL(*server_handshaker_, OverrideQuicConfigDefaults(_))
.WillOnce(testing::Invoke([](QuicConfig* config) {
        QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(config, 0);
}));
EXPECT_CALL(*server_connection_,
CloseConnection(QUIC_ZERO_RTT_RESUMPTION_LIMIT_REDUCED, _, _))
.WillOnce(testing::Invoke(
[this](QuicErrorCode error, const std::string& details,
ConnectionCloseBehavior connection_close_behavior) {
server_connection_->ReallyCloseConnection(
error, details, connection_close_behavior);
ASSERT_FALSE(server_connection_->connected());
}));
AdvanceHandshakeWithFakeClient();
EXPECT_TRUE(server_handshaker_->fake_proof_source_handle()
->all_select_cert_args()
.empty());
}
TEST_P(TlsServerHandshakerTest, FailUponCustomTransportParam) {
client_session_->config()->custom_transport_parameters_to_send().emplace(
TestTlsServerHandshaker::kFailHandshakeParam,
"Fail handshake upon seeing this.");
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
EXPECT_CALL(
*server_connection_,
CloseConnection(QUIC_HANDSHAKE_FAILED,
"Failed to process additional transport parameters", _));
AdvanceHandshakeWithFakeClient();
}
TEST_P(TlsServerHandshakerTest, SuccessWithCustomTransportParam) {
client_session_->config()->custom_transport_parameters_to_send().emplace(
TransportParameters::TransportParameterId{0xFFEADD},
"Continue upon seeing this.");
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_ASYNC,
      FakeProofSourceHandle::Action::DELEGATE_SYNC);
EXPECT_CALL(*server_connection_, CloseConnection(_, _, _)).Times(0);
AdvanceHandshakeWithFakeClient();
ASSERT_TRUE(
server_handshaker_->fake_proof_source_handle()->HasPendingOperation());
server_handshaker_->fake_proof_source_handle()->CompletePendingOperation();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
}
#if BORINGSSL_API_VERSION >= 22
TEST_P(TlsServerHandshakerTest, EnableKyber) {
server_crypto_config_->set_preferred_groups(
{SSL_GROUP_X25519_KYBER768_DRAFT00});
client_crypto_config_->set_preferred_groups(
{SSL_GROUP_X25519_KYBER768_DRAFT00, SSL_GROUP_X25519, SSL_GROUP_SECP256R1,
SSL_GROUP_SECP384R1});
InitializeServer();
InitializeFakeClient();
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_EQ(PROTOCOL_TLS1_3, server_stream()->handshake_protocol());
EXPECT_EQ(SSL_GROUP_X25519_KYBER768_DRAFT00,
SSL_get_group_id(server_stream()->GetSsl()));
}
#endif
#if BORINGSSL_API_VERSION >= 27
TEST_P(TlsServerHandshakerTest, AlpsUseNewCodepoint) {
const struct {
bool client_use_alps_new_codepoint;
bool server_allow_alps_new_codepoint;
} tests[] = {
{true, true},
{false, true},
{false, false},
{true, true},
};
for (size_t i = 0; i < ABSL_ARRAYSIZE(tests); i++) {
SCOPED_TRACE(absl::StrCat("Test #", i));
const auto& test = tests[i];
client_crypto_config_->set_alps_use_new_codepoint(
test.client_use_alps_new_codepoint);
SetQuicReloadableFlag(quic_gfe_allow_alps_new_codepoint,
test.server_allow_alps_new_codepoint);
ASSERT_TRUE(SetupClientCert());
InitializeFakeClient();
InitializeServerWithFakeProofSourceHandle();
server_handshaker_->SetupProofSourceHandle(
FakeProofSourceHandle::Action::DELEGATE_SYNC,
        FakeProofSourceHandle::Action::DELEGATE_SYNC);
AdvanceHandshakeWithFakeClient();
EXPECT_EQ(test.client_use_alps_new_codepoint,
server_handshaker_->UseAlpsNewCodepoint());
CompleteCryptoHandshake();
ExpectHandshakeSuccessful();
EXPECT_EQ(PROTOCOL_TLS1_3, server_stream()->handshake_protocol());
}
}
#endif
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_server_handshaker.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_server_handshaker_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
df1d4917-d2e6-47fd-912a-265947ba8239 | cpp | google/leveldb | env_posix | util/env_posix.cc | util/env_posix_test.cc | #include <dirent.h>
#include <fcntl.h>
#include <sys/mman.h>
#ifndef __Fuchsia__
#include <sys/resource.h>
#endif
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <queue>
#include <set>
#include <string>
#include <thread>
#include <type_traits>
#include <utility>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/env_posix_test_helper.h"
#include "util/posix_logger.h"
namespace leveldb {
namespace {
int g_open_read_only_file_limit = -1;
constexpr const int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
int g_mmap_limit = kDefaultMmapLimit;
#if defined(HAVE_O_CLOEXEC)
constexpr const int kOpenBaseFlags = O_CLOEXEC;
#else
constexpr const int kOpenBaseFlags = 0;
#endif
constexpr const size_t kWritableFileBufferSize = 65536;
Status PosixError(const std::string& context, int error_number) {
if (error_number == ENOENT) {
return Status::NotFound(context, std::strerror(error_number));
} else {
return Status::IOError(context, std::strerror(error_number));
}
}
class Limiter {
public:
Limiter(int max_acquires)
:
#if !defined(NDEBUG)
max_acquires_(max_acquires),
#endif
acquires_allowed_(max_acquires) {
assert(max_acquires >= 0);
}
Limiter(const Limiter&) = delete;
  Limiter& operator=(const Limiter&) = delete;
bool Acquire() {
int old_acquires_allowed =
acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
if (old_acquires_allowed > 0) return true;
int pre_increment_acquires_allowed =
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
(void)pre_increment_acquires_allowed;
assert(pre_increment_acquires_allowed < max_acquires_);
return false;
}
void Release() {
int old_acquires_allowed =
acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
(void)old_acquires_allowed;
assert(old_acquires_allowed < max_acquires_);
}
private:
#if !defined(NDEBUG)
const int max_acquires_;
#endif
std::atomic<int> acquires_allowed_;
};
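// Limiter, defined above, caps how many of a limited resource (read-only
// file descriptors, mmap regions) may be held at once via a lock-free
// atomic counter. Illustrative usage (a sketch, not code from this file):
//
//   Limiter limiter(100);   // allow up to 100 concurrent holders
//   if (limiter.Acquire()) {
//     // ... use the limited resource ...
//     limiter.Release();
//   } else {
//     // ... fall back to an uncached, on-demand code path ...
//   }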
class PosixSequentialFile final : public SequentialFile {
public:
PosixSequentialFile(std::string filename, int fd)
: fd_(fd), filename_(std::move(filename)) {}
~PosixSequentialFile() override { close(fd_); }
Status Read(size_t n, Slice* result, char* scratch) override {
Status status;
while (true) {
::ssize_t read_size = ::read(fd_, scratch, n);
if (read_size < 0) {
if (errno == EINTR) {
continue;
}
status = PosixError(filename_, errno);
break;
}
*result = Slice(scratch, read_size);
break;
}
return status;
}
Status Skip(uint64_t n) override {
if (::lseek(fd_, n, SEEK_CUR) == static_cast<off_t>(-1)) {
return PosixError(filename_, errno);
}
return Status::OK();
}
private:
const int fd_;
const std::string filename_;
};
class PosixRandomAccessFile final : public RandomAccessFile {
public:
PosixRandomAccessFile(std::string filename, int fd, Limiter* fd_limiter)
: has_permanent_fd_(fd_limiter->Acquire()),
fd_(has_permanent_fd_ ? fd : -1),
fd_limiter_(fd_limiter),
filename_(std::move(filename)) {
if (!has_permanent_fd_) {
assert(fd_ == -1);
::close(fd);
}
}
~PosixRandomAccessFile() override {
if (has_permanent_fd_) {
assert(fd_ != -1);
::close(fd_);
fd_limiter_->Release();
}
}
Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
int fd = fd_;
if (!has_permanent_fd_) {
fd = ::open(filename_.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
return PosixError(filename_, errno);
}
}
assert(fd != -1);
Status status;
ssize_t read_size = ::pread(fd, scratch, n, static_cast<off_t>(offset));
*result = Slice(scratch, (read_size < 0) ? 0 : read_size);
if (read_size < 0) {
status = PosixError(filename_, errno);
}
if (!has_permanent_fd_) {
assert(fd != fd_);
::close(fd);
}
return status;
}
private:
const bool has_permanent_fd_;
const int fd_;
Limiter* const fd_limiter_;
const std::string filename_;
};
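// A memory-mapped alternative to the pread()-based file above: the whole
// file is mapped once at open time, so Read() is just a pointer offset and
// a bounds check. The shared mmap_limiter bounds how many files may be
// mapped at once, since each mapping consumes virtual address space.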
class PosixMmapReadableFile final : public RandomAccessFile {
public:
PosixMmapReadableFile(std::string filename, char* mmap_base, size_t length,
Limiter* mmap_limiter)
: mmap_base_(mmap_base),
length_(length),
mmap_limiter_(mmap_limiter),
filename_(std::move(filename)) {}
~PosixMmapReadableFile() override {
::munmap(static_cast<void*>(mmap_base_), length_);
mmap_limiter_->Release();
}
Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
if (offset + n > length_) {
*result = Slice();
return PosixError(filename_, EINVAL);
}
*result = Slice(mmap_base_ + offset, n);
return Status::OK();
}
private:
char* const mmap_base_;
const size_t length_;
Limiter* const mmap_limiter_;
const std::string filename_;
};
class PosixWritableFile final : public WritableFile {
public:
PosixWritableFile(std::string filename, int fd)
: pos_(0),
fd_(fd),
is_manifest_(IsManifest(filename)),
filename_(std::move(filename)),
dirname_(Dirname(filename_)) {}
~PosixWritableFile() override {
if (fd_ >= 0) {
Close();
}
}
Status Append(const Slice& data) override {
size_t write_size = data.size();
const char* write_data = data.data();
size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
std::memcpy(buf_ + pos_, write_data, copy_size);
write_data += copy_size;
write_size -= copy_size;
pos_ += copy_size;
if (write_size == 0) {
return Status::OK();
}
Status status = FlushBuffer();
if (!status.ok()) {
return status;
}
if (write_size < kWritableFileBufferSize) {
std::memcpy(buf_, write_data, write_size);
pos_ = write_size;
return Status::OK();
}
return WriteUnbuffered(write_data, write_size);
}
Status Close() override {
Status status = FlushBuffer();
const int close_result = ::close(fd_);
if (close_result < 0 && status.ok()) {
status = PosixError(filename_, errno);
}
fd_ = -1;
return status;
}
Status Flush() override { return FlushBuffer(); }
Status Sync() override {
Status status = SyncDirIfManifest();
if (!status.ok()) {
return status;
}
status = FlushBuffer();
if (!status.ok()) {
return status;
}
return SyncFd(fd_, filename_);
}
private:
Status FlushBuffer() {
Status status = WriteUnbuffered(buf_, pos_);
pos_ = 0;
return status;
}
Status WriteUnbuffered(const char* data, size_t size) {
while (size > 0) {
ssize_t write_result = ::write(fd_, data, size);
if (write_result < 0) {
if (errno == EINTR) {
continue;
}
return PosixError(filename_, errno);
}
data += write_result;
size -= write_result;
}
return Status::OK();
}
Status SyncDirIfManifest() {
Status status;
if (!is_manifest_) {
return status;
}
int fd = ::open(dirname_.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
status = PosixError(dirname_, errno);
} else {
status = SyncFd(fd, dirname_);
::close(fd);
}
return status;
}
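  // Flushes fd's data (and the metadata needed to read it back) to durable
  // storage. F_FULLFSYNC is preferred where available (macOS), because plain
  // fsync() there may not push data past the drive cache; otherwise this
  // falls back to fdatasync() when present, or fsync() as a last resort.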
static Status SyncFd(int fd, const std::string& fd_path) {
#if HAVE_FULLFSYNC
if (::fcntl(fd, F_FULLFSYNC) == 0) {
return Status::OK();
}
#endif
#if HAVE_FDATASYNC
bool sync_success = ::fdatasync(fd) == 0;
#else
bool sync_success = ::fsync(fd) == 0;
#endif
if (sync_success) {
return Status::OK();
}
return PosixError(fd_path, errno);
}
static std::string Dirname(const std::string& filename) {
std::string::size_type separator_pos = filename.rfind('/');
if (separator_pos == std::string::npos) {
return std::string(".");
}
assert(filename.find('/', separator_pos + 1) == std::string::npos);
return filename.substr(0, separator_pos);
}
static Slice Basename(const std::string& filename) {
std::string::size_type separator_pos = filename.rfind('/');
if (separator_pos == std::string::npos) {
return Slice(filename);
}
assert(filename.find('/', separator_pos + 1) == std::string::npos);
return Slice(filename.data() + separator_pos + 1,
filename.length() - separator_pos - 1);
}
static bool IsManifest(const std::string& filename) {
return Basename(filename).starts_with("MANIFEST");
}
char buf_[kWritableFileBufferSize];
size_t pos_;
int fd_;
const bool is_manifest_;
const std::string filename_;
const std::string dirname_;
};
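// Acquires or releases an advisory POSIX record lock over the entire file
// (l_len == 0 means "through end of file"). fcntl() locks do not conflict
// within a single process, which is why PosixLockTable below also tracks
// locked paths: it rejects a second in-process lock that fcntl() would
// silently grant.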
int LockOrUnlock(int fd, bool lock) {
errno = 0;
struct ::flock file_lock_info;
std::memset(&file_lock_info, 0, sizeof(file_lock_info));
file_lock_info.l_type = (lock ? F_WRLCK : F_UNLCK);
file_lock_info.l_whence = SEEK_SET;
file_lock_info.l_start = 0;
file_lock_info.l_len = 0;
return ::fcntl(fd, F_SETLK, &file_lock_info);
}
class PosixFileLock : public FileLock {
public:
PosixFileLock(int fd, std::string filename)
: fd_(fd), filename_(std::move(filename)) {}
int fd() const { return fd_; }
const std::string& filename() const { return filename_; }
private:
const int fd_;
const std::string filename_;
};
class PosixLockTable {
public:
bool Insert(const std::string& fname) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
bool succeeded = locked_files_.insert(fname).second;
mu_.Unlock();
return succeeded;
}
void Remove(const std::string& fname) LOCKS_EXCLUDED(mu_) {
mu_.Lock();
locked_files_.erase(fname);
mu_.Unlock();
}
private:
port::Mutex mu_;
std::set<std::string> locked_files_ GUARDED_BY(mu_);
};
class PosixEnv : public Env {
public:
PosixEnv();
~PosixEnv() override {
static const char msg[] =
"PosixEnv singleton destroyed. Unsupported behavior!\n";
std::fwrite(msg, 1, sizeof(msg), stderr);
std::abort();
}
Status NewSequentialFile(const std::string& filename,
SequentialFile** result) override {
int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
*result = nullptr;
return PosixError(filename, errno);
}
*result = new PosixSequentialFile(filename, fd);
return Status::OK();
}
Status NewRandomAccessFile(const std::string& filename,
RandomAccessFile** result) override {
*result = nullptr;
int fd = ::open(filename.c_str(), O_RDONLY | kOpenBaseFlags);
if (fd < 0) {
return PosixError(filename, errno);
}
if (!mmap_limiter_.Acquire()) {
*result = new PosixRandomAccessFile(filename, fd, &fd_limiter_);
return Status::OK();
}
uint64_t file_size;
Status status = GetFileSize(filename, &file_size);
if (status.ok()) {
void* mmap_base =
::mmap(nullptr, file_size, PROT_READ, MAP_SHARED, fd, 0);
if (mmap_base != MAP_FAILED) {
*result = new PosixMmapReadableFile(filename,
reinterpret_cast<char*>(mmap_base),
file_size, &mmap_limiter_);
} else {
status = PosixError(filename, errno);
}
}
::close(fd);
if (!status.ok()) {
mmap_limiter_.Release();
}
return status;
}
Status NewWritableFile(const std::string& filename,
WritableFile** result) override {
int fd = ::open(filename.c_str(),
O_TRUNC | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
*result = nullptr;
return PosixError(filename, errno);
}
*result = new PosixWritableFile(filename, fd);
return Status::OK();
}
Status NewAppendableFile(const std::string& filename,
WritableFile** result) override {
int fd = ::open(filename.c_str(),
O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
*result = nullptr;
return PosixError(filename, errno);
}
*result = new PosixWritableFile(filename, fd);
return Status::OK();
}
bool FileExists(const std::string& filename) override {
return ::access(filename.c_str(), F_OK) == 0;
}
Status GetChildren(const std::string& directory_path,
std::vector<std::string>* result) override {
result->clear();
::DIR* dir = ::opendir(directory_path.c_str());
if (dir == nullptr) {
return PosixError(directory_path, errno);
}
struct ::dirent* entry;
while ((entry = ::readdir(dir)) != nullptr) {
result->emplace_back(entry->d_name);
}
::closedir(dir);
return Status::OK();
}
Status RemoveFile(const std::string& filename) override {
if (::unlink(filename.c_str()) != 0) {
return PosixError(filename, errno);
}
return Status::OK();
}
Status CreateDir(const std::string& dirname) override {
if (::mkdir(dirname.c_str(), 0755) != 0) {
return PosixError(dirname, errno);
}
return Status::OK();
}
Status RemoveDir(const std::string& dirname) override {
if (::rmdir(dirname.c_str()) != 0) {
return PosixError(dirname, errno);
}
return Status::OK();
}
Status GetFileSize(const std::string& filename, uint64_t* size) override {
struct ::stat file_stat;
if (::stat(filename.c_str(), &file_stat) != 0) {
*size = 0;
return PosixError(filename, errno);
}
*size = file_stat.st_size;
return Status::OK();
}
Status RenameFile(const std::string& from, const std::string& to) override {
if (std::rename(from.c_str(), to.c_str()) != 0) {
return PosixError(from, errno);
}
return Status::OK();
}
Status LockFile(const std::string& filename, FileLock** lock) override {
*lock = nullptr;
int fd = ::open(filename.c_str(), O_RDWR | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
return PosixError(filename, errno);
}
if (!locks_.Insert(filename)) {
::close(fd);
return Status::IOError("lock " + filename, "already held by process");
}
if (LockOrUnlock(fd, true) == -1) {
int lock_errno = errno;
::close(fd);
locks_.Remove(filename);
return PosixError("lock " + filename, lock_errno);
}
*lock = new PosixFileLock(fd, filename);
return Status::OK();
}
Status UnlockFile(FileLock* lock) override {
PosixFileLock* posix_file_lock = static_cast<PosixFileLock*>(lock);
if (LockOrUnlock(posix_file_lock->fd(), false) == -1) {
return PosixError("unlock " + posix_file_lock->filename(), errno);
}
locks_.Remove(posix_file_lock->filename());
::close(posix_file_lock->fd());
delete posix_file_lock;
return Status::OK();
}
void Schedule(void (*background_work_function)(void* background_work_arg),
void* background_work_arg) override;
void StartThread(void (*thread_main)(void* thread_main_arg),
void* thread_main_arg) override {
std::thread new_thread(thread_main, thread_main_arg);
new_thread.detach();
}
Status GetTestDirectory(std::string* result) override {
const char* env = std::getenv("TEST_TMPDIR");
if (env && env[0] != '\0') {
*result = env;
} else {
char buf[100];
std::snprintf(buf, sizeof(buf), "/tmp/leveldbtest-%d",
static_cast<int>(::geteuid()));
*result = buf;
}
CreateDir(*result);
return Status::OK();
}
Status NewLogger(const std::string& filename, Logger** result) override {
int fd = ::open(filename.c_str(),
O_APPEND | O_WRONLY | O_CREAT | kOpenBaseFlags, 0644);
if (fd < 0) {
*result = nullptr;
return PosixError(filename, errno);
}
std::FILE* fp = ::fdopen(fd, "w");
if (fp == nullptr) {
::close(fd);
*result = nullptr;
return PosixError(filename, errno);
} else {
*result = new PosixLogger(fp);
return Status::OK();
}
}
uint64_t NowMicros() override {
static constexpr uint64_t kUsecondsPerSecond = 1000000;
struct ::timeval tv;
::gettimeofday(&tv, nullptr);
return static_cast<uint64_t>(tv.tv_sec) * kUsecondsPerSecond + tv.tv_usec;
}
void SleepForMicroseconds(int micros) override {
std::this_thread::sleep_for(std::chrono::microseconds(micros));
}
private:
void BackgroundThreadMain();
static void BackgroundThreadEntryPoint(PosixEnv* env) {
env->BackgroundThreadMain();
}
struct BackgroundWorkItem {
explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
: function(function), arg(arg) {}
void (*const function)(void*);
void* const arg;
};
port::Mutex background_work_mutex_;
port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
bool started_background_thread_ GUARDED_BY(background_work_mutex_);
std::queue<BackgroundWorkItem> background_work_queue_
GUARDED_BY(background_work_mutex_);
PosixLockTable locks_;
Limiter mmap_limiter_;
Limiter fd_limiter_;
};
int MaxMmaps() { return g_mmap_limit; }
int MaxOpenFiles() {
if (g_open_read_only_file_limit >= 0) {
return g_open_read_only_file_limit;
}
#ifdef __Fuchsia__
g_open_read_only_file_limit = 50;
#else
struct ::rlimit rlim;
if (::getrlimit(RLIMIT_NOFILE, &rlim)) {
g_open_read_only_file_limit = 50;
} else if (rlim.rlim_cur == RLIM_INFINITY) {
g_open_read_only_file_limit = std::numeric_limits<int>::max();
} else {
g_open_read_only_file_limit = rlim.rlim_cur / 5;
}
#endif
return g_open_read_only_file_limit;
}
}
PosixEnv::PosixEnv()
: background_work_cv_(&background_work_mutex_),
started_background_thread_(false),
mmap_limiter_(MaxMmaps()),
fd_limiter_(MaxOpenFiles()) {}
void PosixEnv::Schedule(
void (*background_work_function)(void* background_work_arg),
void* background_work_arg) {
background_work_mutex_.Lock();
if (!started_background_thread_) {
started_background_thread_ = true;
std::thread background_thread(PosixEnv::BackgroundThreadEntryPoint, this);
background_thread.detach();
}
if (background_work_queue_.empty()) {
background_work_cv_.Signal();
}
background_work_queue_.emplace(background_work_function, background_work_arg);
background_work_mutex_.Unlock();
}
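// Consumer loop for the single background thread started by Schedule():
// wait on the condition variable while the queue is empty, then pop one
// work item at a time and run it outside the mutex.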
void PosixEnv::BackgroundThreadMain() {
while (true) {
background_work_mutex_.Lock();
while (background_work_queue_.empty()) {
background_work_cv_.Wait();
}
assert(!background_work_queue_.empty());
auto background_work_function = background_work_queue_.front().function;
void* background_work_arg = background_work_queue_.front().arg;
background_work_queue_.pop();
background_work_mutex_.Unlock();
background_work_function(background_work_arg);
}
}
namespace {
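// Wraps an Env in a never-destroyed singleton: the instance is constructed
// with placement new into static storage and intentionally never destroyed,
// so the pointer returned by Env::Default() stays valid even during program
// shutdown.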
template <typename EnvType>
class SingletonEnv {
public:
SingletonEnv() {
#if !defined(NDEBUG)
env_initialized_.store(true, std::memory_order_relaxed);
#endif
static_assert(sizeof(env_storage_) >= sizeof(EnvType),
"env_storage_ will not fit the Env");
static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
"env_storage_ does not meet the Env's alignment needs");
new (&env_storage_) EnvType();
}
~SingletonEnv() = default;
SingletonEnv(const SingletonEnv&) = delete;
SingletonEnv& operator=(const SingletonEnv&) = delete;
Env* env() { return reinterpret_cast<Env*>(&env_storage_); }
static void AssertEnvNotInitialized() {
#if !defined(NDEBUG)
assert(!env_initialized_.load(std::memory_order_relaxed));
#endif
}
private:
typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
env_storage_;
#if !defined(NDEBUG)
static std::atomic<bool> env_initialized_;
#endif
};
#if !defined(NDEBUG)
template <typename EnvType>
std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
#endif
using PosixDefaultEnv = SingletonEnv<PosixEnv>;
}
void EnvPosixTestHelper::SetReadOnlyFDLimit(int limit) {
PosixDefaultEnv::AssertEnvNotInitialized();
g_open_read_only_file_limit = limit;
}
void EnvPosixTestHelper::SetReadOnlyMMapLimit(int limit) {
PosixDefaultEnv::AssertEnvNotInitialized();
g_mmap_limit = limit;
}
Env* Env::Default() {
static PosixDefaultEnv env_container;
return env_container.env();
}
} | #include <sys/resource.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <unordered_set>
#include <vector>
#include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "util/env_posix_test_helper.h"
#include "util/testutil.h"
#if HAVE_O_CLOEXEC
namespace {
constexpr int kTextCloseOnExecHelperExecFailedCode = 61;
constexpr int kTextCloseOnExecHelperDup2FailedCode = 62;
constexpr int kTextCloseOnExecHelperFoundOpenFdCode = 63;
std::vector<char>* GetArgvZero() {
static std::vector<char> program_name;
return &program_name;
}
static const char kTestCloseOnExecSwitch[] = "--test-close-on-exec-helper";
int TestCloseOnExecHelperMain(char* pid_arg) {
int fd = std::atoi(pid_arg);
if (::dup2(fd, fd) == fd) {
std::fprintf(stderr, "Unexpected open fd %d\n", fd);
return kTextCloseOnExecHelperFoundOpenFdCode;
}
if (errno != EBADF) {
std::fprintf(stderr, "Unexpected errno after calling dup2 on fd %d: %s\n",
fd, std::strerror(errno));
return kTextCloseOnExecHelperDup2FailedCode;
}
return 0;
}
void GetMaxFileDescriptor(int* result_fd) {
::rlimit fd_rlimit;
ASSERT_EQ(0, ::getrlimit(RLIMIT_NOFILE, &fd_rlimit));
*result_fd = fd_rlimit.rlim_cur;
}
void GetOpenFileDescriptors(std::unordered_set<int>* open_fds) {
int max_fd = 0;
GetMaxFileDescriptor(&max_fd);
for (int fd = 0; fd < max_fd; ++fd) {
if (::dup2(fd, fd) != fd) {
ASSERT_EQ(EBADF, errno)
<< "dup2() should set errno to EBADF on closed file descriptors";
continue;
}
open_fds->insert(fd);
}
}
void GetNewlyOpenedFileDescriptor(
const std::unordered_set<int>& baseline_open_fds, int* result_fd) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
for (int fd : baseline_open_fds) {
ASSERT_EQ(1, open_fds.count(fd))
<< "Previously opened file descriptor was closed during test setup";
open_fds.erase(fd);
}
ASSERT_EQ(1, open_fds.size())
<< "Expected exactly one newly opened file descriptor during test setup";
*result_fd = *open_fds.begin();
}
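// Forks and re-executes this test binary with a helper switch to check that
// the most recently opened descriptor is close-on-exec: the child probes
// the fd with dup2(fd, fd), which fails with EBADF exactly when the
// descriptor was closed across execv().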
void CheckCloseOnExecDoesNotLeakFDs(
const std::unordered_set<int>& baseline_open_fds) {
char switch_buffer[sizeof(kTestCloseOnExecSwitch)];
std::memcpy(switch_buffer, kTestCloseOnExecSwitch,
sizeof(kTestCloseOnExecSwitch));
int probed_fd;
GetNewlyOpenedFileDescriptor(baseline_open_fds, &probed_fd);
std::string fd_string = std::to_string(probed_fd);
std::vector<char> fd_buffer(fd_string.begin(), fd_string.end());
fd_buffer.emplace_back('\0');
char* child_argv[] = {GetArgvZero()->data(), switch_buffer, fd_buffer.data(),
nullptr};
constexpr int kForkInChildProcessReturnValue = 0;
int child_pid = fork();
if (child_pid == kForkInChildProcessReturnValue) {
::execv(child_argv[0], child_argv);
std::fprintf(stderr, "Error spawning child process: %s\n", strerror(errno));
std::exit(kTextCloseOnExecHelperExecFailedCode);
}
int child_status = 0;
ASSERT_EQ(child_pid, ::waitpid(child_pid, &child_status, 0));
ASSERT_TRUE(WIFEXITED(child_status))
<< "The helper process did not exit with an exit code";
ASSERT_EQ(0, WEXITSTATUS(child_status))
<< "The helper process encountered an error";
}
}
#endif
namespace leveldb {
static const int kReadOnlyFileLimit = 4;
static const int kMMapLimit = 4;
class EnvPosixTest : public testing::Test {
public:
static void SetFileLimits(int read_only_file_limit, int mmap_limit) {
EnvPosixTestHelper::SetReadOnlyFDLimit(read_only_file_limit);
EnvPosixTestHelper::SetReadOnlyMMapLimit(mmap_limit);
}
EnvPosixTest() : env_(Env::Default()) {}
Env* env_;
};
TEST_F(EnvPosixTest, TestOpenOnRead) {
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string test_file = test_dir + "/open_on_read.txt";
FILE* f = std::fopen(test_file.c_str(), "we");
ASSERT_TRUE(f != nullptr);
const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
fputs(kFileData, f);
std::fclose(f);
const int kNumFiles = kReadOnlyFileLimit + kMMapLimit + 5;
leveldb::RandomAccessFile* files[kNumFiles] = {0};
for (int i = 0; i < kNumFiles; i++) {
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
}
char scratch;
Slice read_result;
for (int i = 0; i < kNumFiles; i++) {
ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
ASSERT_EQ(kFileData[i], read_result[0]);
}
for (int i = 0; i < kNumFiles; i++) {
delete files[i];
}
ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
}
#if HAVE_O_CLOEXEC
TEST_F(EnvPosixTest, TestCloseOnExecSequentialFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_sequential.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::SequentialFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewSequentialFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecRandomAccessFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_random_access.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::RandomAccessFile* mmapped_files[kMMapLimit];
for (int i = 0; i < kMMapLimit; i++) {
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &mmapped_files[i]));
}
leveldb::RandomAccessFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
for (int i = 0; i < kMMapLimit; i++) {
delete mmapped_files[i];
}
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecWritableFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_writable.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewWritableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecAppendableFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_appendable.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::WritableFile* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewAppendableFile(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecLockFile) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_lock.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::FileLock* lock = nullptr;
ASSERT_LEVELDB_OK(env_->LockFile(file_path, &lock));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
ASSERT_LEVELDB_OK(env_->UnlockFile(lock));
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
TEST_F(EnvPosixTest, TestCloseOnExecLogger) {
std::unordered_set<int> open_fds;
GetOpenFileDescriptors(&open_fds);
std::string test_dir;
ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
std::string file_path = test_dir + "/close_on_exec_logger.txt";
ASSERT_LEVELDB_OK(WriteStringToFile(env_, "0123456789", file_path));
leveldb::Logger* file = nullptr;
ASSERT_LEVELDB_OK(env_->NewLogger(file_path, &file));
CheckCloseOnExecDoesNotLeakFDs(open_fds);
delete file;
ASSERT_LEVELDB_OK(env_->RemoveFile(file_path));
}
#endif
}
int main(int argc, char** argv) {
#if HAVE_O_CLOEXEC
for (int i = 1; i < argc; ++i) {
if (!std::strcmp(argv[i], kTestCloseOnExecSwitch)) {
return TestCloseOnExecHelperMain(argv[i + 1]);
}
}
GetArgvZero()->assign(argv[0], argv[0] + std::strlen(argv[0]) + 1);
#endif
leveldb::EnvPosixTest::SetFileLimits(leveldb::kReadOnlyFileLimit,
leveldb::kMMapLimit);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_posix.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_posix_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
5306fe2a-a64a-4862-978a-1a7bd74402f1 | cpp | tensorflow/tensorflow | gcs_filesystem | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc | tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem_test.cc | #include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <stdlib.h>
#include <string.h>
#include <variant>
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/types/variant.h"
#include "google/cloud/storage/client.h"
#include "tensorflow/c/env.h"
#include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_helper.h"
#include "tensorflow/c/logging.h"
#include "tensorflow/c/tf_status.h"
namespace gcs = google::cloud::storage;
constexpr char kBlockSize[] = "GCS_READ_CACHE_BLOCK_SIZE_MB";
constexpr size_t kDefaultBlockSize = 64 * 1024 * 1024;
constexpr char kMaxCacheSize[] = "GCS_READ_CACHE_MAX_SIZE_MB";
constexpr size_t kDefaultMaxCacheSize = 0;
constexpr char kMaxStaleness[] = "GCS_READ_CACHE_MAX_STALENESS";
constexpr uint64_t kDefaultMaxStaleness = 0;
constexpr char kStatCacheMaxAge[] = "GCS_STAT_CACHE_MAX_AGE";
constexpr uint64_t kStatCacheDefaultMaxAge = 5;
constexpr char kStatCacheMaxEntries[] = "GCS_STAT_CACHE_MAX_ENTRIES";
constexpr size_t kStatCacheDefaultMaxEntries = 1024;
constexpr char kAppendMode[] = "GCS_APPEND_MODE";
constexpr char kComposeAppend[] = "compose";
static inline void TF_SetStatusFromGCSStatus(
const google::cloud::Status& gcs_status, TF_Status* status) {
TF_SetStatus(status, static_cast<TF_Code>(gcs_status.code()),
gcs_status.message().c_str());
}
static void* plugin_memory_allocate(size_t size) { return calloc(1, size); }
static void plugin_memory_free(void* ptr) { free(ptr); }
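// Splits a "gs://bucket/object" URI into its bucket and object components.
// Illustrative behavior (a sketch of expected results, not code from this
// file): "gs://bucket/a/b.txt" yields bucket="bucket", object="a/b.txt";
// "gs://bucket/" is accepted only when object_empty_ok is true.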
void ParseGCSPath(const std::string& fname, bool object_empty_ok,
std::string* bucket, std::string* object, TF_Status* status) {
  size_t scheme_end = fname.find("://") + 2;
  if (fname.substr(0, scheme_end + 1) != "gs://") {
    TF_SetStatus(status, TF_INVALID_ARGUMENT,
                 "GCS path doesn't start with 'gs://'.");
return;
}
size_t bucket_end = fname.find('/', scheme_end + 1);
if (bucket_end == std::string::npos) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain a bucket name.");
return;
}
*bucket = fname.substr(scheme_end + 1, bucket_end - scheme_end - 1);
*object = fname.substr(bucket_end + 1);
if (object->empty() && !object_empty_ok) {
TF_SetStatus(status, TF_INVALID_ARGUMENT,
"GCS path doesn't contain an object name.");
}
}
static void MaybeAppendSlash(std::string* name) {
if (name->empty())
*name = "/";
else if (name->back() != '/')
name->push_back('/');
}
static int64_t LoadBufferFromGCS(const std::string& path, size_t offset,
size_t buffer_size, char* buffer,
tf_gcs_filesystem::GCSFile* gcs_file,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return -1;
auto stream = gcs_file->gcs_client.ReadObject(
bucket, object, gcs::ReadRange(offset, offset + buffer_size));
TF_SetStatusFromGCSStatus(stream.status(), status);
if ((TF_GetCode(status) != TF_OK) &&
(TF_GetCode(status) != TF_OUT_OF_RANGE)) {
return -1;
}
int64_t read;
auto content_length = stream.headers().find("content-length");
if (content_length == stream.headers().end()) {
read = 0;
} else if (!absl::SimpleAtoi(content_length->second, &read)) {
TF_SetStatus(status, TF_UNKNOWN, "Could not get content-length header");
return -1;
}
TF_SetStatus(status, TF_OK, "");
TF_VLog(1, "Successful read of %s @ %u of size: %u", path.c_str(), offset,
read);
stream.read(buffer, read);
read = stream.gcount();
if (read < buffer_size) {
tf_gcs_filesystem::GcsFileStat stat;
if (gcs_file->stat_cache->Lookup(path, &stat)) {
if (offset + read < stat.base.length) {
TF_SetStatus(status, TF_INTERNAL,
absl::StrCat("File contents are inconsistent for file: ",
path, " @ ", offset)
.c_str());
}
TF_VLog(2, "Successful integrity check for: %s @ %u", path.c_str(),
offset);
}
}
return read;
}
namespace tf_random_access_file {
using ReadFn =
std::function<int64_t(const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status)>;
typedef struct GCSFile {
const std::string path;
const bool is_cache_enable;
const uint64_t buffer_size;
ReadFn read_fn;
absl::Mutex buffer_mutex;
uint64_t buffer_start ABSL_GUARDED_BY(buffer_mutex);
bool buffer_end_is_past_eof ABSL_GUARDED_BY(buffer_mutex);
std::string buffer ABSL_GUARDED_BY(buffer_mutex);
GCSFile(std::string path, bool is_cache_enable, uint64_t buffer_size,
ReadFn read_fn)
: path(path),
is_cache_enable(is_cache_enable),
buffer_size(buffer_size),
read_fn(std::move(read_fn)),
buffer_mutex(),
buffer_start(0),
buffer_end_is_past_eof(false),
buffer() {}
} GCSFile;
void Cleanup(TF_RandomAccessFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
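// When the block cache is disabled, Read() keeps a single read-ahead buffer
// of up to buffer_size bytes per file: a request is served from the buffer
// where possible, and on a miss the buffer is refilled starting at the
// first unserved offset before the remainder is copied out.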
int64_t Read(const TF_RandomAccessFile* file, uint64_t offset, size_t n,
char* buffer, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->is_cache_enable || n > gcs_file->buffer_size) {
return gcs_file->read_fn(gcs_file->path, offset, n, buffer, status);
} else {
absl::MutexLock l(&gcs_file->buffer_mutex);
size_t buffer_end = gcs_file->buffer_start + gcs_file->buffer.size();
size_t copy_size = 0;
if (offset < buffer_end && gcs_file->buffer_start) {
copy_size = (std::min)(n, static_cast<size_t>(buffer_end - offset));
memcpy(buffer,
gcs_file->buffer.data() + (offset - gcs_file->buffer_start),
copy_size);
}
bool consumed_buffer_to_eof =
offset + copy_size >= buffer_end && gcs_file->buffer_end_is_past_eof;
if (copy_size < n && !consumed_buffer_to_eof) {
gcs_file->buffer_start = offset + copy_size;
gcs_file->buffer.resize(gcs_file->buffer_size);
auto read_fill_buffer = gcs_file->read_fn(
gcs_file->path, gcs_file->buffer_start, gcs_file->buffer_size,
&(gcs_file->buffer[0]), status);
gcs_file->buffer_end_is_past_eof =
(TF_GetCode(status) == TF_OUT_OF_RANGE);
if (read_fill_buffer >= 0) gcs_file->buffer.resize(read_fill_buffer);
if (TF_GetCode(status) != TF_OK &&
TF_GetCode(status) != TF_OUT_OF_RANGE) {
gcs_file->buffer.resize(0);
return -1;
}
size_t remaining_copy =
(std::min)(n - copy_size, gcs_file->buffer.size());
memcpy(buffer + copy_size, gcs_file->buffer.data(), remaining_copy);
copy_size += remaining_copy;
}
if (copy_size < n) {
gcs_file->buffer_end_is_past_eof = false;
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
return copy_size;
}
TF_SetStatus(status, TF_OK, "");
return copy_size;
}
}
}
namespace tf_writable_file {
typedef struct GCSFile {
const std::string bucket;
const std::string object;
gcs::Client* gcs_client;
TempFile outfile;
bool sync_need;
int64_t offset;
} GCSFile;
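// Uploads the locally buffered writes to GCS. For a fresh object (*offset
// is -1 or 0) the temporary file is uploaded directly; otherwise its
// contents are uploaded under a random temporary name and stitched onto the
// existing object with ComposeObject, emulating an append, after which the
// temporary object is deleted and the local buffer truncated.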
static void SyncImpl(const std::string& bucket, const std::string& object,
int64_t* offset, TempFile* outfile,
gcs::Client* gcs_client, TF_Status* status) {
outfile->flush();
if (*offset == -1 || *offset == 0) {
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
if (*offset == 0) {
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
}
outfile->clear();
outfile->seekp(0, std::ios::end);
TF_SetStatus(status, TF_OK, "");
} else {
std::string temporary_object =
gcs::CreateRandomPrefixName("tf_writable_file_gcs");
auto metadata = gcs_client->UploadFile(outfile->getName(), bucket,
temporary_object, gcs::Fields(""));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
TF_VLog(3, "AppendObject: gs:
temporary_object.c_str(), bucket.c_str(), object.c_str());
const std::vector<gcs::ComposeSourceObject> source_objects = {
{object, {}, {}}, {temporary_object, {}, {}}};
metadata = gcs_client->ComposeObject(bucket, source_objects, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
auto delete_status = gcs_client->DeleteObject(bucket, temporary_object);
if (!delete_status.ok()) {
TF_SetStatusFromGCSStatus(delete_status, status);
return;
}
if (!outfile->truncate()) {
TF_SetStatus(status, TF_INTERNAL,
"Could not truncate internal temporary file.");
return;
}
*offset = static_cast<int64_t>(metadata->size());
TF_SetStatus(status, TF_OK, "");
}
}
void Cleanup(TF_WritableFile* file) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
delete gcs_file;
}
void Append(const TF_WritableFile* file, const char* buffer, size_t n,
TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (!gcs_file->outfile.is_open()) {
TF_SetStatus(status, TF_FAILED_PRECONDITION,
"The internal temporary file is not writable.");
return;
}
TF_VLog(3, "Append: gs:
gcs_file->object.c_str(), n);
gcs_file->sync_need = true;
gcs_file->outfile.write(buffer, n);
if (!gcs_file->outfile)
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
else
TF_SetStatus(status, TF_OK, "");
}
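// Reports the logical write position: the position within the local
// temporary file plus, in compose mode, the number of bytes already
// persisted to GCS.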
int64_t Tell(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
int64_t position = int64_t(gcs_file->outfile.tellp());
if (position == -1)
TF_SetStatus(status, TF_INTERNAL,
"tellp on the internal temporary file failed");
else
TF_SetStatus(status, TF_OK, "");
return position == -1
? -1
: position + (gcs_file->offset == -1 ? 0 : gcs_file->offset);
}
void Flush(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
if (gcs_file->sync_need) {
TF_VLog(3, "Flush started: gs:
gcs_file->object.c_str());
if (!gcs_file->outfile) {
TF_SetStatus(status, TF_INTERNAL,
"Could not append to the internal temporary file.");
return;
}
SyncImpl(gcs_file->bucket, gcs_file->object, &gcs_file->offset,
&gcs_file->outfile, gcs_file->gcs_client, status);
TF_VLog(3, "Flush finished: gs:
gcs_file->object.c_str());
if (TF_GetCode(status) != TF_OK) return;
gcs_file->sync_need = false;
} else {
TF_SetStatus(status, TF_OK, "");
}
}
void Sync(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Sync: gs:
gcs_file->object.c_str());
Flush(file, status);
}
void Close(const TF_WritableFile* file, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(file->plugin_file);
TF_VLog(3, "Close: gs:
gcs_file->object.c_str());
if (gcs_file->sync_need) {
Flush(file, status);
}
gcs_file->outfile.close();
}
}
namespace tf_read_only_memory_region {
typedef struct GCSMemoryRegion {
const void* const address;
const uint64_t length;
} GCSMemoryRegion;
void Cleanup(TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
plugin_memory_free(const_cast<void*>(r->address));
delete r;
}
const void* Data(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->address;
}
uint64_t Length(const TF_ReadOnlyMemoryRegion* region) {
auto r = static_cast<GCSMemoryRegion*>(region->plugin_memory_region);
return r->length;
}
}
namespace tf_gcs_filesystem {
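// Default-configured constructor. Block size, cache capacity, staleness,
// and stat-cache limits can all be overridden through environment
// variables; append-via-compose is enabled when the append-mode variable is
// set to the expected mode string.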
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client)
: gcs_client(gcs_client), block_cache_lock() {
const char* append_mode = std::getenv(kAppendMode);
compose = (append_mode != nullptr) && (!strcmp(kAppendMode, append_mode));
uint64_t value;
block_size = kDefaultBlockSize;
size_t max_bytes = kDefaultMaxCacheSize;
uint64_t max_staleness = kDefaultMaxStaleness;
const char* block_size_env = std::getenv(kBlockSize);
if (block_size_env && absl::SimpleAtoi(block_size_env, &value)) {
block_size = value * 1024 * 1024;
}
const char* max_bytes_env = std::getenv(kMaxCacheSize);
if (max_bytes_env && absl::SimpleAtoi(max_bytes_env, &value)) {
max_bytes = static_cast<size_t>(value * 1024 * 1024);
}
const char* max_staleness_env = std::getenv(kMaxStaleness);
if (max_staleness_env && absl::SimpleAtoi(max_staleness_env, &value)) {
max_staleness = value;
}
TF_VLog(1, "GCS cache max size = %u ; block size = %u ; max staleness = %u",
max_bytes, block_size, max_staleness);
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
uint64_t stat_cache_max_age = kStatCacheDefaultMaxAge;
size_t stat_cache_max_entries = kStatCacheDefaultMaxEntries;
const char* stat_cache_max_age_env = std::getenv(kStatCacheMaxAge);
if (stat_cache_max_age_env &&
absl::SimpleAtoi(stat_cache_max_age_env, &value)) {
stat_cache_max_age = value;
}
const char* stat_cache_max_entries_env = std::getenv(kStatCacheMaxEntries);
if (stat_cache_max_entries_env &&
absl::SimpleAtoi(stat_cache_max_entries_env, &value)) {
stat_cache_max_entries = static_cast<size_t>(value);
}
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
GCSFile::GCSFile(google::cloud::storage::Client&& gcs_client, bool compose,
uint64_t block_size, size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries)
: gcs_client(gcs_client),
compose(compose),
block_cache_lock(),
block_size(block_size) {
file_block_cache = std::make_unique<RamFileBlockCache>(
block_size, max_bytes, max_staleness,
[this](const std::string& filename, size_t offset, size_t buffer_size,
char* buffer, TF_Status* status) {
return LoadBufferFromGCS(filename, offset, buffer_size, buffer, this,
status);
});
stat_cache = std::make_unique<ExpiringLRUCache<GcsFileStat>>(
stat_cache_max_age, stat_cache_max_entries);
}
void InitTest(TF_Filesystem* filesystem, bool compose, uint64_t block_size,
size_t max_bytes, uint64_t max_staleness,
uint64_t stat_cache_max_age, size_t stat_cache_max_entries,
TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem =
new GCSFile(std::move(client.value()), compose, block_size, max_bytes,
max_staleness, stat_cache_max_age, stat_cache_max_entries);
TF_SetStatus(status, TF_OK, "");
}
void Init(TF_Filesystem* filesystem, TF_Status* status) {
google::cloud::StatusOr<gcs::Client> client =
gcs::Client::CreateDefaultClient();
if (!client) {
TF_SetStatusFromGCSStatus(client.status(), status);
return;
}
filesystem->plugin_filesystem = new GCSFile(std::move(client.value()));
TF_SetStatus(status, TF_OK, "");
}
void Cleanup(TF_Filesystem* filesystem) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
delete gcs_file;
}
static void UncachedStatForObject(const std::string& bucket,
const std::string& object, GcsFileStat* stat,
gcs::Client* gcs_client, TF_Status* status) {
auto metadata = gcs_client->GetObjectMetadata(
bucket, object, gcs::Fields("generation,size,timeStorageClassUpdated"));
if (!metadata) return TF_SetStatusFromGCSStatus(metadata.status(), status);
stat->generation_number = metadata->generation();
stat->base.length = metadata->size();
stat->base.mtime_nsec =
metadata->time_storage_class_updated().time_since_epoch().count();
stat->base.is_directory = object.back() == '/';
  TF_VLog(1,
          "Stat of: gs://%s/%s -- length: %u; generation: %lld; "
          "mtime_nsec: %lld;",
          bucket.c_str(), object.c_str(), stat->base.length,
          stat->generation_number, stat->base.mtime_nsec);
return TF_SetStatus(status, TF_OK, "");
}
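// Creates a random-access file whose read callback goes through the shared
// block cache when enabled, validating cached blocks against the object's
// generation number, and falls back to direct GCS reads otherwise.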
void NewRandomAccessFile(const TF_Filesystem* filesystem, const char* path,
TF_RandomAccessFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
bool is_cache_enabled;
{
absl::MutexLock l(&gcs_file->block_cache_lock);
is_cache_enabled = gcs_file->file_block_cache->IsCacheEnabled();
}
auto read_fn = [gcs_file, is_cache_enabled, bucket, object](
const std::string& path, uint64_t offset, size_t n,
char* buffer, TF_Status* status) -> int64_t {
int64_t read = 0;
if (is_cache_enabled) {
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
GcsFileStat stat;
gcs_file->stat_cache->LookupOrCompute(
path, &stat,
[gcs_file, bucket, object](const std::string& path, GcsFileStat* stat,
TF_Status* status) {
UncachedStatForObject(bucket, object, stat, &gcs_file->gcs_client,
status);
},
status);
if (TF_GetCode(status) != TF_OK) return -1;
if (!gcs_file->file_block_cache->ValidateAndUpdateFileSignature(
path, stat.generation_number)) {
TF_VLog(
1,
"File signature has been changed. Refreshing the cache. Path: %s",
path.c_str());
}
read = gcs_file->file_block_cache->Read(path, offset, n, buffer, status);
} else {
read = LoadBufferFromGCS(path, offset, n, buffer, gcs_file, status);
}
if (TF_GetCode(status) != TF_OK) return -1;
if (read < n)
TF_SetStatus(status, TF_OUT_OF_RANGE, "Read less bytes than requested");
else
TF_SetStatus(status, TF_OK, "");
return read;
};
file->plugin_file = new tf_random_access_file::GCSFile(
std::move(path), is_cache_enabled, gcs_file->block_size, read_fn);
TF_SetStatus(status, TF_OK, "");
}
void NewWritableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name = TF_GetTempFileName("");
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::out), true,
(gcs_file->compose ? 0 : -1)});
free(temp_file_name);
TF_VLog(3, "GcsWritableFile: %s", path);
TF_SetStatus(status, TF_OK, "");
}
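// Opens an object for appending. Without compose mode the existing object
// is first downloaded into the local temporary file; with compose mode only
// the object's current size is fetched, since new bytes are later merged
// server-side.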
void NewAppendableFile(const TF_Filesystem* filesystem, const char* path,
TF_WritableFile* file, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
char* temp_file_name_c_str = TF_GetTempFileName("");
std::string temp_file_name(temp_file_name_c_str);
free(temp_file_name_c_str);
if (!gcs_file->compose) {
auto gcs_status =
gcs_file->gcs_client.DownloadToFile(bucket, object, temp_file_name);
TF_SetStatusFromGCSStatus(gcs_status, status);
auto status_code = TF_GetCode(status);
if (status_code != TF_OK && status_code != TF_NOT_FOUND) return;
bool sync_need = (status_code == TF_NOT_FOUND);
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::app), sync_need,
-1});
} else {
auto metadata = gcs_file->gcs_client.GetObjectMetadata(bucket, object,
gcs::Fields("size"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) == TF_OK) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), false,
static_cast<int64_t>(metadata->size())});
} else if (TF_GetCode(status) == TF_NOT_FOUND) {
file->plugin_file = new tf_writable_file::GCSFile(
{std::move(bucket), std::move(object), &gcs_file->gcs_client,
TempFile(temp_file_name, std::ios::binary | std::ios::trunc), true,
0});
} else {
return;
}
}
TF_VLog(3, "GcsWritableFile: %s with existing file %s", path,
temp_file_name.c_str());
TF_SetStatus(status, TF_OK, "");
}
void NewReadOnlyMemoryRegionFromFile(const TF_Filesystem* filesystem,
const char* path,
TF_ReadOnlyMemoryRegion* region,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.GetObjectMetadata(bucket, object,
gcs::Fields("size"));
if (!metadata) {
TF_SetStatusFromGCSStatus(metadata.status(), status);
return;
}
TF_RandomAccessFile reader;
NewRandomAccessFile(filesystem, path, &reader, status);
if (TF_GetCode(status) != TF_OK) return;
char* buffer = static_cast<char*>(plugin_memory_allocate(metadata->size()));
int64_t read =
tf_random_access_file::Read(&reader, 0, metadata->size(), buffer, status);
tf_random_access_file::Cleanup(&reader);
if (TF_GetCode(status) != TF_OK) return;
if (read > 0 && buffer) {
region->plugin_memory_region =
new tf_read_only_memory_region::GCSMemoryRegion(
{buffer, static_cast<uint64_t>(read)});
TF_SetStatus(status, TF_OK, "");
} else if (read == 0) {
TF_SetStatus(status, TF_INVALID_ARGUMENT, "File is empty");
}
}
static void StatForObject(GCSFile* gcs_file, const std::string& path,
const std::string& bucket, const std::string& object,
GcsFileStat* stat, TF_Status* status) {
if (object.empty())
return TF_SetStatus(
status, TF_INVALID_ARGUMENT,
absl::StrCat("'object' must be a non-empty string. (File: ", path, ")")
.c_str());
TF_SetStatus(status, TF_OK, "");
gcs_file->stat_cache->LookupOrCompute(
path, stat,
[gcs_file, bucket, object](const std::string& path, GcsFileStat* stat,
TF_Status* status) {
UncachedStatForObject(bucket, object, stat, &gcs_file->gcs_client,
status);
},
status);
}
static bool ObjectExists(GCSFile* gcs_file, const std::string& path,
const std::string& bucket, const std::string& object,
TF_Status* status) {
GcsFileStat stat;
StatForObject(gcs_file, path, bucket, object, &stat, status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_NOT_FOUND)
return false;
if (TF_GetCode(status) == TF_NOT_FOUND) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return !stat.base.is_directory;
}
static bool BucketExists(GCSFile* gcs_file, const std::string& bucket,
TF_Status* status) {
auto metadata =
gcs_file->gcs_client.GetBucketMetadata(bucket, gcs::Fields(""));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_NOT_FOUND)
return false;
if (TF_GetCode(status) == TF_NOT_FOUND) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return true;
}
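// Lists up to `max_results` entries under `dir`. A recursive listing drops
// the delimiter so nested objects are returned; otherwise "/" keeps the
// listing one level deep. Returned names have the directory prefix
// stripped.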
static std::vector<std::string> GetChildrenBounded(
GCSFile* gcs_file, std::string dir, uint64_t max_results, bool recursive,
bool include_self_directory_marker, TF_Status* status) {
std::string bucket, prefix;
MaybeAppendSlash(&dir);
ParseGCSPath(dir, true, &bucket, &prefix, status);
std::vector<std::string> result;
uint64_t count = 0;
std::string delimiter = recursive ? "" : "/";
for (auto&& item : gcs_file->gcs_client.ListObjectsAndPrefixes(
bucket, gcs::Prefix(prefix), gcs::Delimiter(delimiter),
gcs::Fields("items(name),prefixes"))) {
if (count == max_results) {
TF_SetStatus(status, TF_OK, "");
return result;
}
if (!item) {
TF_SetStatusFromGCSStatus(item.status(), status);
return result;
}
auto value = *std::move(item);
std::string children = std::holds_alternative<std::string>(value)
? std::get<std::string>(value)
: std::get<gcs::ObjectMetadata>(value).name();
auto pos = children.find(prefix);
if (pos != 0) {
TF_SetStatus(status, TF_INTERNAL,
absl::StrCat("Unexpected response: the returned file name ",
children, " doesn't match the prefix ", prefix)
.c_str());
return result;
}
children.erase(0, prefix.length());
if (!children.empty() || include_self_directory_marker) {
result.emplace_back(children);
}
++count;
}
return result;
}
static bool FolderExists(GCSFile* gcs_file, std::string dir,
TF_Status* status) {
ExpiringLRUCache<GcsFileStat>::ComputeFunc compute_func =
[gcs_file](const std::string& dir, GcsFileStat* stat, TF_Status* status) {
auto children =
GetChildrenBounded(gcs_file, dir, 1, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
if (!children.empty()) {
stat->base = {0, 0, true};
return TF_SetStatus(status, TF_OK, "");
} else {
return TF_SetStatus(status, TF_INVALID_ARGUMENT, "Not a directory!");
}
};
GcsFileStat stat;
MaybeAppendSlash(&dir);
gcs_file->stat_cache->LookupOrCompute(dir, &stat, compute_func, status);
if (TF_GetCode(status) != TF_OK && TF_GetCode(status) != TF_INVALID_ARGUMENT)
return false;
if (TF_GetCode(status) == TF_INVALID_ARGUMENT) {
TF_SetStatus(status, TF_OK, "");
return false;
}
return true;
}
static void ClearFileCaches(GCSFile* gcs_file, const std::string& path) {
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
gcs_file->file_block_cache->RemoveFile(path);
gcs_file->stat_cache->Delete(path);
}
void PathExists(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool result = BucketExists(gcs_file, bucket, status);
if (result) return TF_SetStatus(status, TF_OK, "");
}
GcsFileStat stat;
StatForObject(gcs_file, path, bucket, object, &stat, status);
if (TF_GetCode(status) != TF_NOT_FOUND) return;
bool result = FolderExists(gcs_file, path, status);
if (TF_GetCode(status) != TF_OK || (TF_GetCode(status) == TF_OK && result))
return;
return TF_SetStatus(
status, TF_NOT_FOUND,
absl::StrCat("The path ", path, " does not exist.").c_str());
}
void CreateDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string dir = path;
MaybeAppendSlash(&dir);
TF_VLog(3,
"CreateDir: creating directory with path: %s and "
"path_with_slash: %s",
path, dir.c_str());
std::string bucket, object;
ParseGCSPath(dir, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool is_directory = BucketExists(gcs_file, bucket, status);
if (TF_GetCode(status) != TF_OK) return;
if (!is_directory)
TF_SetStatus(status, TF_NOT_FOUND,
absl::StrCat("The specified bucket ", dir, " was not found.")
.c_str());
return;
}
PathExists(filesystem, dir.c_str(), status);
if (TF_GetCode(status) == TF_OK) {
TF_VLog(3, "CreateDir: directory already exists, not uploading %s", path);
return TF_SetStatus(status, TF_ALREADY_EXISTS, path);
}
auto metadata = gcs_file->gcs_client.InsertObject(
bucket, object, "",
gcs::IfGenerationMatch(0), gcs::Fields(""));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) == TF_FAILED_PRECONDITION)
TF_SetStatus(status, TF_ALREADY_EXISTS, path);
}
void DeleteFile(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto gcs_status = gcs_file->gcs_client.DeleteObject(bucket, object);
TF_SetStatusFromGCSStatus(gcs_status, status);
if (TF_GetCode(status) == TF_OK) ClearFileCaches(gcs_file, path);
}
void DeleteDir(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto childrens = GetChildrenBounded(gcs_file, path, 2, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
if (childrens.size() > 1 || (childrens.size() == 1 && !childrens[0].empty()))
return TF_SetStatus(status, TF_FAILED_PRECONDITION,
"Cannot delete a non-empty directory.");
if (childrens.size() == 1 && childrens[0].empty()) {
std::string dir = path;
MaybeAppendSlash(&dir);
DeleteFile(filesystem, dir.c_str(), status);
return;
}
TF_SetStatus(status, TF_OK, "");
}
void CopyFile(const TF_Filesystem* filesystem, const char* src, const char* dst,
TF_Status* status) {
std::string bucket_src, object_src;
ParseGCSPath(src, false, &bucket_src, &object_src, status);
if (TF_GetCode(status) != TF_OK) return;
std::string bucket_dst, object_dst;
ParseGCSPath(dst, false, &bucket_dst, &object_dst, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.RewriteObjectBlocking(
bucket_src, object_src, bucket_dst, object_dst,
gcs::Fields("done,rewriteToken"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
}
bool IsDirectory(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return false;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
bool result = BucketExists(gcs_file, bucket, status);
if (TF_GetCode(status) != TF_OK) return false;
if (!result)
      TF_SetStatus(
          status, TF_NOT_FOUND,
          absl::StrCat("The specified bucket gs://", bucket, "/ was not found.")
              .c_str());
return result;
}
bool is_folder = FolderExists(gcs_file, path, status);
if (TF_GetCode(status) != TF_OK) return false;
if (is_folder) return true;
bool is_object = ObjectExists(gcs_file, path, bucket, object, status);
if (TF_GetCode(status) != TF_OK) return false;
if (is_object) {
TF_SetStatus(
status, TF_FAILED_PRECONDITION,
absl::StrCat("The specified path ", path, " is not a directory.")
.c_str());
return false;
}
TF_SetStatus(status, TF_NOT_FOUND,
absl::StrCat("The path ", path, " does not exist.").c_str());
return false;
}
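// GCS has no native rename: the object is rewritten (a server-side copy) to
// the destination and the source object is deleted afterwards.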
static void RenameObject(const TF_Filesystem* filesystem,
const std::string& src, const std::string& dst,
TF_Status* status) {
TF_VLog(3, "RenameObject: started %s to %s", src.c_str(), dst.c_str());
std::string bucket_src, object_src;
ParseGCSPath(src, false, &bucket_src, &object_src, status);
if (TF_GetCode(status) != TF_OK) return;
std::string bucket_dst, object_dst;
ParseGCSPath(dst, false, &bucket_dst, &object_dst, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
auto metadata = gcs_file->gcs_client.RewriteObjectBlocking(
bucket_src, object_src, bucket_dst, object_dst,
gcs::Fields("done,rewriteToken"));
TF_SetStatusFromGCSStatus(metadata.status(), status);
if (TF_GetCode(status) != TF_OK) return;
TF_VLog(3, "RenameObject: finished %s to %s", src.c_str(), dst.c_str());
ClearFileCaches(gcs_file, dst);
DeleteFile(filesystem, src.c_str(), status);
}
void RenameFile(const TF_Filesystem* filesystem, const char* src,
const char* dst, TF_Status* status) {
if (!IsDirectory(filesystem, src, status)) {
if (TF_GetCode(status) == TF_FAILED_PRECONDITION) {
TF_SetStatus(status, TF_OK, "");
RenameObject(filesystem, src, dst, status);
}
return;
}
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, src, UINT64_MAX, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
std::string src_dir = src;
std::string dst_dir = dst;
MaybeAppendSlash(&src_dir);
MaybeAppendSlash(&dst_dir);
for (const std::string& children : childrens) {
RenameObject(filesystem, src_dir + children, dst_dir + children, status);
if (TF_GetCode(status) != TF_OK) return;
}
TF_SetStatus(status, TF_OK, "");
}
void DeleteRecursively(const TF_Filesystem* filesystem, const char* path,
uint64_t* undeleted_files, uint64_t* undeleted_dirs,
TF_Status* status) {
if (!undeleted_files || !undeleted_dirs)
return TF_SetStatus(
status, TF_INTERNAL,
"'undeleted_files' and 'undeleted_dirs' cannot be nullptr.");
*undeleted_files = 0;
*undeleted_dirs = 0;
if (!IsDirectory(filesystem, path, status)) {
*undeleted_dirs = 1;
return;
}
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, path, UINT64_MAX, true, true, status);
if (TF_GetCode(status) != TF_OK) return;
std::string dir = path;
MaybeAppendSlash(&dir);
for (const std::string& children : childrens) {
const std::string& full_path = dir + children;
DeleteFile(filesystem, full_path.c_str(), status);
if (TF_GetCode(status) != TF_OK) {
if (IsDirectory(filesystem, full_path.c_str(), status))
(*undeleted_dirs)++;
else
(*undeleted_files)++;
}
}
}
int GetChildren(const TF_Filesystem* filesystem, const char* path,
char*** entries, TF_Status* status) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
std::vector<std::string> childrens =
GetChildrenBounded(gcs_file, path, UINT64_MAX, false, false, status);
if (TF_GetCode(status) != TF_OK) return -1;
int num_entries = childrens.size();
*entries = static_cast<char**>(
plugin_memory_allocate(num_entries * sizeof((*entries)[0])));
for (int i = 0; i < num_entries; i++)
(*entries)[i] = strdup(childrens[i].c_str());
TF_SetStatus(status, TF_OK, "");
return num_entries;
}
void Stat(const TF_Filesystem* filesystem, const char* path,
TF_FileStatistics* stats, TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return;
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
if (object.empty()) {
auto bucket_metadata =
gcs_file->gcs_client.GetBucketMetadata(bucket, gcs::Fields(""));
TF_SetStatusFromGCSStatus(bucket_metadata.status(), status);
if (TF_GetCode(status) == TF_OK) {
stats->is_directory = true;
stats->length = 0;
stats->mtime_nsec = 0;
}
return;
}
if (IsDirectory(filesystem, path, status)) {
stats->is_directory = true;
stats->length = 0;
stats->mtime_nsec = 0;
return TF_SetStatus(status, TF_OK, "");
}
if (TF_GetCode(status) == TF_FAILED_PRECONDITION) {
auto metadata = gcs_file->gcs_client.GetObjectMetadata(
bucket, object, gcs::Fields("size,timeStorageClassUpdated"));
if (metadata) {
stats->is_directory = false;
stats->length = metadata.value().size();
stats->mtime_nsec = metadata.value()
.time_storage_class_updated()
.time_since_epoch()
.count();
}
TF_SetStatusFromGCSStatus(metadata.status(), status);
}
}
int64_t GetFileSize(const TF_Filesystem* filesystem, const char* path,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) return -1;
TF_FileStatistics stat;
Stat(filesystem, path, &stat, status);
return stat.length;
}
static char* TranslateName(const TF_Filesystem* filesystem, const char* uri) {
return strdup(uri);
}
static void FlushCaches(const TF_Filesystem* filesystem) {
auto gcs_file = static_cast<GCSFile*>(filesystem->plugin_filesystem);
absl::ReaderMutexLock l(&gcs_file->block_cache_lock);
gcs_file->file_block_cache->Flush();
gcs_file->stat_cache->Clear();
}
}
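// Populates the operation tables for a single filesystem scheme. The tables
// are allocated with plugin_memory_allocate so that core TensorFlow can
// release them with the matching deallocator.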
static void ProvideFilesystemSupportFor(TF_FilesystemPluginOps* ops,
const char* uri) {
TF_SetFilesystemVersionMetadata(ops);
ops->scheme = strdup(uri);
ops->random_access_file_ops = static_cast<TF_RandomAccessFileOps*>(
plugin_memory_allocate(TF_RANDOM_ACCESS_FILE_OPS_SIZE));
ops->random_access_file_ops->cleanup = tf_random_access_file::Cleanup;
ops->random_access_file_ops->read = tf_random_access_file::Read;
ops->writable_file_ops = static_cast<TF_WritableFileOps*>(
plugin_memory_allocate(TF_WRITABLE_FILE_OPS_SIZE));
ops->writable_file_ops->cleanup = tf_writable_file::Cleanup;
ops->read_only_memory_region_ops = static_cast<TF_ReadOnlyMemoryRegionOps*>(
plugin_memory_allocate(TF_READ_ONLY_MEMORY_REGION_OPS_SIZE));
ops->read_only_memory_region_ops->cleanup =
tf_read_only_memory_region::Cleanup;
ops->read_only_memory_region_ops->data = tf_read_only_memory_region::Data;
ops->read_only_memory_region_ops->length = tf_read_only_memory_region::Length;
ops->filesystem_ops = static_cast<TF_FilesystemOps*>(
plugin_memory_allocate(TF_FILESYSTEM_OPS_SIZE));
ops->filesystem_ops->init = tf_gcs_filesystem::Init;
ops->filesystem_ops->cleanup = tf_gcs_filesystem::Cleanup;
ops->filesystem_ops->new_random_access_file =
tf_gcs_filesystem::NewRandomAccessFile;
ops->filesystem_ops->new_writable_file = tf_gcs_filesystem::NewWritableFile;
ops->filesystem_ops->new_appendable_file =
tf_gcs_filesystem::NewAppendableFile;
ops->filesystem_ops->new_read_only_memory_region_from_file =
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile;
ops->filesystem_ops->create_dir = tf_gcs_filesystem::CreateDir;
ops->filesystem_ops->delete_file = tf_gcs_filesystem::DeleteFile;
ops->filesystem_ops->delete_dir = tf_gcs_filesystem::DeleteDir;
ops->filesystem_ops->delete_recursively =
tf_gcs_filesystem::DeleteRecursively;
ops->filesystem_ops->copy_file = tf_gcs_filesystem::CopyFile;
ops->filesystem_ops->path_exists = tf_gcs_filesystem::PathExists;
ops->filesystem_ops->is_directory = tf_gcs_filesystem::IsDirectory;
ops->filesystem_ops->stat = tf_gcs_filesystem::Stat;
ops->filesystem_ops->get_children = tf_gcs_filesystem::GetChildren;
ops->filesystem_ops->translate_name = tf_gcs_filesystem::TranslateName;
ops->filesystem_ops->flush_caches = tf_gcs_filesystem::FlushCaches;
}
void TF_InitPlugin(TF_FilesystemPluginInfo* info) {
info->plugin_memory_allocate = plugin_memory_allocate;
info->plugin_memory_free = plugin_memory_free;
info->num_schemes = 1;
info->ops = static_cast<TF_FilesystemPluginOps*>(
plugin_memory_allocate(info->num_schemes * sizeof(info->ops[0])));
ProvideFilesystemSupportFor(&info->ops[0], "gs");
} | #include "tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.h"
#include <random>
#include "absl/strings/string_view.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/stacktrace_handler.h"
#include "tensorflow/core/platform/test.h"
#define ASSERT_TF_OK(x) ASSERT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
#define EXPECT_TF_OK(x) EXPECT_EQ(TF_OK, TF_GetCode(x)) << TF_Message(x)
static const char* content = "abcdefghijklmnopqrstuvwxyz1234567890";
static const absl::string_view content_view = content;
namespace gcs = google::cloud::storage;
static std::string InitializeTmpDir() {
const char* test_dir = getenv("GCS_TEST_TMPDIR");
if (test_dir != nullptr) {
std::string bucket, object;
TF_Status* status = TF_NewStatus();
ParseGCSPath(test_dir, true, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK) {
TF_DeleteStatus(status);
return "";
}
TF_DeleteStatus(status);
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> distribution;
std::string rng_val = std::to_string(distribution(gen));
return tensorflow::io::JoinPath(std::string(test_dir), rng_val);
} else {
return "";
}
}
static std::string* GetTmpDir() {
static std::string tmp_dir = InitializeTmpDir();
if (tmp_dir == "")
return nullptr;
else
return &tmp_dir;
}
namespace tensorflow {
namespace {
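// Fixture that roots every test under GCS_TEST_TMPDIR, suffixed with the
// current test's name, and provides a fresh TF_Status and TF_Filesystem for
// each test.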
class GCSFilesystemTest : public ::testing::Test {
public:
void SetUp() override {
root_dir_ = io::JoinPath(
*GetTmpDir(),
::testing::UnitTest::GetInstance()->current_test_info()->name());
status_ = TF_NewStatus();
filesystem_ = new TF_Filesystem;
filesystem_->plugin_filesystem = nullptr;
}
void TearDown() override {
TF_DeleteStatus(status_);
if (filesystem_->plugin_filesystem != nullptr)
tf_gcs_filesystem::Cleanup(filesystem_);
delete filesystem_;
}
std::string GetURIForPath(absl::string_view path) {
const std::string translated_name =
tensorflow::io::JoinPath(root_dir_, path);
return translated_name;
}
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile* file)>
GetWriter() {
std::unique_ptr<TF_WritableFile, void (*)(TF_WritableFile * file)> writer(
new TF_WritableFile, [](TF_WritableFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr) tf_writable_file::Cleanup(file);
delete file;
}
});
writer->plugin_file = nullptr;
return writer;
}
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile* file)>
GetReader() {
std::unique_ptr<TF_RandomAccessFile, void (*)(TF_RandomAccessFile * file)>
reader(new TF_RandomAccessFile, [](TF_RandomAccessFile* file) {
if (file != nullptr) {
if (file->plugin_file != nullptr)
tf_random_access_file::Cleanup(file);
delete file;
}
});
reader->plugin_file = nullptr;
return reader;
}
void WriteString(const std::string& path, const std::string& content) {
auto writer = GetWriter();
tf_gcs_filesystem::NewWritableFile(filesystem_, path.c_str(), writer.get(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Append(writer.get(), content.c_str(), content.length(),
status_);
if (TF_GetCode(status_) != TF_OK) return;
tf_writable_file::Close(writer.get(), status_);
if (TF_GetCode(status_) != TF_OK) return;
}
std::string ReadAll(const std::string& path) {
auto reader = GetReader();
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(),
reader.get(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
auto file_size =
tf_gcs_filesystem::GetFileSize(filesystem_, path.c_str(), status_);
if (TF_GetCode(status_) != TF_OK) return "";
std::string content;
content.resize(file_size);
auto read = tf_random_access_file::Read(reader.get(), 0, file_size,
&content[0], status_);
if (TF_GetCode(status_) != TF_OK) return "";
if (read >= 0) content.resize(read);
if (file_size != content.size())
TF_SetStatus(
status_, TF_DATA_LOSS,
std::string("expected " + std::to_string(file_size) + " got " +
std::to_string(content.size()) + " bytes")
.c_str());
return content;
}
protected:
TF_Filesystem* filesystem_;
TF_Status* status_;
private:
std::string root_dir_;
};
::testing::AssertionResult WriteToServer(const std::string& path, size_t offset,
size_t length, gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto writer = gcs_client->WriteObject(bucket, object);
writer.write(content + offset, length);
writer.Close();
if (writer.metadata()) {
return ::testing::AssertionSuccess();
} else {
return ::testing::AssertionFailure()
<< writer.metadata().status().message();
}
}
::testing::AssertionResult InsertObject(const std::string& path,
const std::string& content,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto metadata = gcs_client->InsertObject(bucket, object, content);
if (metadata)
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure() << metadata.status().message();
}
::testing::AssertionResult CompareSubString(int64_t offset, size_t length,
absl::string_view result,
size_t read) {
if (length == read && content_view.substr(offset, length) ==
absl::string_view(result).substr(0, read))
return ::testing::AssertionSuccess();
else
return ::testing::AssertionFailure()
<< "Result: " << absl::string_view(result).substr(0, read)
<< " Read: " << read;
}
::testing::AssertionResult CompareWithServer(const std::string& path,
size_t offset, size_t length,
gcs::Client* gcs_client,
TF_Status* status) {
std::string bucket, object;
ParseGCSPath(path, false, &bucket, &object, status);
if (TF_GetCode(status) != TF_OK)
return ::testing::AssertionFailure() << TF_Message(status);
auto reader = gcs_client->ReadObject(bucket, object);
if (!reader) {
return ::testing::AssertionFailure() << reader.status().message();
} else {
std::string content{std::istreambuf_iterator<char>{reader}, {}};
return CompareSubString(offset, length, content, content.length());
}
}
TEST_F(GCSFilesystemTest, ParseGCSPath) {
std::string bucket, object;
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ASSERT_EQ(object, "path/to/object");
ParseGCSPath("gs:
ASSERT_TF_OK(status_);
ASSERT_EQ(bucket, "bucket");
ParseGCSPath("bucket/path/to/object", false, &bucket, &object, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
ParseGCSPath("gs:
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT);
}
TEST_F(GCSFilesystemTest, RandomAccessFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
char* result = new char[content_view.length()];
int64_t read = tf_random_access_file::Read(file, 0, 1, result, status_);
ASSERT_EQ(read, -1) << "Read: " << read;
ASSERT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(filepath, 0, content_view.length(),
&gcs_file->gcs_client, status_));
read = tf_random_access_file::Read(file, 0, content_view.length(), result,
status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, content_view.length(), result, read));
read = tf_random_access_file::Read(file, 0, 4, result, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(CompareSubString(0, 4, result, read));
read = tf_random_access_file::Read(file, content_view.length() - 2, 4, result,
status_);
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
ASSERT_TRUE(CompareSubString(content_view.length() - 2, 2, result, read));
delete[] result;
tf_random_access_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, WritableFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string filepath = GetURIForPath("a_file");
TF_WritableFile* file = new TF_WritableFile;
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
auto length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
gcs_file->compose = true;
filepath = GetURIForPath("b_file");
tf_gcs_filesystem::NewWritableFile(filesystem_, filepath.c_str(), file,
status_);
ASSERT_TF_OK(status_);
tf_writable_file::Append(file, content, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 4);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 4, &gcs_file->gcs_client, status_));
tf_writable_file::Append(file, content + 4, 4, status_);
ASSERT_TF_OK(status_);
length = tf_writable_file::Tell(file, status_);
ASSERT_EQ(length, 8);
ASSERT_TF_OK(status_);
tf_writable_file::Flush(file, status_);
ASSERT_TF_OK(status_);
ASSERT_TRUE(
CompareWithServer(filepath, 0, 8, &gcs_file->gcs_client, status_));
tf_writable_file::Close(file, status_);
ASSERT_TF_OK(status_);
tf_writable_file::Cleanup(file);
delete file;
}
TEST_F(GCSFilesystemTest, ReadOnlyMemoryRegion) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(WriteToServer(path, 0, 0, &gcs_file->gcs_client, status_));
TF_ReadOnlyMemoryRegion* region = new TF_ReadOnlyMemoryRegion;
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_EQ(TF_GetCode(status_), TF_INVALID_ARGUMENT) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
ASSERT_TRUE(WriteToServer(path, 0, content_view.length(),
&gcs_file->gcs_client, status_));
tf_gcs_filesystem::NewReadOnlyMemoryRegionFromFile(filesystem_, path.c_str(),
region, status_);
ASSERT_TF_OK(status_);
auto length = tf_read_only_memory_region::Length(region);
ASSERT_EQ(length, content_view.length());
auto data =
static_cast<const char*>(tf_read_only_memory_region::Data(region));
ASSERT_TRUE(CompareSubString(0, content_view.length(), data, length));
tf_read_only_memory_region::Cleanup(region);
delete region;
}
TEST_F(GCSFilesystemTest, PathExists) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("PathExists");
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_NOT_FOUND, TF_GetCode(status_)) << TF_Message(status_);
TF_SetStatus(status_, TF_OK, "");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
}
TEST_F(GCSFilesystemTest, GetChildren) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string base = GetURIForPath("GetChildren");
tf_gcs_filesystem::CreateDir(filesystem_, base.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string file = io::JoinPath(base, "TestFile.csv");
WriteString(file, "test");
EXPECT_TF_OK(status_);
const std::string subdir = io::JoinPath(base, "SubDir");
tf_gcs_filesystem::CreateDir(filesystem_, subdir.c_str(), status_);
EXPECT_TF_OK(status_);
const std::string subfile = io::JoinPath(subdir, "TestSubFile.csv");
WriteString(subfile, "test");
EXPECT_TF_OK(status_);
char** entries;
auto num_entries = tf_gcs_filesystem::GetChildren(filesystem_, base.c_str(),
&entries, status_);
EXPECT_TF_OK(status_);
std::vector<std::string> childrens;
for (int i = 0; i < num_entries; ++i) {
childrens.push_back(entries[i]);
}
std::sort(childrens.begin(), childrens.end());
EXPECT_EQ(std::vector<string>({"SubDir/", "TestFile.csv"}), childrens);
}
TEST_F(GCSFilesystemTest, DeleteFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("DeleteFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteFile(filesystem_, path.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, path.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND);
}
TEST_F(GCSFilesystemTest, CreateDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("CreateDir");
tf_gcs_filesystem::CreateDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_TRUE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, DeleteDir) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string dir = GetURIForPath("DeleteDir");
const std::string file = io::JoinPath(dir, "DeleteDirFile.csv");
WriteString(file, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_EQ(TF_GetCode(status_), TF_FAILED_PRECONDITION);
TF_SetStatus(status_, TF_OK, "");
tf_gcs_filesystem::DeleteFile(filesystem_, file.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::DeleteDir(filesystem_, dir.c_str(), status_);
EXPECT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, dir.c_str(), &stat, status_);
EXPECT_EQ(TF_GetCode(status_), TF_NOT_FOUND) << TF_Message(status_);
}
TEST_F(GCSFilesystemTest, StatFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string path = GetURIForPath("StatFile");
WriteString(path, "test");
ASSERT_TF_OK(status_);
TF_FileStatistics stat;
tf_gcs_filesystem::Stat(filesystem_, path.c_str(), &stat, status_);
EXPECT_TF_OK(status_);
EXPECT_EQ(4, stat.length);
EXPECT_FALSE(stat.is_directory);
}
TEST_F(GCSFilesystemTest, RenameFile) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileSrc");
const std::string dst = GetURIForPath("RenameFileDst");
WriteString(src, "test");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test", result);
}
TEST_F(GCSFilesystemTest, RenameFileOverwrite) {
tf_gcs_filesystem::Init(filesystem_, status_);
ASSERT_TF_OK(status_);
const std::string src = GetURIForPath("RenameFileOverwriteSrc");
const std::string dst = GetURIForPath("RenameFileOverwriteDst");
WriteString(src, "test_old");
ASSERT_TF_OK(status_);
WriteString(dst, "test_new");
ASSERT_TF_OK(status_);
tf_gcs_filesystem::PathExists(filesystem_, dst.c_str(), status_);
EXPECT_TF_OK(status_);
tf_gcs_filesystem::RenameFile(filesystem_, src.c_str(), dst.c_str(), status_);
EXPECT_TF_OK(status_);
auto result = ReadAll(dst);
EXPECT_TF_OK(status_);
EXPECT_EQ("test_old", result);
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_NoBlockCache) {
tf_gcs_filesystem::InitTest(filesystem_, false, 0, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(6);
int64_t read = tf_random_access_file::Read(file, 0, 6, &result[0], status_);
ASSERT_EQ(read, 6) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "012345") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 6, 6, &result[0], status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "6789") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_ReadAtEOF) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "0123456789", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(10);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 10) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "0123456789") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, result.length(), result.length(),
&result[0], status_);
ASSERT_EQ(read, 0) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "") << "Result: " << result << "\n";
}
TEST_F(GCSFilesystemTest, NewRandomAccessFile_Buffered_CachedOutOfRange) {
tf_gcs_filesystem::InitTest(filesystem_, false, 10, 0, 0, 0, 0, status_);
ASSERT_TF_OK(status_) << "Could not initialize filesystem. "
<< TF_Message(status_);
std::string path = GetURIForPath("a_file");
auto gcs_file =
static_cast<tf_gcs_filesystem::GCSFile*>(filesystem_->plugin_filesystem);
ASSERT_TRUE(InsertObject(path, "012345678", &gcs_file->gcs_client, status_));
TF_RandomAccessFile* file = new TF_RandomAccessFile;
tf_gcs_filesystem::NewRandomAccessFile(filesystem_, path.c_str(), file,
status_);
ASSERT_TF_OK(status_);
std::string result;
result.resize(5);
int64_t read = tf_random_access_file::Read(file, 0, result.length(),
&result[0], status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
ASSERT_EQ(result, "01234") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 4, result.length(), &result[0],
status_);
ASSERT_EQ(read, 5) << "Read: " << read << "\n";
ASSERT_TF_OK(status_);
result.resize(read);
ASSERT_EQ(result, "45678") << "Result: " << result << "\n";
read = tf_random_access_file::Read(file, 5, result.length(), &result[0],
status_);
ASSERT_EQ(read, 4) << "Read: " << read << "\n";
ASSERT_EQ(TF_GetCode(status_), TF_OUT_OF_RANGE) << TF_Message(status_);
result.resize(read);
ASSERT_EQ(result, "5678") << "Result: " << result << "\n";
}
}
}
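// The test binary requires GCS_TEST_TMPDIR to point at a valid gs:// path;
// it aborts before running any tests when the variable is missing.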
GTEST_API_ int main(int argc, char** argv) {
tensorflow::testing::InstallStacktraceHandler();
if (!GetTmpDir()) {
std::cerr << "Could not read GCS_TEST_TMPDIR env";
return -1;
}
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/filesystem/plugins/gcs/gcs_filesystem_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5cab47d9-a1a6-4e16-bd6b-9e28cc39f160 | cpp | tensorflow/tensorflow | list_element_shape | tensorflow/lite/kernels/variants/list_kernels/list_element_shape.cc | tensorflow/lite/kernels/variants/list_kernels/list_element_shape_test.cc | #include <cstdint>
#include <cstring>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/kernels/variants/tensor_array.h"
namespace tflite {
namespace variants {
namespace ops {
namespace list_element_shape {
namespace {
using ::tflite::variants::TensorArray;
constexpr int kListInput = 0;
constexpr int kShapeOut = 0;
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
TF_LITE_ENSURE(context, list_input->type == kTfLiteVariant);
TfLiteTensor* shape_out;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kShapeOut, &shape_out));
TF_LITE_ENSURE_TYPES_EQ(context, shape_out->type, kTfLiteInt32);
SetTensorToDynamic(shape_out);
return kTfLiteOk;
}
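// Emits the list's element shape as an int32 tensor: an unranked list
// yields a scalar -1, a scalar element shape (encoded as {0}) yields an
// empty 1-D tensor, and ranked shapes are copied out verbatim with -1 for
// unknown dimensions.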
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* list_input;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kListInput, &list_input));
const TensorArray* const list =
reinterpret_cast<const TensorArray*>(list_input->data.data);
const TfLiteIntArray& element_shape = *list->ElementShape();
TfLiteTensor* shape_out;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kShapeOut, &shape_out));
if (element_shape.size == 0) {
context->ResizeTensor(context, shape_out, BuildTfLiteArray(0).release());
GetTensorData<int32_t>(shape_out)[0] = -1;
} else if (element_shape.data[0] == 0) {
context->ResizeTensor(context, shape_out, BuildTfLiteArray({0}).release());
} else {
context->ResizeTensor(context, shape_out,
BuildTfLiteArray({element_shape.size}).release());
memcpy(GetTensorData<int32_t>(shape_out), element_shape.data,
element_shape.size * sizeof(int32_t));
}
return kTfLiteOk;
}
}
}
TfLiteRegistration* Register_LIST_ELEMENT_SHAPE() {
static TfLiteRegistration r = {nullptr, nullptr, list_element_shape::Prepare,
list_element_shape::Eval};
return &r;
}
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_kernels/test_util.h"
#include "tensorflow/lite/kernels/variants/list_ops_lib.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace variants {
namespace ops {
namespace {
using ::testing::ElementsAreArray;
class ListElementShapeModel : public ListOpModel {
public:
ListElementShapeModel() {
list_input_ = AddInput({TensorType_VARIANT, {}});
shape_output_ = AddOutput({TensorType_INT32, {}});
SetCustomOp("ListElementShape", {}, Register_LIST_ELEMENT_SHAPE);
BuildInterpreter({{}});
}
const TfLiteTensor* GetOutputTensor(int index) {
return interpreter_->tensor(index);
}
int list_input_;
int shape_output_;
};
TEST(ListElementShapeTest, MultiDimStaticShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {2, 2}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({2}));
ASSERT_THAT(std::vector<int>(out->data.i32, out->data.i32 + 2),
ElementsAreArray({2, 2}));
}
TEST(ListElementShapeTest, MultiDimWithDynamicDims) {
ListElementShapeModel m;
m.PopulateListTensor(0, {2, -1, 3}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({3}));
ASSERT_THAT(std::vector<int>(out->data.i32, out->data.i32 + 3),
ElementsAreArray({2, -1, 3}));
}
TEST(ListElementShapeTest, ScalarShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {0}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({0}));
ASSERT_EQ(out->bytes, 0);
}
TEST(ListElementShapeTest, UnrankedShape) {
ListElementShapeModel m;
m.PopulateListTensor(0, {}, 10, kTfLiteInt32);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
const TfLiteTensor* const out = m.GetOutputTensor(m.shape_output_);
ASSERT_THAT(out, DimsAre({}));
ASSERT_EQ(out->bytes, sizeof(int));
ASSERT_EQ(out->data.i32[0], -1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_element_shape.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/variants/list_kernels/list_element_shape_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4546d4dd-31ac-484a-90ac-7d1093f72fb1 | cpp | tensorflow/tensorflow | sdca_ops | tensorflow/core/ops/sdca_ops.cc | tensorflow/core/kernels/sdca_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
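// Shape function shared by SdcaOptimizer and SdcaOptimizerV2: each delta
// output mirrors the shape of the corresponding weight input, and the
// example state output is a matrix with four columns and an unknown number
// of rows.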
static Status ApplySdcaOptimizerShapeFn(InferenceContext* c) {
std::vector<ShapeHandle> sparse_handles;
if (c->input("sparse_weights", &sparse_handles).ok()) {
TF_RETURN_IF_ERROR(
c->set_output("out_delta_sparse_weights", sparse_handles));
}
std::vector<ShapeHandle> dense_handles;
if (c->input("dense_weights", &dense_handles).ok()) {
TF_RETURN_IF_ERROR(c->set_output("out_delta_dense_weights", dense_handles));
}
return c->set_output(
"out_example_state_data",
{c->Matrix(InferenceContext::kUnknownDim, c->MakeDim(4))});
}
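// Note: "adaptative" below is the historical, misspelled attribute name;
// SdcaOptimizerV2 is identical except that it exposes the corrected
// "adaptive" spelling.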
REGISTER_OP("SdcaOptimizer")
.Attr(
"loss_type: {'logistic_loss', 'squared_loss', 'hinge_loss',"
"'smooth_hinge_loss', 'poisson_loss'}")
.Attr("adaptative : bool=false")
.Attr("num_sparse_features: int >= 0")
.Attr("num_sparse_features_with_values: int >= 0")
.Attr("num_dense_features: int >= 0")
.Attr("l1: float")
.Attr("l2: float")
.Attr("num_loss_partitions: int >= 1")
.Attr("num_inner_iterations: int >= 1")
.Input("sparse_example_indices: num_sparse_features * int64")
.Input("sparse_feature_indices: num_sparse_features * int64")
.Input("sparse_feature_values: num_sparse_features_with_values * float")
.Input("dense_features: num_dense_features * float")
.Input("example_weights: float")
.Input("example_labels: float")
.Input("sparse_indices: num_sparse_features * int64")
.Input("sparse_weights: num_sparse_features * float")
.Input("dense_weights: num_dense_features * float")
.Input("example_state_data: float")
.Output("out_example_state_data: float")
.Output("out_delta_sparse_weights: num_sparse_features * float")
.Output("out_delta_dense_weights: num_dense_features * float")
.SetShapeFn(ApplySdcaOptimizerShapeFn);
REGISTER_OP("SdcaOptimizerV2")
.Attr(
"loss_type: {'logistic_loss', 'squared_loss', 'hinge_loss',"
"'smooth_hinge_loss', 'poisson_loss'}")
.Attr("adaptive : bool=false")
.Attr("num_sparse_features: int >= 0")
.Attr("num_sparse_features_with_values: int >= 0")
.Attr("num_dense_features: int >= 0")
.Attr("l1: float")
.Attr("l2: float")
.Attr("num_loss_partitions: int >= 1")
.Attr("num_inner_iterations: int >= 1")
.Input("sparse_example_indices: num_sparse_features * int64")
.Input("sparse_feature_indices: num_sparse_features * int64")
.Input("sparse_feature_values: num_sparse_features_with_values * float")
.Input("dense_features: num_dense_features * float")
.Input("example_weights: float")
.Input("example_labels: float")
.Input("sparse_indices: num_sparse_features * int64")
.Input("sparse_weights: num_sparse_features * float")
.Input("dense_weights: num_dense_features * float")
.Input("example_state_data: float")
.Output("out_example_state_data: float")
.Output("out_delta_sparse_weights: num_sparse_features * float")
.Output("out_delta_dense_weights: num_dense_features * float")
.SetShapeFn(ApplySdcaOptimizerShapeFn);
REGISTER_OP("SdcaShrinkL1")
.Attr("num_features: int >= 0")
.Attr("l1: float")
.Attr("l2: float")
.Input("weights: Ref(num_features * float)")
.SetShapeFn(shape_inference::UnknownShape);
REGISTER_OP("SdcaFprint")
.Input("input: string")
.Output("output: int64")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
ShapeHandle output_shape;
TF_RETURN_IF_ERROR(c->Concatenate(handle, c->Vector(2), &output_shape));
c->set_output(0, output_shape);
return absl::OkStatus();
});
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
const SessionOptions* GetSingleThreadedOptions() {
static const SessionOptions* const kSessionOptions = []() {
SessionOptions* const result = new SessionOptions();
result->config.set_intra_op_parallelism_threads(1);
result->config.set_inter_op_parallelism_threads(1);
result->config.add_session_inter_op_thread_pool()->set_num_threads(1);
return result;
}();
return kSessionOptions;
}
const SessionOptions* GetMultiThreadedOptions() {
static const SessionOptions* const kSessionOptions = []() {
SessionOptions* const result = new SessionOptions();
result->config.set_intra_op_parallelism_threads(0);
result->config.set_inter_op_parallelism_threads(0);
    result->config.add_session_inter_op_thread_pool()->set_num_threads(0);
return result;
}();
return kSessionOptions;
}
Node* Var(Graph* const g, const int n) {
return test::graph::Var(g, DT_FLOAT, TensorShape({n}));
}
std::vector<Node*> VarVector(Graph* const g, const int nodes,
const int node_size) {
std::vector<Node*> result;
result.reserve(nodes);
for (int i = 0; i < nodes; ++i) {
result.push_back(Var(g, node_size));
}
return result;
}
Node* Zeros(Graph* const g, const TensorShape& shape) {
Tensor data(DT_FLOAT, shape);
data.flat<float>().setZero();
return test::graph::Constant(g, data);
}
Node* Zeros(Graph* const g, const int n) { return Zeros(g, TensorShape({n})); }
Node* Ones(Graph* const g, const int n) {
Tensor data(DT_FLOAT, TensorShape({n}));
test::FillFn<float>(&data, [](const int i) { return 1.0f; });
return test::graph::Constant(g, data);
}
Node* SparseIndices(Graph* const g, const int sparse_features_per_group) {
Tensor data(DT_INT64, TensorShape({sparse_features_per_group}));
test::FillFn<int64_t>(&data, [&](const int i) { return i; });
return test::graph::Constant(g, data);
}
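// Each example carries exactly four sparse features per group, so entry i of
// the example-indices tensor below maps feature i to example i / 4.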
Node* SparseExampleIndices(Graph* const g, const int sparse_features_per_group,
const int num_examples) {
const int x_size = num_examples * 4;
Tensor data(DT_INT64, TensorShape({x_size}));
test::FillFn<int64_t>(&data, [&](const int i) { return i / 4; });
return test::graph::Constant(g, data);
}
Node* SparseFeatureIndices(Graph* const g, const int sparse_features_per_group,
const int num_examples) {
const int x_size = num_examples * 4;
Tensor data(DT_INT64, TensorShape({x_size}));
test::FillFn<int64_t>(
&data, [&](const int i) { return i % sparse_features_per_group; });
return test::graph::Constant(g, data);
}
Node* RandomZeroOrOne(Graph* const g, const int n) {
Tensor data(DT_FLOAT, TensorShape({n}));
test::FillFn<float>(&data, [](const int i) {
return (random::New64() % 2) == 0 ? 0.0f : 1.0f;
});
return test::graph::Constant(g, data);
}
Node* RandomZeroOrOneMatrix(Graph* const g, const int n, int d) {
Tensor data(DT_FLOAT, TensorShape({n, d}));
test::FillFn<float>(&data, [](const int i) {
return (random::New64() % 2) == 0 ? 0.0f : 1.0f;
});
return test::graph::Constant(g, data);
}
void GetGraphs(const int32_t num_examples,
const int32_t num_sparse_feature_groups,
const int32_t sparse_features_per_group,
const int32_t num_dense_feature_groups,
const int32_t dense_features_per_group, Graph** const init_g,
Graph** train_g) {
{
Graph* g = new Graph(OpRegistry::Global());
std::vector<Node*> sparse_weight_nodes =
VarVector(g, num_sparse_feature_groups, sparse_features_per_group);
std::vector<Node*> dense_weight_nodes =
VarVector(g, num_dense_feature_groups, dense_features_per_group);
Node* const multi_zero = Zeros(g, sparse_features_per_group);
for (Node* n : sparse_weight_nodes) {
test::graph::Assign(g, n, multi_zero);
}
Node* const zero = Zeros(g, dense_features_per_group);
for (Node* n : dense_weight_nodes) {
test::graph::Assign(g, n, zero);
}
*init_g = g;
}
{
Graph* g = new Graph(OpRegistry::Global());
std::vector<Node*> sparse_weight_nodes =
VarVector(g, num_sparse_feature_groups, sparse_features_per_group);
std::vector<Node*> dense_weight_nodes =
VarVector(g, num_dense_feature_groups, dense_features_per_group);
std::vector<NodeBuilder::NodeOut> sparse_indices;
std::vector<NodeBuilder::NodeOut> sparse_weights;
for (Node* n : sparse_weight_nodes) {
sparse_indices.push_back(
NodeBuilder::NodeOut(SparseIndices(g, sparse_features_per_group)));
sparse_weights.push_back(NodeBuilder::NodeOut(n));
}
std::vector<NodeBuilder::NodeOut> dense_weights;
dense_weights.reserve(dense_weight_nodes.size());
for (Node* n : dense_weight_nodes) {
dense_weights.push_back(NodeBuilder::NodeOut(n));
}
std::vector<NodeBuilder::NodeOut> sparse_example_indices;
std::vector<NodeBuilder::NodeOut> sparse_feature_indices;
std::vector<NodeBuilder::NodeOut> sparse_values;
sparse_example_indices.reserve(num_sparse_feature_groups);
for (int i = 0; i < num_sparse_feature_groups; ++i) {
sparse_example_indices.push_back(NodeBuilder::NodeOut(
SparseExampleIndices(g, sparse_features_per_group, num_examples)));
}
sparse_feature_indices.reserve(num_sparse_feature_groups);
for (int i = 0; i < num_sparse_feature_groups; ++i) {
sparse_feature_indices.push_back(NodeBuilder::NodeOut(
SparseFeatureIndices(g, sparse_features_per_group, num_examples)));
}
sparse_values.reserve(num_sparse_feature_groups);
for (int i = 0; i < num_sparse_feature_groups; ++i) {
sparse_values.push_back(
NodeBuilder::NodeOut(RandomZeroOrOne(g, num_examples * 4)));
}
std::vector<NodeBuilder::NodeOut> dense_features;
dense_features.reserve(num_dense_feature_groups);
for (int i = 0; i < num_dense_feature_groups; ++i) {
dense_features.push_back(NodeBuilder::NodeOut(
RandomZeroOrOneMatrix(g, num_examples, dense_features_per_group)));
}
Node* const weights = Ones(g, num_examples);
Node* const labels = RandomZeroOrOne(g, num_examples);
Node* const example_state_data = Zeros(g, TensorShape({num_examples, 4}));
Node* sdca = nullptr;
TF_CHECK_OK(
NodeBuilder(g->NewName("sdca"), "SdcaOptimizer")
.Attr("loss_type", "logistic_loss")
.Attr("num_sparse_features", num_sparse_feature_groups)
.Attr("num_sparse_features_with_values", num_sparse_feature_groups)
.Attr("num_dense_features", num_dense_feature_groups)
.Attr("l1", 0.0)
.Attr("l2", 1.0)
.Attr("num_loss_partitions", 1)
.Attr("num_inner_iterations", 2)
.Input(sparse_example_indices)
.Input(sparse_feature_indices)
.Input(sparse_values)
.Input(dense_features)
.Input(weights)
.Input(labels)
.Input(sparse_indices)
.Input(sparse_weights)
.Input(dense_weights)
.Input(example_state_data)
.Finalize(g, &sdca));
*train_g = g;
}
}
void BM_SDCA(::testing::benchmark::State& state) {
const int num_examples = state.range(0);
Graph* init = nullptr;
Graph* train = nullptr;
  GetGraphs(num_examples, /*num_sparse_feature_groups=*/20,
            /*sparse_features_per_group=*/5, /*num_dense_feature_groups=*/1,
            /*dense_features_per_group=*/20, &init, &train);
test::Benchmark("cpu", train, GetSingleThreadedOptions(), init, nullptr, "",
false)
.Run(state);
}
void BM_SDCA_LARGE_DENSE(::testing::benchmark::State& state) {
const int num_examples = state.range(0);
Graph* init = nullptr;
Graph* train = nullptr;
  GetGraphs(num_examples, /*num_sparse_feature_groups=*/0,
            /*sparse_features_per_group=*/0, /*num_dense_feature_groups=*/5,
            /*dense_features_per_group=*/200000, &init, &train);
test::Benchmark("cpu", train, GetSingleThreadedOptions(), init, nullptr, "",
false)
.Run(state);
}
void BM_SDCA_LARGE_SPARSE(::testing::benchmark::State& state) {
const int num_examples = state.range(0);
Graph* init = nullptr;
Graph* train = nullptr;
  GetGraphs(num_examples, /*num_sparse_feature_groups=*/65,
            /*sparse_features_per_group=*/1e6, /*num_dense_feature_groups=*/0,
            /*dense_features_per_group=*/0, &init, &train);
test::Benchmark("cpu", train, GetMultiThreadedOptions(), init, nullptr, "",
false)
.Run(state);
}
}
BENCHMARK(BM_SDCA)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);
BENCHMARK(BM_SDCA_LARGE_DENSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);
BENCHMARK(BM_SDCA_LARGE_SPARSE)->Arg(128)->Arg(256)->Arg(512)->Arg(1024);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sdca_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sdca_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
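ApplySdcaOptimizerShapeFn above implements a simple contract: each delta output mirrors the shape of the corresponding weight input, and the example state is always a matrix of [num_examples, 4] whose row count is unknown at graph-construction time. A plain-C++ sketch of that mapping (`Shape` and `kUnknownDim` are local stand-ins, not the real shape_inference::InferenceContext machinery):

```cpp
#include <vector>

// -1 encodes an unknown dimension, mirroring InferenceContext::kUnknownDim.
struct Shape {
  std::vector<long long> dims;
};
constexpr long long kUnknownDim = -1;

// out_delta_sparse_weights / out_delta_dense_weights copy the weight shapes
// verbatim, one output per weight input.
std::vector<Shape> SdcaDeltaShapes(const std::vector<Shape>& weights) {
  return weights;
}

// out_example_state_data is a matrix with unknown row count and 4 columns.
Shape SdcaExampleStateShape() { return Shape{{kUnknownDim, 4}}; }
```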
e0ef1394-ad72-454a-8904-8a48b9b75687 | cpp | abseil/abseil-cpp | algorithm | absl/algorithm/algorithm.h | absl/algorithm/algorithm_test.cc | #ifndef ABSL_ALGORITHM_ALGORITHM_H_
#define ABSL_ALGORITHM_ALGORITHM_H_
#include <algorithm>
#include <iterator>
#include <type_traits>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
using std::equal;
using std::rotate;
template <typename InputIterator, typename EqualityComparable>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool linear_search(
InputIterator first, InputIterator last, const EqualityComparable& value) {
return std::find(first, last, value) != last;
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/algorithm/algorithm.h"
#include <array>
#include <vector>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace {
class LinearSearchTest : public testing::Test {
protected:
LinearSearchTest() : container_{1, 2, 3} {}
static bool Is3(int n) { return n == 3; }
static bool Is4(int n) { return n == 4; }
std::vector<int> container_;
};
TEST_F(LinearSearchTest, linear_search) {
EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3));
EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4));
}
TEST_F(LinearSearchTest, linear_searchConst) {
const std::vector<int> *const const_container = &container_;
EXPECT_TRUE(
absl::linear_search(const_container->begin(), const_container->end(), 3));
EXPECT_FALSE(
absl::linear_search(const_container->begin(), const_container->end(), 4));
}
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
TEST_F(LinearSearchTest, Constexpr) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::linear_search(kArray.begin(), kArray.end(), 3));
static_assert(!absl::linear_search(kArray.begin(), kArray.end(), 4));
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/algorithm/algorithm.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/algorithm/algorithm_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
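Since absl::linear_search is defined directly in the header above as `std::find(first, last, value) != last`, usage follows straightforwardly: it requires only `operator==` on the element type (no ordering, unlike binary search) and runs in O(n). A small example matching the tests:

```cpp
#include <iostream>
#include <vector>

#include "absl/algorithm/algorithm.h"

int main() {
  std::vector<int> v = {1, 2, 3};
  std::cout << std::boolalpha
            << absl::linear_search(v.begin(), v.end(), 3) << '\n'   // true
            << absl::linear_search(v.begin(), v.end(), 4) << '\n';  // false
}
```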
7cf7d152-112a-442f-9cb2-370feb836587 | cpp | tensorflow/tensorflow | gather_expander | third_party/xla/xla/service/gather_expander.cc | third_party/xla/xla/service/gather_expander_test.cc | #include "xla/service/gather_expander.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/while_util.h"
#include "xla/util.h"
namespace xla {
namespace {
absl::StatusOr<HloInstruction*> TransposeIndexVectorDimToLast(
HloInstruction* start_indices, int64_t index_vector_dim) {
const Shape& start_indices_shape = start_indices->shape();
if (start_indices_shape.dimensions_size() == index_vector_dim) {
return start_indices;
}
if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) {
return start_indices;
}
std::vector<int64_t> permutation;
permutation.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
permutation.push_back(i);
}
}
permutation.push_back(index_vector_dim);
return MakeTransposeHlo(start_indices, permutation);
}
absl::StatusOr<HloInstruction*> CanonicalizeGatherIndices(
HloInstruction* start_indices, int64_t index_vector_dim) {
TF_ASSIGN_OR_RETURN(
HloInstruction * transposed_start_indices,
TransposeIndexVectorDimToLast(start_indices, index_vector_dim));
bool indices_are_scalar =
index_vector_dim == start_indices->shape().dimensions_size();
const int64_t index_dims_in_start_indices = indices_are_scalar ? 0 : 1;
const Shape& shape = transposed_start_indices->shape();
if (shape.dimensions_size() == index_dims_in_start_indices) {
return PrependDegenerateDims(transposed_start_indices, 1);
} else {
return CollapseFirstNDims(
transposed_start_indices,
shape.dimensions_size() - index_dims_in_start_indices);
}
}
absl::StatusOr<HloInstruction*> AdjustBatchDimsInAccumulator(
const Shape& start_indices_shape, HloInstruction* accumulator,
int64_t index_vector_dim) {
std::vector<int64_t> batch_dim_bounds;
batch_dim_bounds.reserve(start_indices_shape.dimensions_size());
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != index_vector_dim) {
batch_dim_bounds.push_back(start_indices_shape.dimensions(i));
}
}
if (batch_dim_bounds.empty()) {
return ElideDegenerateDims(accumulator, {0});
}
return ExpandFirstDimIntoNDims(accumulator, batch_dim_bounds);
}
absl::StatusOr<HloInstruction*> ExpandIndexVectorIntoOperandSpace(
HloInstruction* index_vector, const GatherDimensionNumbers& dim_numbers,
int64_t operand_rank) {
HloComputation* computation = index_vector->parent();
const Shape& index_shape = index_vector->shape();
if (operand_rank == 0) {
return computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {0})));
}
HloInstruction* zero =
computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateFromDimensions(index_shape.element_type(), {1})));
std::vector<HloInstruction*> expanded_index_components;
for (int i = 0; i < operand_rank; i++) {
int64_t index_vector_dim_index =
FindIndex(dim_numbers.start_index_map(), i);
if (index_vector_dim_index != dim_numbers.start_index_map_size()) {
TF_ASSIGN_OR_RETURN(
HloInstruction * component_to_concat,
          MakeSliceHlo(index_vector, /*start_indices=*/{index_vector_dim_index},
                       /*limit_indices=*/{index_vector_dim_index + 1},
                       /*strides=*/{1}));
expanded_index_components.push_back(component_to_concat);
} else {
expanded_index_components.push_back(zero);
}
}
return MakeConcatHlo(expanded_index_components, 0);
}
absl::StatusOr<std::vector<HloInstruction*>> GatherLoopBody(
const HloInstruction& gather, HloInstruction* induction_var,
const std::vector<HloInstruction*>& incoming_loop_state) {
const GatherDimensionNumbers& dim_numbers = gather.gather_dimension_numbers();
CHECK_EQ(incoming_loop_state.size(), 3);
HloInstruction* const operand = incoming_loop_state[0];
HloInstruction* const start_indices = incoming_loop_state[1];
HloInstruction* const output_accumulator = incoming_loop_state[2];
bool has_scalar_indices = start_indices->shape().dimensions_size() == 1;
CHECK_EQ(has_scalar_indices,
dim_numbers.index_vector_dim() ==
gather.operand(1)->shape().dimensions_size());
HloInstruction* induction_var_as_vector =
      MakeBroadcastHlo(induction_var, /*broadcast_dimensions=*/{},
                       /*result_shape_bounds=*/{1});
HloInstruction* index_vector;
if (has_scalar_indices) {
TF_ASSIGN_OR_RETURN(
index_vector,
MakeDynamicSliceHlo(start_indices, induction_var_as_vector, {1}));
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * index_into_start_indices,
        PadVectorWithZeros(induction_var_as_vector,
                           /*zeros_to_prepend=*/0, /*zeros_to_append=*/1));
int64_t index_vector_size = start_indices->shape().dimensions(1);
TF_ASSIGN_OR_RETURN(
HloInstruction * index_vector_2d,
MakeDynamicSliceHlo(start_indices, index_into_start_indices,
{1, index_vector_size}));
TF_ASSIGN_OR_RETURN(index_vector,
ElideDegenerateDims(index_vector_2d, {0}));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * gathered_slice_start,
ExpandIndexVectorIntoOperandSpace(index_vector, dim_numbers,
operand->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(HloInstruction * gathered_slice,
MakeDynamicSliceHlo(operand, gathered_slice_start,
gather.gather_slice_sizes()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_with_dims_collapsed,
ElideDegenerateDims(gathered_slice, dim_numbers.collapsed_slice_dims()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const gathered_slice_for_update,
PrependDegenerateDims(gathered_slice_with_dims_collapsed, 1));
TF_ASSIGN_OR_RETURN(
HloInstruction* const index_vector_into_accumulator,
PadVectorWithZeros(
          induction_var_as_vector, /*zeros_to_prepend=*/0,
          /*zeros_to_append=*/
          gathered_slice_with_dims_collapsed->shape().dimensions_size()));
TF_ASSIGN_OR_RETURN(
HloInstruction* const updated_accumulator,
MakeDynamicUpdateSliceHlo(output_accumulator, gathered_slice_for_update,
index_vector_into_accumulator));
return absl::StatusOr<std::vector<HloInstruction*>>{
{operand, start_indices, updated_accumulator}};
}
HloInstruction* CreateGatherLoopAccumulatorInitValue(
HloComputation* computation, PrimitiveType element_type,
absl::Span<const int64_t> slice_sizes, int64_t gather_loop_trip_count,
const GatherDimensionNumbers& dim_numbers) {
std::vector<int64_t> accumulator_state_shape_dims;
accumulator_state_shape_dims.reserve(1 + slice_sizes.size());
accumulator_state_shape_dims.push_back(gather_loop_trip_count);
for (int64_t i = 0; i < slice_sizes.size(); i++) {
if (!absl::c_binary_search(dim_numbers.collapsed_slice_dims(), i)) {
accumulator_state_shape_dims.push_back(slice_sizes[i]);
}
}
return BroadcastZeros(computation, element_type,
accumulator_state_shape_dims);
}
absl::StatusOr<HloInstruction*> PermuteBatchAndOffsetDims(
HloInstruction* accumulator, absl::Span<const int64_t> offset_dims,
int64_t output_rank) {
std::vector<int64_t> permutation;
permutation.reserve(output_rank);
int64_t batch_idx_counter = 0;
int64_t offset_idx_counter = output_rank - offset_dims.size();
for (int64_t i = 0; i < output_rank; i++) {
bool is_offset_dim = absl::c_binary_search(offset_dims, i);
if (is_offset_dim) {
permutation.push_back(offset_idx_counter++);
} else {
permutation.push_back(batch_idx_counter++);
}
}
return MakeTransposeHlo(accumulator, permutation);
}
int64_t GatherLoopTripCount(HloInstruction* gather_instr) {
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& start_indices_shape = start_indices->shape();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t trip_count = 1;
for (int64_t i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {
if (i != dim_numbers.index_vector_dim()) {
trip_count *= start_indices_shape.dimensions(i);
}
}
return trip_count;
}
int64_t GatherIsBroadcast(HloInstruction* gather_instr) {
return absl::c_equal(gather_instr->gather_slice_sizes(),
gather_instr->operand(0)->shape().dimensions());
}
}
absl::StatusOr<HloInstruction*> GatherExpander::ExpandInstruction(
HloInstruction* gather_instr) {
CHECK(!ShapeUtil::IsZeroElementArray(gather_instr->shape()));
if (GatherIsBroadcast(gather_instr)) {
if (ShapeUtil::IsZeroElementArray(gather_instr->operand(0)->shape())) {
return MakeScalarLike(gather_instr, 0);
}
Shape broadcast_operand_shape = ShapeUtil::DeleteDimensions(
gather_instr->gather_dimension_numbers().collapsed_slice_dims(),
gather_instr->operand(0)->shape());
TF_ASSIGN_OR_RETURN(HloInstruction * broadcast_operand,
MakeReshapeHlo(broadcast_operand_shape,
gather_instr->mutable_operand(0)));
gather_instr->SetupDerivedInstruction(broadcast_operand);
HloInstruction* broadcast =
MakeBroadcastHlo(broadcast_operand,
gather_instr->gather_dimension_numbers().offset_dims(),
gather_instr->shape());
gather_instr->SetupDerivedInstruction(broadcast);
return broadcast;
}
HloComputation* computation = gather_instr->parent();
HloInstruction* operand = gather_instr->mutable_operand(0);
HloInstruction* start_indices = gather_instr->mutable_operand(1);
const Shape& output_shape = gather_instr->shape();
int64_t output_rank = output_shape.dimensions_size();
const GatherDimensionNumbers& dim_numbers =
gather_instr->gather_dimension_numbers();
int64_t gather_loop_trip_count = GatherLoopTripCount(gather_instr);
if (!IsInt32(gather_loop_trip_count)) {
return Unimplemented(
"Gather operations with more than 2147483647 gather indices are not "
"supported. This error occurred for %s.",
gather_instr->ToString());
}
TF_ASSIGN_OR_RETURN(
HloInstruction * canonical_start_indices,
CanonicalizeGatherIndices(start_indices, dim_numbers.index_vector_dim()));
CHECK_EQ(gather_loop_trip_count,
canonical_start_indices->shape().dimensions(0));
HloInstruction* accumulator_init = CreateGatherLoopAccumulatorInitValue(
computation, output_shape.element_type(),
gather_instr->gather_slice_sizes(), gather_loop_trip_count,
gather_instr->gather_dimension_numbers());
absl::StatusOr<std::vector<HloInstruction*>> gather_loop_result_or_error =
WhileUtil::MakeCountedLoop(
computation, gather_loop_trip_count,
{operand, canonical_start_indices, accumulator_init},
[&](HloInstruction* indvar,
const std::vector<HloInstruction*>& loop_state) {
return GatherLoopBody(*gather_instr, indvar, loop_state);
},
gather_instr->metadata());
TF_ASSIGN_OR_RETURN(std::vector<HloInstruction*> gather_loop_result,
gather_loop_result_or_error);
HloInstruction* accumulator_result = gather_loop_result.back();
TF_ASSIGN_OR_RETURN(
HloInstruction* const accumulator_with_batch_dims_decanonicalized,
AdjustBatchDimsInAccumulator(start_indices->shape(), accumulator_result,
dim_numbers.index_vector_dim()));
return PermuteBatchAndOffsetDims(accumulator_with_batch_dims_decanonicalized,
dim_numbers.offset_dims(), output_rank);
}
bool GatherExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kGather &&
!ShapeUtil::IsZeroElementArray(inst->shape()) &&
(mode_ == kEliminateAllGathers || GatherLoopTripCount(inst) == 1 ||
absl::c_equal(inst->gather_slice_sizes(),
inst->operand(0)->shape().dimensions()));
}
} | #include "xla/service/gather_expander.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
using GatherExpanderTest = HloTestBase;
TEST_F(GatherExpanderTest, ErrorStatusOnTooManyIndices) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherMultipleBatchDims
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2147483647,5] parameter(1)
ROOT gather = s32[2147483647,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
absl::Status status = GatherExpander{GatherExpander::kEliminateAllGathers}
.Run(module.get())
.status();
EXPECT_EQ(status.code(), tsl::error::UNIMPLEMENTED);
ASSERT_THAT(
status.message(),
::testing::HasSubstr("Gather operations with more than 2147483647 gather "
"indices are not supported."));
}
TEST_F(GatherExpanderTest, AvoidDegenerateDims) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
const Shape& while_shape = while_instr->shape();
ASSERT_TRUE(while_shape.IsTuple());
ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {3, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 1)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2}),
ShapeUtil::GetTupleElementShape(while_shape, 2)));
EXPECT_TRUE(ShapeUtil::SameDimensions(
ShapeUtil::MakeShape(S32, {2, 3}),
ShapeUtil::GetTupleElementShape(while_shape, 3)));
}
TEST_F(GatherExpanderTest, CheckOpMetadata) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV2
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
OpMetadata metadata;
metadata.set_op_name("Gather");
module->entry_computation()->root_instruction()->set_metadata(metadata);
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
GatherExpander{GatherExpander::kEliminateAllGathers}.Run(module.get()));
ASSERT_TRUE(changed);
HloInstruction* while_instr = nullptr;
for (auto* instr : module->entry_computation()->instructions()) {
if (instr->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
while_instr = instr;
}
}
ASSERT_NE(while_instr, nullptr)
<< "Expected exactly one while instruction in the entry computation "
"after gather expansion";
EXPECT_EQ(while_instr->metadata().op_name(), "Gather");
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersSkipsNontrivialGather) {
const std::string hlo_text = R"(
HloModule TensorFlowGatherV1
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
ROOT gather = s32[2,3] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1, 3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_FALSE(changed);
}
TEST_F(GatherExpanderTest, EliminateSimpleGathersRewritesTrivialGather) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[100] parameter(0)
indices = s32[1] parameter(1)
ROOT gather = s32[10] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=0,
slice_sizes={10}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateAllGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
}
TEST_F(GatherExpanderTest, GatherIsBroadcast) {
const std::string hlo_text = R"(
HloModule test
ENTRY main {
operand = s32[1,3] parameter(0)
indices = s32[7,5] parameter(1)
ROOT gather = s32[7,3,5] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=2,
slice_sizes={1,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GatherExpander pass(GatherExpander::kEliminateSimpleGathers);
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&pass, module.get()));
ASSERT_TRUE(changed);
ASSERT_FALSE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kGather}));
ASSERT_TRUE(hlo_query::ContainsInstrWithOpcode(module->entry_computation(),
{HloOpcode::kBroadcast}));
module->VerifyOrAddFailure("after-gather-expander.");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gather_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
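The expander's while loop runs once per gather index, and GatherLoopTripCount above computes that count as the product of the start_indices dimensions excluding index_vector_dim; the ErrorStatusOnTooManyIndices test exercises the int32 guard on exactly this value. A self-contained sketch of the computation (plain containers stand in for the XLA Shape API):

```cpp
#include <cstdint>
#include <limits>
#include <vector>

// One loop iteration per gather index: the product of all start_indices
// dimensions except index_vector_dim.
int64_t TripCount(const std::vector<int64_t>& start_indices_dims,
                  int64_t index_vector_dim) {
  int64_t trip_count = 1;
  for (int64_t i = 0; i < static_cast<int64_t>(start_indices_dims.size());
       ++i) {
    if (i != index_vector_dim) trip_count *= start_indices_dims[i];
  }
  return trip_count;
}

// Mirrors the IsInt32 guard: expansion is rejected past 2147483647 indices.
bool FitsInInt32(int64_t trip_count) {
  return trip_count <= std::numeric_limits<int32_t>::max();
}
```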
71508418-052b-485a-b417-d600f37f8823 | cpp | tensorflow/tensorflow | tf_status_helper | tensorflow/c/tf_status_helper.cc | tensorflow/c/tf_status_helper_test.cc | #include "tensorflow/c/tf_status_helper.h"
#include <string>
#include "tensorflow/c/tf_status.h"
#include "xla/tsl/c/tsl_status_helper.h"
namespace tsl {
void Set_TF_Status_from_Status(TF_Status* tf_status,
const absl::Status& status) {
TF_SetStatus(tf_status, TSLCodeFromStatusCode(status.code()),
absl::StatusMessageAsCStr(status));
status.ForEachPayload(
[tf_status](absl::string_view key, const absl::Cord& value) {
std::string key_str(key);
std::string value_str(value);
TF_SetPayload(tf_status, key_str.c_str(), value_str.c_str());
});
}
absl::Status StatusFromTF_Status(const TF_Status* tf_status) {
absl::Status status(StatusCodeFromTSLCode(TF_GetCode(tf_status)),
TF_Message(tf_status));
TF_ForEachPayload(
tf_status,
[](const char* key, const char* value, void* capture) {
absl::Status* status = static_cast<absl::Status*>(capture);
status->SetPayload(key, absl::Cord(absl::string_view(value)));
},
&status);
return status;
}
} | #include "tensorflow/c/tf_status_helper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
TEST(StatusHelper, TestStatusHelper) {
TSL_Status* s = TSL_NewStatus();
absl::Status cc_status(absl::InvalidArgumentError("some error"));
cc_status.SetPayload("key1", absl::Cord("value1"));
cc_status.SetPayload("key2", absl::Cord("value2"));
Set_TF_Status_from_Status(s, cc_status);
ASSERT_EQ(TSL_INVALID_ARGUMENT, TSL_GetCode(s));
ASSERT_EQ(std::string("some error"), TSL_Message(s));
absl::Status another_cc_status(StatusFromTF_Status(s));
ASSERT_FALSE(another_cc_status.ok());
ASSERT_EQ(std::string("some error"), another_cc_status.message());
ASSERT_EQ(error::INVALID_ARGUMENT, another_cc_status.code());
ASSERT_EQ(cc_status.GetPayload("key1"), another_cc_status.GetPayload("key1"));
ASSERT_EQ(cc_status.GetPayload("key2"), another_cc_status.GetPayload("key2"));
TSL_DeleteStatus(s);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/tf_status_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/tf_status_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
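The helper pair above is symmetric: Set_TF_Status_from_Status copies the code, message, and every payload into the C struct, and StatusFromTF_Status reconstructs all three. A round-trip usage sketch against the same public API the unit test uses (TF_NewStatus and TF_DeleteStatus come from tf_status.h):

```cpp
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_status_helper.h"

// Payloads attached to the absl::Status survive the conversion to TF_Status
// and back, as the ForEachPayload loops in the implementation guarantee.
void RoundTripExample() {
  absl::Status in = absl::InvalidArgumentError("some error");
  in.SetPayload("key1", absl::Cord("value1"));

  TF_Status* s = TF_NewStatus();
  tsl::Set_TF_Status_from_Status(s, in);
  absl::Status out = tsl::StatusFromTF_Status(s);  // code, message, payloads
  TF_DeleteStatus(s);
  // out.GetPayload("key1") now holds absl::Cord("value1").
}
```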
90c6fcba-9e56-454e-aa32-12dd171fd942 | cpp | tensorflow/tensorflow | stable_delegate_provider | tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider.cc | tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider_test.cc | #include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/tools/command_line_flags.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/tool_params.h"
#if !defined(_WIN32)
#include "tensorflow/lite/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/acceleration/configuration/c/stable_delegate.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/delegate_loader.h"
#include "tensorflow/lite/delegates/utils/experimental/stable_delegate/tflite_settings_json_parser.h"
#endif
namespace tflite {
namespace tools {
#if !defined(_WIN32)
namespace {
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path);
class StableDelegatePluginLoader {
public:
static StableDelegatePluginLoader& GetInstance() {
static StableDelegatePluginLoader* const instance =
new StableDelegatePluginLoader;
return *instance;
}
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path);
private:
struct CacheEntry {
const TfLiteStableDelegate* stable_delegate = nullptr;
delegates::utils::TfLiteSettingsJsonParser parser;
const TFLiteSettings* parsed_settings = nullptr;
};
StableDelegatePluginLoader() = default;
const CacheEntry* LoadStableDelegatePlugin(
const std::string& json_settings_file_path);
  std::map<std::string, CacheEntry> cache_;  // Keyed by JSON settings file path.
};
const StableDelegatePluginLoader::CacheEntry*
StableDelegatePluginLoader::LoadStableDelegatePlugin(
const std::string& json_settings_file_path) {
auto it = cache_.find(json_settings_file_path);
if (it != cache_.end()) {
return &it->second;
}
CacheEntry result;
const TFLiteSettings* tflite_settings =
result.parser.Parse(json_settings_file_path);
result.parsed_settings = tflite_settings;
if (!tflite_settings || !tflite_settings->stable_delegate_loader_settings() ||
!tflite_settings->stable_delegate_loader_settings()->delegate_path()) {
TFLITE_LOG(ERROR) << "Invalid TFLiteSettings for the stable delegate.";
result.stable_delegate = nullptr;
} else {
std::string delegate_path =
tflite_settings->stable_delegate_loader_settings()
->delegate_path()
->str();
result.stable_delegate =
delegates::utils::LoadDelegateFromSharedLibrary(delegate_path);
if (!result.stable_delegate || !result.stable_delegate->delegate_plugin) {
TFLITE_LOG(ERROR) << "Failed to load stable ABI delegate from stable ABI "
"delegate binary ("
<< delegate_path << ").";
}
}
auto it2 = cache_.emplace(json_settings_file_path, std::move(result)).first;
return &it2->second;
}
TfLiteDelegatePtr CreateStableDelegate(
const std::string& json_settings_file_path) {
return StableDelegatePluginLoader::GetInstance().CreateStableDelegate(
json_settings_file_path);
}
TfLiteDelegatePtr StableDelegatePluginLoader::CreateStableDelegate(
const std::string& json_settings_file_path) {
if (json_settings_file_path.empty()) {
return CreateNullDelegate();
}
const CacheEntry* entry =
StableDelegatePluginLoader::GetInstance().LoadStableDelegatePlugin(
json_settings_file_path);
if (!entry || !entry->stable_delegate ||
!entry->stable_delegate->delegate_plugin) {
return CreateNullDelegate();
}
const TfLiteOpaqueDelegatePlugin* delegate_plugin =
entry->stable_delegate->delegate_plugin;
return TfLiteDelegatePtr(delegate_plugin->create(entry->parsed_settings),
delegate_plugin->destroy);
}
}
#endif
class StableAbiDelegateProvider : public DelegateProvider {
public:
StableAbiDelegateProvider() {
default_params_.AddParam("stable_delegate_settings_file",
ToolParam::Create<std::string>(""));
}
std::vector<Flag> CreateFlags(ToolParams* params) const final;
void LogParams(const ToolParams& params, bool verbose) const final;
TfLiteDelegatePtr CreateTfLiteDelegate(const ToolParams& params) const final;
std::pair<TfLiteDelegatePtr, int> CreateRankedTfLiteDelegate(
const ToolParams& params) const final;
std::string GetName() const final { return "STABLE_DELEGATE"; }
};
REGISTER_DELEGATE_PROVIDER(StableAbiDelegateProvider);
std::vector<Flag> StableAbiDelegateProvider::CreateFlags(
ToolParams* params) const {
std::vector<Flag> flags = {
CreateFlag<std::string>("stable_delegate_settings_file", params,
"The path to the delegate settings JSON file.")};
return flags;
}
void StableAbiDelegateProvider::LogParams(const ToolParams& params,
bool verbose) const {
if (params.Get<std::string>("stable_delegate_settings_file").empty()) return;
LOG_TOOL_PARAM(params, std::string, "stable_delegate_settings_file",
"Delegate settings file path", verbose);
}
TfLiteDelegatePtr StableAbiDelegateProvider::CreateTfLiteDelegate(
const ToolParams& params) const {
#if !defined(_WIN32)
std::string stable_delegate_settings_file =
params.Get<std::string>("stable_delegate_settings_file");
return CreateStableDelegate(stable_delegate_settings_file);
#else
return CreateNullDelegate();
#endif
}
std::pair<TfLiteDelegatePtr, int>
StableAbiDelegateProvider::CreateRankedTfLiteDelegate(
const ToolParams& params) const {
auto ptr = CreateTfLiteDelegate(params);
return std::make_pair(std::move(ptr), params.GetPosition<std::string>(
"stable_delegate_settings_file"));
}
}
} | #include <string>
#include <vector>
#include <gtest/gtest.h>
#include "pthreadpool.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace tools {
namespace {
static constexpr char kTestSettingsSrcDir[] =
"tensorflow/lite/tools/delegates/experimental/stable_delegate/";
static constexpr char kGoodStableDelegateSettings[] =
"test_sample_stable_delegate_settings.json";
static constexpr char kGoodXNNPackDelegateSettings[] =
"test_stable_xnnpack_settings.json";
static constexpr char kBadMissingFile[] = "missing.json";
static constexpr char kBadInvalidSettings[] = "test_invalid_settings.json";
static constexpr char kBadMissingStableDelegateSettings[] =
"test_missing_stable_delegate_settings.json";
static constexpr char kBadMissingDelegatePathSettings[] =
"test_missing_delegate_path_settings.json";
std::vector<ProvidedDelegateList::ProvidedDelegate> CreateDelegates(
const std::string& settings_file_path) {
ToolParams params;
ProvidedDelegateList providers(¶ms);
providers.AddAllDelegateParams();
  params.Set<std::string>("stable_delegate_settings_file", settings_file_path,
                          /*position=*/1);
return providers.CreateAllRankedDelegates();
}
TEST(StableAbiDelegateProviderTest, CreateDelegate) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) +
kGoodStableDelegateSettings);
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("STABLE_DELEGATE", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
}
TEST(StableAbiDelegateProviderTest, CreateDelegateWithStableXNNPack) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) +
kGoodXNNPackDelegateSettings);
EXPECT_EQ(1, delegates.size());
EXPECT_EQ("STABLE_DELEGATE", delegates.front().provider->GetName());
EXPECT_NE(nullptr, delegates.front().delegate.get());
EXPECT_EQ(1, delegates.front().rank);
pthreadpool_t threadpool = static_cast<pthreadpool_t>(
TfLiteXNNPackDelegateGetThreadPool(delegates.front().delegate.get()));
EXPECT_EQ(5, pthreadpool_get_threads_count(threadpool));
}
TEST(StableAbiDelegateProviderTest, CreateDelegateFailedWithInvalidSettings) {
std::vector<std::string> invalid_settings_names = {
kBadMissingFile, kBadInvalidSettings, kBadMissingStableDelegateSettings,
kBadMissingDelegatePathSettings};
for (const std::string& name : invalid_settings_names) {
auto delegates = CreateDelegates(std::string(kTestSettingsSrcDir) + name);
EXPECT_EQ(0, delegates.size());
}
}
TEST(StableAbiDelegateProviderTest, CreateDelegateFailedWithBlankSettingsPath) {
auto delegates = CreateDelegates("");
EXPECT_EQ(0, delegates.size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/delegates/experimental/stable_delegate/stable_delegate_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
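StableDelegatePluginLoader above is a process-wide singleton that memoizes one CacheEntry per settings-file path, so the shared library behind a given JSON file is loaded and parsed at most once no matter how many delegates are created. A minimal sketch of that cache-on-first-use pattern (`Entry` and `LoadOnce` are illustrative stand-ins, not TFLite types):

```cpp
#include <map>
#include <string>
#include <utility>

struct Entry {
  bool ok = false;  // stands in for the loaded delegate + parsed settings
};

class Loader {
 public:
  const Entry& Get(const std::string& path) {
    auto it = cache_.find(path);
    if (it != cache_.end()) return it->second;  // cache hit: reuse the entry
    Entry e = LoadOnce(path);                   // miss: dlopen + parse once
    return cache_.emplace(path, std::move(e)).first->second;
  }

 private:
  static Entry LoadOnce(const std::string&) { return Entry{true}; }
  std::map<std::string, Entry> cache_;  // keyed by JSON settings file path
};
```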
c7d36866-411c-46ed-801c-d8994c2f600b | cpp | tensorflow/tensorflow | scatter_expander | third_party/xla/xla/service/gpu/transforms/scatter_expander.cc | third_party/xla/xla/service/scatter_expander_test.cc | #include "xla/service/gpu/transforms/scatter_expander.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
namespace xla {
bool GpuScatterExpander::InstructionMatchesPattern(HloInstruction* inst) {
return inst->opcode() == HloOpcode::kScatter &&
(inst->shape().IsTuple() ||
primitive_util::BitWidth(inst->shape().element_type()) > 64);
}
} | #include "xla/service/scatter_expander.h"
#include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
namespace xla {
namespace {
class ScatterExpanderTest : public HloTestBase {
protected:
void ClearInstructionLayout(HloModule* module, absl::string_view inst_name) {
HloInstruction* inst = FindInstruction(module, inst_name);
inst->mutable_shape()->clear_layout();
}
};
TEST_F(ScatterExpanderTest, ScatterOperandWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, ScatterMultipleOperandsWithoutLayout) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] constant({2,4,6,8,10})
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(1)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateAllScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = s32[2,3] parameter(2)
ROOT scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest,
EliminateSimpleMultioutpuScattersSkipsNontrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[3,3] parameter(0)
operand1 = bf16[3,3] parameter(1)
indices = s32[2] parameter(2)
update0 = s32[2,3] parameter(3)
update1 = bf16[2,3] parameter(4)
ROOT scatter = (s32[3,3], bf16[3,3]) scatter(operand0, operand1, indices, update0, update1),
to_apply=scatter_computation,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateSimpleScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
parameter0 = s32[] parameter(0)
ROOT parameter1 = s32[] parameter(1)
}
ENTRY kernel_entry {
operand = s32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update = s32[] constant(0)
ROOT scatter = s32[5]{0} scatter(operand, indices, update),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest,
EliminateSimpleMultioutputScattersRewritesTrivialScatter) {
const char* kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
p0 = s32[] parameter(0)
p1 = f32[] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[] parameter(3)
ROOT tuple = tuple(p2, p3)
}
ENTRY kernel_entry {
operand0 = s32[5] iota(), iota_dimension=0
operand1 = f32[5] iota(), iota_dimension=0
indices = s32[1] parameter(0)
update0 = s32[] constant(0)
update1 = f32[] constant(0)
ROOT scatter = (s32[5]{0}, f32[5]{0}) scatter(operand0, operand1, indices, update0, update1),
update_window_dims={}, inserted_window_dims={0},
scatter_dims_to_operand_dims={0}, index_vector_dim=0,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ClearInstructionLayout(module.get(), "operand0");
ClearInstructionLayout(module.get(), "operand1");
ScatterExpander scatter_expander(ScatterExpander::kEliminateSimpleScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = s32[] parameter(1)
arg0.172 = s32[] parameter(0)
ROOT add.48 = s32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = s32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = s32[4096,1,1] parameter(2)
ROOT scatter.48 = s32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
TEST_F(ScatterExpanderTest, EliminateScatterWithNonAssociativeCombiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT add.48 = f32[] add(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_TRUE(result);
}
TEST_F(ScatterExpanderTest, DoNotEliminateScatterWithAssociativeFp32Combiner) {
const char* const kModuleStr = R"(
HloModule scatter_expander
scatter_computation {
arg1.173 = f32[] parameter(1)
arg0.172 = f32[] parameter(0)
ROOT max.48 = f32[] maximum(arg0.172, arg1.173)
}
ENTRY fused_computation {
bitcast.2335 = f32[1,4096] parameter(0)
pad.96 = s32[4096,2] parameter(1)
bitcast.2748 = f32[4096,1,1] parameter(2)
ROOT scatter.48 = f32[1,4096] scatter(bitcast.2335, pad.96, bitcast.2748),
update_window_dims={1,2}, inserted_window_dims={},
scatter_dims_to_operand_dims={0,1}, index_vector_dim=1,
to_apply=scatter_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
ScatterExpander scatter_expander(
ScatterExpander::kEliminateIndeterministicScatters);
TF_ASSERT_OK_AND_ASSIGN(bool result,
RunHloPass(&scatter_expander, module.get()));
EXPECT_FALSE(result);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/scatter_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
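GpuScatterExpander::InstructionMatchesPattern above fires on exactly two cases: variadic scatters (tuple-shaped results) and element types wider than 64 bits. A sketch of the predicate with plain stand-in types (`ShapeInfo` is illustrative, not the XLA Shape class):

```cpp
#include <cstdint>

struct ShapeInfo {
  bool is_tuple;               // variadic scatter produces a tuple result
  int64_t element_bit_width;   // primitive_util::BitWidth of the element type
};

// Rewrite the scatter when the GPU backend has no direct support for it.
bool ShouldExpandScatterOnGpu(const ShapeInfo& result_shape) {
  return result_shape.is_tuple || result_shape.element_bit_width > 64;
}
```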
f01b2e57-26fb-44db-948b-b0d918a37457 | cpp | tensorflow/tensorflow | eval_const_tensor | tensorflow/core/common_runtime/eval_const_tensor.cc | tensorflow/core/common_runtime/eval_const_tensor_test.cc | #include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <algorithm>
#include <cstdint>
#include <deque>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace {
using ::tensorflow::shape_inference::InferenceContext;
bool IsRank(const Node& n) { return n.type_string() == "Rank"; }
bool IsSize(const Node& n) { return n.type_string() == "Size"; }
bool IsShape(const Node& n) { return n.type_string() == "Shape"; }
bool IsStridedSlice(const Node& n) { return n.type_string() == "StridedSlice"; }
bool IsPlaceholderWithDefault(const Node& n) {
return n.type_string() == "PlaceholderWithDefault";
}
bool IsUnstack(const Node& n) { return n.type_string() == "Unpack"; }
bool HasIntAttr(const Node& n, absl::string_view name, int64_t expected) {
int64_t actual;
return TryGetNodeAttr(n.def(), name, &actual) && actual == expected;
}
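// Returns the value of `node` if it is a scalar DT_INT32 or DT_INT64
// constant, and std::nullopt otherwise.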
std::optional<int64_t> GetIntConst(const Node& node) {
const TensorProto* proto;
Tensor tensor;
if (node.IsConstant() && TryGetNodeAttr(node.def(), "value", &proto) &&
(proto->dtype() == DT_INT32 || proto->dtype() == DT_INT64) &&
TensorShape(proto->tensor_shape()).num_elements() == 1 &&
tensor.FromProto(*proto)) {
if (proto->dtype() == DT_INT32) {
return *static_cast<const int32_t*>(tensor.data());
} else {
return *static_cast<const int64_t*>(tensor.data());
}
}
return std::nullopt;
}
std::optional<int64_t> GetSliceIndex(const Node& node, const int node_output) {
std::optional<int64_t> ix;
if (IsUnstack(node)) {
if (HasIntAttr(node, "axis", 0)) {
ix = node_output;
}
} else if (IsStridedSlice(node)) {
const Edge* edge;
if (HasIntAttr(node, "begin_mask", 0) && HasIntAttr(node, "end_mask", 0) &&
HasIntAttr(node, "ellipsis_mask", 0) &&
HasIntAttr(node, "new_axis_mask", 0) &&
HasIntAttr(node, "shrink_axis_mask", 1) &&
node.input_edge(1, &edge).ok()) {
ix = GetIntConst(*edge->src());
}
}
return ix;
}
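// Attempts to materialize the output of a Shape/Rank/Size node (or of a
// single-element slice of a Shape result) purely from statically known
// shapes in the refiner, without executing the node. Returns std::nullopt
// when the required shape information is not fully defined.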
absl::StatusOr<std::optional<Tensor>> TryInferFromShapes(
const Node& node, const int node_output, const ShapeRefiner& refiner) {
std::optional<Tensor> result;
if (node.num_inputs() == 0 || node_output >= node.num_outputs()) {
return result;
}
const auto dtype = node.output_type(node_output);
if (dtype != DT_INT32 && dtype != DT_INT64) {
return result;
}
absl::InlinedVector<int64_t, 8> data;
std::optional<TensorShape> shape;
const Edge* edge;
if (IsShape(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
for (int i = 0; i < rank; ++i) {
data.push_back(c->Value(c->Dim(c->input(0), i)));
}
shape.emplace({rank});
}
} else if (IsRank(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->RankKnown(c->input(0))) {
data.push_back(c->Rank(c->input(0)));
shape.emplace();
}
} else if (IsSize(node)) {
InferenceContext* c = refiner.GetContext(&node);
if (c != nullptr && c->FullyDefined(c->input(0))) {
int64_t size = 1;
for (int i = 0, rank = c->Rank(c->input(0)); i < rank; i++) {
size *= c->Value(c->Dim(c->input(0), i));
}
data.push_back(size);
shape.emplace();
}
} else if (node.input_edge(0, &edge).ok() && IsShape(*edge->src())) {
InferenceContext* c = refiner.GetContext(edge->src());
if (c != nullptr && c->RankKnown(c->input(0))) {
const int64_t rank = c->Rank(c->input(0));
std::optional<int64_t> ix = GetSliceIndex(node, node_output);
if (ix.has_value() && -rank <= *ix && *ix < rank &&
c->ValueKnown(c->Dim(c->input(0), *ix))) {
data.push_back(c->Value(c->Dim(c->input(0), *ix)));
shape.emplace();
}
}
}
if (!shape.has_value()) {
return result;
}
if (dtype == DT_INT32) {
for (const int64_t value : data) {
if (TF_PREDICT_FALSE(value >= std::numeric_limits<int32_t>::max())) {
return errors::InvalidArgument("Value is out of int32 range: ", value);
}
}
}
result.emplace(dtype, *shape);
if (dtype == DT_INT32) {
absl::c_copy(data, static_cast<int32_t*>(result->data()));
} else {
absl::c_copy(data, static_cast<int64_t*>(result->data()));
}
return result;
}
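// Returns true if `node` can be safely constant-folded on the host CPU:
// stateless, not control flow or a function call, no function-valued
// attributes, and backed by a registered CPU kernel.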
bool IsSupportedForEvaluation(const Node& node) {
if (node.IsConstant() || node.IsArg()) {
return true;
}
if (node.num_inputs() == 0 || IsPlaceholderWithDefault(node)) {
return false;
}
if (node.op_def().is_stateful()) {
return false;
}
if (node.IsEnter() || node.IsExit() || node.IsMerge()) {
return false;
}
if (node.IsFunctionCall()) {
return false;
}
for (const auto& [name, attr] : node.attrs()) {
if (attr.has_func() || !attr.list().func().empty()) {
return false;
}
}
return KernelDefAvailable(DEVICE_CPU, node.def());
}
struct Subgraph {
Subgraph(const OpRegistryInterface* op_registry, int32_t graph_def_version)
: graph(op_registry == nullptr ? OpRegistry::Global() : op_registry) {
VersionDef versions = graph.versions();
versions.set_producer(graph_def_version);
graph.set_versions(versions);
}
GraphRunner::NamedTensorList inputs;
Graph graph;
};
using NodeOutput = std::pair<const Node*, int>;
std::string OutputName(const NodeOutput& output) {
return strings::StrCat(output.first->name(), ":", output.second);
}
// Walks the transitive fan-in of `target_node`, stopping at inputs whose
// values are already known (via `lookup` or shape inference). Returns the
// copied subgraph together with its constant feeds, or nullptr when some
// input cannot be made constant.
absl::StatusOr<std::unique_ptr<Subgraph>> ExtractConstantSubgraph(
const Node& target_node, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const OpRegistryInterface* op_registry, const int32_t graph_def_version) {
std::unique_ptr<Subgraph> subgraph;
if (!target_node.IsEnter() && !IsSupportedForEvaluation(target_node)) {
return subgraph;
}
std::vector<const Edge*> edges;
for (const Edge* edge : target_node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
absl::flat_hash_map<const Node*, Node*> new_by_old_node;
absl::InlinedVector<const Node*, 8> arg_nodes;
absl::flat_hash_map<NodeOutput, Tensor> const_inputs;
for (int edge_ix = 0; edge_ix < edges.size(); ++edge_ix) {
const Edge& edge = *edges[edge_ix];
const Node& node = *edge.src();
const NodeOutput node_output = {&node, edge.src_output()};
if (new_by_old_node.contains(&node) || const_inputs.contains(node_output)) {
continue;
}
if (node.IsArg()) {
arg_nodes.push_back(&node);
continue;
}
auto tensor = lookup(node, node_output.second);
if (!tensor.has_value()) {
TF_ASSIGN_OR_RETURN(
tensor, TryInferFromShapes(node, node_output.second, refiner));
}
if (tensor.has_value()) {
const_inputs.emplace(node_output, *std::move(tensor));
} else if (!IsSupportedForEvaluation(node)) {
return subgraph;
} else {
new_by_old_node.emplace(&node, nullptr);
for (const Edge* edge : node.in_edges()) {
if (!edge->IsControlEdge()) {
edges.push_back(edge);
}
}
}
}
bool all_args_provided = true;
for (const Node* node : arg_nodes) {
auto tensor = lookup(*node, 0);
all_args_provided = all_args_provided && tensor.has_value();
if (all_args_provided) {
const_inputs.emplace(NodeOutput{node, 0}, *std::move(tensor));
}
}
if (!all_args_provided) {
return subgraph;
}
subgraph = std::make_unique<Subgraph>(op_registry, graph_def_version);
auto& inputs = subgraph->inputs;
inputs.reserve(const_inputs.size());
for (auto& [node_output, tensor] : const_inputs) {
if (!new_by_old_node.contains(node_output.first)) {
inputs.emplace_back(OutputName(node_output), std::move(tensor));
}
}
Graph& graph = subgraph->graph;
new_by_old_node[&target_node] = graph.CopyNode(&target_node);
for (const Edge* edge : edges) {
Node*& src = new_by_old_node[edge->src()];
if (src == nullptr) {
src = graph.CopyNode(edge->src());
}
Node* dst = new_by_old_node.at(edge->dst());
graph.AddEdge(src, edge->src_output(), dst, edge->dst_input());
}
return subgraph;
}
}
// Resolution order: the caller's `lookup` cache, Const attrs, shape
// inference, and finally (when a runner is supplied) evaluation of an
// extracted constant subgraph on the CPU.
absl::StatusOr<std::optional<Tensor>> EvaluateConstantTensor(
const Node& node, const int node_output, const ShapeRefiner& refiner,
const absl::FunctionRef<std::optional<Tensor>(const Node&, int)> lookup,
const std::optional<EvaluateConstantTensorRunner> runner) {
std::optional<Tensor> result;
if (result = lookup(node, node_output); result.has_value()) {
return result;
}
if (node.IsArg()) {
return result;
}
if (node.IsConstant()) {
const TensorProto* proto;
TF_RETURN_IF_ERROR(GetNodeAttr(node.def(), "value", &proto));
result.emplace();
if (TF_PREDICT_FALSE(!result->FromProto(*proto))) {
return errors::InvalidArgument("Unable to evaluate a constant node");
}
return result;
}
TF_ASSIGN_OR_RETURN(result, TryInferFromShapes(node, node_output, refiner));
if (result.has_value()) {
return result;
}
if (!runner.has_value()) {
return result;
}
TF_ASSIGN_OR_RETURN(
const auto subgraph,
ExtractConstantSubgraph(node, refiner, lookup, runner->op_registry,
runner->graph_def_version));
if (subgraph != nullptr) {
GraphRunner* graph_runner = runner->graph_runner;
std::unique_ptr<GraphRunner> tmp_graph_runner;
if (graph_runner == nullptr) {
tmp_graph_runner = std::make_unique<GraphRunner>(Env::Default());
graph_runner = tmp_graph_runner.get();
}
FunctionLibraryRuntime* function_library = nullptr;
std::vector<Tensor> outputs;
auto status =
graph_runner->Run(&subgraph->graph, function_library, subgraph->inputs,
{OutputName({&node, node_output})}, &outputs);
if (status.ok()) {
result = std::move(outputs[0]);
}
}
return result;
}
} | #include "tensorflow/core/common_runtime/eval_const_tensor.h"
#include <cstdint>
#include <limits>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/logging_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class EvaluateConstantTensorTest : public ::testing::Test {
public:
EvaluateConstantTensorTest& WithRunner() {
runner_ = EvaluateConstantTensorRunner{
scope_.graph()->op_registry(),
scope_.graph()->versions().producer(),
};
return *this;
}
absl::StatusOr<std::optional<Tensor>> Run(const Output& output) {
TF_RETURN_IF_ERROR(scope_.status());
const auto& graph = *scope_.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_RETURN_IF_ERROR(refiner.AddNode(node));
}
auto lookup = [this](const Node& node, int index) -> std::optional<Tensor> {
requested_.insert(&node);
auto it = cache_.find(std::make_pair(&node, index));
if (it == cache_.end()) {
return std::nullopt;
}
return it->second;
};
auto runner = runner_;
runner_ = std::nullopt;
requested_.clear();
return EvaluateConstantTensor(*output.node(), output.index(), refiner,
lookup, runner);
}
void ExpectTensor(const Output& output, const Tensor& expected) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_TRUE(actual.has_value());
test::ExpectEqual(*actual, expected);
}
void ExpectNull(const Output& output) {
TF_ASSERT_OK_AND_ASSIGN(auto actual, Run(output));
ASSERT_FALSE(actual.has_value());
}
void ExpectError(const Output& output) { EXPECT_FALSE(Run(output).ok()); }
protected:
Scope scope_ = Scope::NewRootScope();
absl::flat_hash_map<std::pair<const Node*, int>, Tensor> cache_;
absl::flat_hash_set<const Node*> requested_;
std::optional<EvaluateConstantTensorRunner> runner_ = std::nullopt;
};
template <typename T>
Output Placeholder(const Scope& scope, const PartialTensorShape& shape) {
return ops::Placeholder(scope, DataTypeToEnum<T>::value,
ops::Placeholder::Shape(shape));
}
Output Slice(const Scope& scope, const Output& input, int index) {
return ops::StridedSlice(
scope, input, ops::Const(scope, {index}), ops::Const(scope, {index + 1}),
ops::Const(scope, {1}), ops::StridedSlice::ShrinkAxisMask(1));
}
TEST_F(EvaluateConstantTensorTest, Constant) {
auto expected = test::AsTensor<float>({1, 2, 3});
auto op = ops::Const(scope_, expected);
ExpectTensor(op, expected);
}
TEST_F(EvaluateConstantTensorTest, Shape) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto shape = ops::Shape(scope_, input);
ExpectTensor(shape, test::AsTensor<int32_t>({2, 3, 5}));
}
TEST_F(EvaluateConstantTensorTest, ValueOutOfRange) {
const int64_t dim = std::numeric_limits<int32_t>::max();
auto input = Placeholder<float>(scope_, {dim});
auto shape32 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT32));
auto shape64 = ops::Shape(scope_, input, ops::Shape::OutType(DT_INT64));
ExpectError(shape32);
ExpectTensor(shape64, test::AsTensor<int64_t>({dim}));
}
TEST_F(EvaluateConstantTensorTest, PartialShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
ExpectNull(shape);
}
TEST_F(EvaluateConstantTensorTest, Rank) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto rank = ops::Rank(scope_, input);
ExpectTensor(rank, test::AsScalar<int32_t>(3));
}
TEST_F(EvaluateConstantTensorTest, Size) {
auto input = Placeholder<float>(scope_, {2, 3, 5});
auto size = ops::Size(scope_, input);
ExpectTensor(size, test::AsScalar<int32_t>(2 * 3 * 5));
}
TEST_F(EvaluateConstantTensorTest, PartialSize) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto size = ops::Size(scope_, input);
ExpectNull(size);
}
TEST_F(EvaluateConstantTensorTest, SliceShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto slice0 = Slice(scope_, shape, 0);
auto slice1 = Slice(scope_, shape, 1);
auto slice2 = Slice(scope_, shape, 2);
ExpectTensor(slice0, test::AsScalar<int32_t>(2));
ExpectNull(slice1);
ExpectTensor(slice2, test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, UnpackShape) {
auto input = Placeholder<float>(scope_, {2, -1, 5});
auto shape = ops::Shape(scope_, input);
auto unpack = ops::Unstack(scope_, shape, 3, ops::Unstack::Axis(0));
ExpectTensor(unpack[0], test::AsScalar<int32_t>(2));
ExpectNull(unpack[1]);
ExpectTensor(unpack[2], test::AsScalar<int32_t>(5));
}
TEST_F(EvaluateConstantTensorTest, Lookup) {
auto input = Placeholder<float>(scope_, {2});
ExpectNull(input);
auto expected = test::AsTensor<float>({3, 5});
cache_.emplace(std::make_pair(input.node(), 0), expected);
ExpectTensor(input, expected);
}
TEST_F(EvaluateConstantTensorTest, ConstantFolding) {
auto input1 = Placeholder<float>(scope_, {2, -1, 5});
auto input2 = ops::_Arg(scope_, DT_INT32, 0);
auto shape = ops::Shape(scope_, input1);
auto result = ops::Add(scope_, Slice(scope_, shape, 2), input2);
ExpectNull(result);
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(input2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 + 7));
}
TEST_F(EvaluateConstantTensorTest, DoNotEvalPlaceholderWithDefault) {
auto tensor = test::AsTensor<float>({1, 2, 3});
auto result1 = ops::Identity(scope_, tensor);
auto result2 = ops::PlaceholderWithDefault(scope_, tensor, tensor.shape());
WithRunner().ExpectTensor(result1, tensor);
WithRunner().ExpectNull(result2);
}
TEST_F(EvaluateConstantTensorTest, AllArgsMustBeRequestedForConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto result = ops::Mul(scope_, arg0, ops::Add(scope_, arg1, arg2));
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
EXPECT_TRUE(requested_.contains(arg0.node()));
EXPECT_TRUE(requested_.contains(arg1.node()));
EXPECT_TRUE(requested_.contains(arg2.node()));
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(5));
cache_.emplace(std::make_pair(arg2.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(5 * (3 + 7)));
}
TEST_F(EvaluateConstantTensorTest, NoArgsMustBeRequestedForNonConstSubgraph) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto arg2 = ops::_Arg(scope_, DT_INT32, 2);
auto feed = Placeholder<int32_t>(scope_, {});
auto result = ops::Mul(scope_, arg0,
ops::Add(scope_, arg1, ops::Add(scope_, arg2, feed)));
WithRunner().ExpectNull(result);
EXPECT_FALSE(requested_.contains(arg0.node()));
EXPECT_FALSE(requested_.contains(arg1.node()));
EXPECT_FALSE(requested_.contains(arg2.node()));
EXPECT_TRUE(requested_.contains(feed.node()));
}
TEST_F(EvaluateConstantTensorTest, MissingKernel) {
auto arg0 = ops::_Arg(scope_, DT_INT32, 0);
auto arg1 = ops::_Arg(scope_, DT_INT32, 1);
auto print = ops::Print(scope_, arg1, {arg1.output});
auto result = ops::Add(scope_, arg0, print);
ASSERT_FALSE(KernelDefAvailable(DEVICE_CPU, print.node()->def()));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg0.node(), 0), test::AsScalar<int32_t>(3));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(arg1.node(), 0), test::AsScalar<int32_t>(5));
WithRunner().ExpectNull(result);
cache_.emplace(std::make_pair(print.node(), 0), test::AsScalar<int32_t>(7));
WithRunner().ExpectTensor(result, test::AsScalar<int32_t>(3 + 7));
}
template <bool kEvaluated>
void BM_ConstantFolding(::testing::benchmark::State& state) {
Scope scope = Scope::NewRootScope();
auto input1 = Placeholder<float>(scope, {2, -1, 5});
auto input2 = ops::_Arg(scope, DT_INT32, 0);
auto input3 = ops::_Arg(scope, DT_INT32, 0);
auto shape = ops::Shape(scope, input1);
auto result =
ops::Mul(scope, ops::Add(scope, Slice(scope, shape, 2), input2), input3);
TF_CHECK_OK(scope.status());
const auto& graph = *scope.graph();
ShapeRefiner refiner(graph.versions(), graph.op_registry());
for (const auto* node : graph.nodes()) {
TF_CHECK_OK(refiner.AddNode(node));
}
auto tensor2 = test::AsScalar<int32_t>(7);
auto tensor3 = test::AsScalar<int32_t>(11);
auto lookup = [&](const Node& node, int index) -> std::optional<Tensor> {
if (kEvaluated && &node == input2.node()) {
return tensor2;
}
if (&node == input3.node()) {
return tensor3;
}
return std::nullopt;
};
GraphRunner graph_runner(Env::Default());
const EvaluateConstantTensorRunner runner = {
graph.op_registry(), graph.versions().producer(), &graph_runner};
for (auto unused : state) {
auto status_or =
EvaluateConstantTensor(*result.node(), 0, refiner, lookup, runner);
TF_CHECK_OK(status_or.status());
CHECK_EQ(status_or->has_value(), kEvaluated);
}
}
BENCHMARK_TEMPLATE(BM_ConstantFolding, false);
BENCHMARK_TEMPLATE(BM_ConstantFolding, true);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eval_const_tensor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eval_const_tensor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
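ExtractConstantSubgraph above walks the target node's transitive fan-in by growing the `edges` vector while iterating it by index; indexing stays valid across push_back where iterators or references into the vector would not. A minimal, self-contained sketch of that worklist pattern on a toy adjacency list (BackwardReachable and the graph encoding are illustrative, not TensorFlow APIs):
#include <cstddef>
#include <cstdio>
#include <vector>
// Toy graph: in_edges[n] lists the predecessors of node n. Collect every
// node reachable backwards from `target`, mirroring how
// ExtractConstantSubgraph appends a node's in-edges to the same vector it
// is iterating.
std::vector<int> BackwardReachable(
    const std::vector<std::vector<int>>& in_edges, int target) {
  std::vector<bool> seen(in_edges.size(), false);
  std::vector<int> worklist = {target};
  seen[target] = true;
  for (std::size_t i = 0; i < worklist.size(); ++i) {  // grows as we iterate
    for (int pred : in_edges[worklist[i]]) {
      if (!seen[pred]) {
        seen[pred] = true;
        worklist.push_back(pred);
      }
    }
  }
  return worklist;
}
int main() {
  // 0 -> 2 -> 3, 1 -> 2
  std::vector<std::vector<int>> in_edges = {{}, {}, {0, 1}, {2}};
  for (int n : BackwardReachable(in_edges, 3)) std::printf("%d ", n);  // 3 2 0 1
  std::printf("\n");
}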
ebc77db0-35d1-443b-8e86-281f12c77876 | cpp | tensorflow/tensorflow | fully_connected_4bit | tensorflow/lite/kernels/internal/optimized/fully_connected_4bit.h | tensorflow/lite/kernels/fully_connected_4bit_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_FULLY_CONNECTED_4BIT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_FULLY_CONNECTED_4BIT_H_
#include <stdint.h>
#ifndef TFLITE_MMAP_DISABLED
#include <sys/mman.h>
#endif
#include <cstdlib>
#include <memory>
#if defined(FC_4BIT_SSE) && defined(__SSSE3__)
#include "tensorflow/lite/kernels/internal/optimized/4bit/sse_fully_connected.h"
#elif defined(FC_4BIT_NEON) && (defined(__ARM_NEON__) || defined(__ARM_NEON))
#include "tensorflow/lite/kernels/internal/optimized/4bit/neon_fully_connected.h"
#else
#include "tensorflow/lite/kernels/internal/optimized/4bit/fully_connected_reference.h"
#endif
namespace tflite {
namespace optimized_4bit {
constexpr int FilterWidth = 4;
constexpr int FilterDepth = 32;
constexpr int kDefaultAlignmentPadding = 63;
// Releases a prepacked-weight region with the allocator that produced it:
// delete[] when mmap is disabled, munmap (with the recorded size) otherwise.
struct Deleter {
explicit Deleter(size_t size = 0) : size(size) {}
void operator()(uint8_t* memory) {
if (!memory) {
return;
}
#ifdef TFLITE_MMAP_DISABLED
delete[] memory;
#else
munmap(memory, size);
#endif
}
size_t size;
};
struct OpData4Bit {
int rows_right = 1;
int batch_size = 0;
bool needs_prepack = true;
uint8_t* prepacked_cache = nullptr;
std::unique_ptr<uint8_t[], Deleter> prepacked_cache_buffer;
size_t prepacked_cache_buffer_size = 0;
  // Reserves `required_size` bytes for the prepacked filter cache and
  // publishes a 64-byte-aligned pointer into it via `prepacked_cache`.
  void AllocatePackedRegion(size_t required_size) {
#ifdef TFLITE_MMAP_DISABLED
uint8_t* region = new uint8_t[required_size];
prepacked_cache_buffer =
std::unique_ptr<uint8_t[], Deleter>(region, Deleter());
#else
uint8_t* region = reinterpret_cast<uint8_t*>(
mmap(nullptr, required_size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
prepacked_cache_buffer =
std::unique_ptr<uint8_t[], Deleter>(region, Deleter(required_size));
#ifdef MADV_MERGEABLE
madvise(region, required_size, MADV_MERGEABLE);
#endif
#endif
prepacked_cache = reinterpret_cast<uint8_t*>(
(reinterpret_cast<uintptr_t>(prepacked_cache_buffer.get()) +
kDefaultAlignmentPadding) &
~kDefaultAlignmentPadding);
prepacked_cache_buffer_size = required_size;
}
};
namespace api {
inline void Prepack(uint8_t* dest, const int8_t* tensor, int layout_rows,
int layout_cols, int src_rows, int src_cols, int width,
int depth) {
optimized_4bit::Prepack(dest, tensor, layout_rows, layout_cols, src_rows,
src_cols, width, depth);
}
inline void BatchQuantizeFloats4Bit(const float* float_data_ptr, int n_batch,
int n_data, int8_t* quantized_data_ptr,
float* scaling_factors, int width,
int depth, int32_t* input_offsets) {
optimized_4bit::BatchQuantizeFloats4Bit(float_data_ptr, n_batch, n_data,
quantized_data_ptr, scaling_factors,
width, depth, input_offsets);
}
inline void AssignBiasAndComputeOffsets(const int32_t* input_offsets,
const float* batch_scales,
float* filter_scales,
const float* bias_ptr,
float* output_ptr, int output_depth,
int batch_size) {
optimized_4bit::AssignBiasAndComputeOffsets(
input_offsets, batch_scales, filter_scales, bias_ptr, output_ptr,
output_depth, batch_size);
}
inline void RunAndUnpack(int rhs_width, const uint8_t* lhs, const int8_t* rhs,
int32_t* dst, int output_depth, int batch_size,
int lhs_layout_rows, int lhs_layout_cols,
int rhs_layout_rows, int rhs_layout_cols,
int dst_layout_rows, int dst_layout_cols,
float* output_ptr, const float* scaling_factors,
const float* filter_scales) {
optimized_4bit::RunAndUnpack(
rhs_width, lhs, rhs, dst, output_depth, batch_size, lhs_layout_rows,
lhs_layout_cols, rhs_layout_rows, rhs_layout_cols, dst_layout_rows,
dst_layout_cols, output_ptr, scaling_factors, filter_scales);
}
}
}
}
#endif | #include <cstdlib>
#include <memory>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/fully_connected.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using ::testing::ElementsAreArray;  // gmock matcher used in the EXPECT_THATs below
class FullyConnected4BitOpModel : public SingleOpModel {
public:
FullyConnected4BitOpModel(
int units, int batches, const TensorData& input,
const TensorData& weights, const TensorData& output,
std::vector<int8_t> weights_initializer, TfLiteRegistration* registration,
ActivationFunctionType activation_func = ActivationFunctionType_RELU)
: batches_(batches), units_(units) {
int total_input_size = 1;
for (size_t i = 0; i < input.shape.size(); ++i) {
total_input_size *= input.shape[i];
}
input_size_ = total_input_size / batches_;
input_ = AddInput(input);
const std::vector<int8_t> quantized_data(weights_initializer);
std::vector<int8_t> weight_data(quantized_data.size() / 2);
    // Pack two signed 4-bit weights per byte: even indices fill the low
    // nibble, odd indices the high nibble.
    for (int i = 0; i < quantized_data.size(); i++) {
uint8_t val = quantized_data[i] & UINT8_C(15);
if ((i % 2) == 0) {
weight_data[i / 2] = val & INT8_C(15);
} else {
weight_data[i / 2] |= (val << 4);
}
}
weights_ =
AddConstInput<int8_t>(weights, weight_data.data(), weight_data.size());
bias_ = AddInput({TensorType_FLOAT32, {units_}});
output_ = AddOutput(output);
FullyConnectedOptionsWeightsFormat weights_format =
FullyConnectedOptionsWeightsFormat_DEFAULT;
SetBuiltinOp(BuiltinOperator_FULLY_CONNECTED,
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(builder_, activation_func,
weights_format, true)
.Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_FULLY_CONNECTED, registration);
BuildInterpreter({GetShape(input_), GetShape(weights_), GetShape(bias_)});
SetUnitScale();
}
void SetUnitScale() {
TfLiteTensor* t = interpreter_->tensor(weights_);
t->type = kTfLiteInt4;
t->params.scale = 1.0;
auto filter_params =
reinterpret_cast<TfLiteAffineQuantization*>(t->quantization.params);
if (filter_params && filter_params->scale &&
filter_params->scale->size > 0) {
for (int i = 0; i < filter_params->scale->size; i++) {
filter_params->scale->data[i] = 1.0;
}
}
}
void SetInput(const std::vector<float>& f) { PopulateTensor(input_, f); }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
void SetBias(const std::vector<float>& f) { PopulateTensor(bias_, f); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_;
int bias_;
int output_;
int batches_;
int units_;
int input_size_;
bool use_native_int4_ = false;
};
TEST(Hybrid4BitFullyConnectedOpTest, SimpleTestHybridInt4) {
int units = 5;
int batches = 4;
int cols = 40;
FullyConnected4BitOpModel m(
units, batches,
{TensorType_FLOAT32, {batches, cols}},
{TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
{TensorType_FLOAT32, {units, batches}},
{
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
},
ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
ActivationFunctionType_RELU);
m.SetBias({1, 2, 3, 1, 2});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
});
m.Invoke();
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{393., 456., 457., 455., 394., 413., 476., 477., 475., 414.,
393., 456., 457., 455., 394., 393., 456., 457., 455., 394},
1.3f)));
}
TEST(Hybrid4BitFullyConnectedOpTest, TestHybridInt4AllZeroBatch) {
int units = 5;
int batches = 4;
int cols = 40;
FullyConnected4BitOpModel m(
units, batches,
{TensorType_FLOAT32, {batches, cols}},
{TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
{TensorType_FLOAT32, {units, batches}},
{
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
1, 2, 3, 4, 5, 6, 7, 1, 2, -3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
-1, 2, 3, 4, 5, 6, 7, 1, 2, 3, -1, 2, 3, 4, 5, 6, 7, 1, 2, 3,
},
ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
ActivationFunctionType_RELU);
m.SetBias({1, 2, 3, 1, 2});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, 8, -9, -10, 1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
});
m.Invoke();
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{393., 456., 457., 455., 394., 413., 476., 477., 475., 414.,
393., 456., 457., 455., 394., 1, 2, 3, 1, 2},
1.3f)));
}
std::mt19937 random_engine(2023);
std::uniform_real_distribution<float> real_dist(0.f, 1.f);
std::uniform_int_distribution<int32_t> int_dist(-7, 7);
class Hybrid4BitFullyConnectedVsReferenceOpTests
: public ::testing::TestWithParam<::testing::tuple<int, int, int>> {};
TEST_P(Hybrid4BitFullyConnectedVsReferenceOpTests, TestHybridInt4) {
auto params = GetParam();
int units = std::get<0>(params);
int batches = std::get<1>(params);
int cols = std::get<2>(params);
std::vector<int8_t> weight_data(units * cols, 0);
std::vector<float> input_data(batches * cols, 0);
std::vector<float> bias_data(units, 0);
for (int i = 0; i < units * cols; ++i) {
weight_data[i] = int_dist(random_engine);
}
for (int i = 0; i < batches * cols; ++i) {
input_data[i] = real_dist(random_engine);
}
for (int i = 0; i < units; ++i) {
bias_data[i] = real_dist(random_engine);
}
FullyConnected4BitOpModel test(
units, batches,
{TensorType_FLOAT32, {batches, cols}},
{TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
{TensorType_FLOAT32, {units, batches}}, weight_data,
ops::builtin::Register_FULLY_CONNECTED_GENERIC_OPT(),
ActivationFunctionType_RELU);
test.SetBias(bias_data);
test.SetInput(input_data);
test.Invoke();
std::vector<float> test_data = test.GetOutput();
FullyConnected4BitOpModel expected(
units, batches,
{TensorType_FLOAT32, {batches, cols}},
{TensorType_INT4, {units, cols}, 0.0, 0.0, 1.0},
{TensorType_FLOAT32, {units, batches}}, weight_data,
ops::builtin::Register_FULLY_CONNECTED_REF(),
ActivationFunctionType_RELU);
expected.SetBias(bias_data);
expected.SetInput(input_data);
expected.Invoke();
std::vector<float> expected_data = expected.GetOutput();
EXPECT_THAT(test_data, ElementsAreArray(ArrayFloatNear(
expected_data, 1e-3f)));
}
INSTANTIATE_TEST_SUITE_P(Hybrid4BitFullyConnectedVsReferenceOpTests,
Hybrid4BitFullyConnectedVsReferenceOpTests,
::testing::ValuesIn({
std::make_tuple(4, 1, 32),
std::make_tuple(4, 1, 64),
std::make_tuple(5, 1, 128),
std::make_tuple(5, 4, 128),
std::make_tuple(5, 6, 128),
std::make_tuple(5, 1, 38),
std::make_tuple(5, 4, 72),
std::make_tuple(5, 6, 130),
std::make_tuple(4, 1, 56),
std::make_tuple(4, 1, 48),
std::make_tuple(4, 1, 120),
}));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/optimized/fully_connected_4bit.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/fully_connected_4bit_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
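The FullyConnected4BitOpModel constructor above packs two signed 4-bit weights into each byte, even indices in the low nibble. A standalone round-trip sketch of that layout; the UnpackInt4 helper and its sign extension are my additions for illustration (the kernel consumes the packed bytes directly):
#include <cstdint>
#include <cstdio>
#include <vector>
// Pack int4 values in [-8, 7] two-per-byte: element 2k goes in the low
// nibble, element 2k+1 in the high nibble (matching the test's loop).
std::vector<int8_t> PackInt4(const std::vector<int8_t>& values) {
  std::vector<int8_t> packed((values.size() + 1) / 2, 0);
  for (std::size_t i = 0; i < values.size(); ++i) {
    uint8_t nibble = static_cast<uint8_t>(values[i]) & 0x0F;
    if (i % 2 == 0) {
      packed[i / 2] = static_cast<int8_t>(nibble);
    } else {
      packed[i / 2] |= static_cast<int8_t>(nibble << 4);
    }
  }
  return packed;
}
// Unpack with sign extension: nibbles >= 8 encode negative values.
int8_t UnpackInt4(const std::vector<int8_t>& packed, std::size_t i) {
  uint8_t byte = static_cast<uint8_t>(packed[i / 2]);
  uint8_t nibble = (i % 2 == 0) ? (byte & 0x0F) : (byte >> 4);
  return static_cast<int8_t>(nibble >= 8 ? nibble - 16 : nibble);
}
int main() {
  std::vector<int8_t> weights = {-1, 2, 7, -8, 0, 5};
  auto packed = PackInt4(weights);
  for (std::size_t i = 0; i < weights.size(); ++i) {
    std::printf("%d -> %d\n", weights[i], UnpackInt4(packed, i));  // round-trips
  }
}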
47d9aac5-874a-4c9c-ab63-6ce9ce4b9f1b | cpp | tensorflow/tensorflow | make_sloppy | tensorflow/core/grappler/optimizers/data/make_sloppy.cc | tensorflow/core/grappler/optimizers/data/make_sloppy_test.cc | #include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
namespace tensorflow {
namespace grappler {
// Rewrites every dataset op in the graph that supports nondeterminism:
// legacy ops get sloppy=true, newer ops get deterministic="false" whenever
// the model left the attr at "default".
Status MakeSloppy::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (graph_utils::HasSloppyAttr(node.op())) {
(*node.mutable_attr())["sloppy"].set_b(true);
stats->num_changes++;
}
if (graph_utils::HasDeterministicAttr(node.op()) &&
node.attr().at("deterministic").s() == "default") {
(*node.mutable_attr())["deterministic"].set_s("false");
stats->num_changes++;
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MakeSloppy, "make_sloppy");
}
} | #include "tensorflow/core/grappler/optimizers/data/make_sloppy.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
TEST(MakeSloppy, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV2Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapNode("map", "range",
"num_parallel_calls", "XTimesTwo",
false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(MakeSloppy, ParseExampleDataset) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParseExampleNode("parse_example", "range",
"num_parallel_calls",
false)},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("parse_example", output));
int index = graph_utils::FindGraphNodeWithName("parse_example", output);
EXPECT_TRUE(output.node(index).attr().at("sloppy").b());
}
TEST(ChangeDefault, ParallelInterleave) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("cycle_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("block_length", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelInterleaveV4Node(
"interleave", "range", "cycle_length", "block_length",
"num_parallel_calls", "XTimesTwo", "default")},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("interleave", output));
int index = graph_utils::FindGraphNodeWithName("interleave", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelMap) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
graph_tests_utils::MakeParallelMapV2Node(
"map", "range", "num_parallel_calls", "XTimesTwo",
"default", false)},
{
test::function::XTimesTwo(),
});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map", output));
int index = graph_utils::FindGraphNodeWithName("map", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
TEST(ChangeDefault, ParallelBatch) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
NDef("num_parallel_calls", "Const", {},
{{"value", 1}, {"dtype", DT_INT32}}),
NDef("drop_remainder", "Const", {},
{{"value", false}, {"dtype", DT_BOOL}}),
graph_tests_utils::MakeParallelBatchNode(
"batch", "range", "batch_size", "num_parallel_calls",
"drop_remainder", "default")},
{});
MakeSloppy optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("batch", output));
int index = graph_utils::FindGraphNodeWithName("batch", output);
EXPECT_EQ(output.node(index).attr().at("deterministic").s(), "false");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_sloppy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/make_sloppy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
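MakeSloppy above is a pure attribute rewrite. A dependency-free sketch of the same rule over an invented MiniNode type; note the real pass decides by op name via graph_utils::HasSloppyAttr, while this toy simply checks whether the attribute is present:
#include <cstdio>
#include <map>
#include <string>
#include <vector>
// Toy stand-in for NodeDef: an op name plus string-valued attributes.
struct MiniNode {
  std::string op;
  std::map<std::string, std::string> attr;
};
// Mirror of MakeSloppy's rule: force sloppy execution where the op supports
// it, and relax "default" determinism to "false".
int MakeSloppyToy(std::vector<MiniNode>& nodes) {
  int num_changes = 0;
  for (MiniNode& node : nodes) {
    if (node.attr.count("sloppy")) {
      node.attr["sloppy"] = "true";
      ++num_changes;
    }
    auto it = node.attr.find("deterministic");
    if (it != node.attr.end() && it->second == "default") {
      it->second = "false";
      ++num_changes;
    }
  }
  return num_changes;
}
int main() {
  std::vector<MiniNode> graph = {
      {"ParallelInterleaveDatasetV2", {{"sloppy", "false"}}},
      {"ParallelMapDatasetV2", {{"deterministic", "default"}}},
      {"RangeDataset", {}}};
  std::printf("changes: %d\n", MakeSloppyToy(graph));  // changes: 2
}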
e114bc94-ba21-4c16-9d98-227c026a9002 | cpp | google/cel-cpp | optional_type | common/types/optional_type.cc | common/types/optional_type_test.cc | #include <cstddef>
#include "absl/base/attributes.h"
#include "absl/strings/string_view.h"
#include "common/type.h"
namespace cel {
namespace common_internal {
namespace {
struct OptionalTypeData final {
const absl::string_view name;
const size_t parameters_size;
const Type parameter;
};
union DynOptionalTypeData final {
OptionalTypeData optional;
OpaqueTypeData opaque;
};
// OptionalType is surfaced through the generic OpaqueType machinery, so the
// optional-specific layout must mirror OpaqueTypeData field for field.
static_assert(offsetof(OptionalTypeData, name) ==
offsetof(OpaqueTypeData, name));
static_assert(offsetof(OptionalTypeData, parameters_size) ==
offsetof(OpaqueTypeData, parameters_size));
static_assert(offsetof(OptionalTypeData, parameter) ==
offsetof(OpaqueTypeData, parameters));
ABSL_CONST_INIT const DynOptionalTypeData kDynOptionalTypeData = {
.optional =
{
.name = OptionalType::kName,
.parameters_size = 1,
.parameter = DynType(),
},
};
}
}
OptionalType::OptionalType()
: opaque_(&common_internal::kDynOptionalTypeData.opaque) {}
Type OptionalType::GetParameter() const { return GetParameters().front(); }
} | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace {
TEST(OptionalType, Default) {
OptionalType optional_type;
EXPECT_EQ(optional_type.GetParameter(), DynType());
}
TEST(OptionalType, Kind) {
google::protobuf::Arena arena;
EXPECT_EQ(OptionalType(&arena, BoolType()).kind(), OptionalType::kKind);
EXPECT_EQ(Type(OptionalType(&arena, BoolType())).kind(), OptionalType::kKind);
}
TEST(OptionalType, Name) {
google::protobuf::Arena arena;
EXPECT_EQ(OptionalType(&arena, BoolType()).name(), OptionalType::kName);
EXPECT_EQ(Type(OptionalType(&arena, BoolType())).name(), OptionalType::kName);
}
TEST(OptionalType, DebugString) {
google::protobuf::Arena arena;
{
std::ostringstream out;
out << OptionalType(&arena, BoolType());
EXPECT_EQ(out.str(), "optional_type<bool>");
}
{
std::ostringstream out;
out << Type(OptionalType(&arena, BoolType()));
EXPECT_EQ(out.str(), "optional_type<bool>");
}
}
TEST(OptionalType, Parameter) {
google::protobuf::Arena arena;
EXPECT_EQ(OptionalType(&arena, BoolType()).GetParameter(), BoolType());
}
TEST(OptionalType, Hash) {
google::protobuf::Arena arena;
EXPECT_EQ(absl::HashOf(OptionalType(&arena, BoolType())),
absl::HashOf(OptionalType(&arena, BoolType())));
}
TEST(OptionalType, Equal) {
google::protobuf::Arena arena;
EXPECT_EQ(OptionalType(&arena, BoolType()), OptionalType(&arena, BoolType()));
EXPECT_EQ(Type(OptionalType(&arena, BoolType())),
OptionalType(&arena, BoolType()));
EXPECT_EQ(OptionalType(&arena, BoolType()),
Type(OptionalType(&arena, BoolType())));
EXPECT_EQ(Type(OptionalType(&arena, BoolType())),
Type(OptionalType(&arena, BoolType())));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/optional_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/optional_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
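The offsetof static_asserts above are what let OptionalType expose its statically initialized OptionalTypeData through an OpaqueTypeData pointer. A self-contained illustration of that common-initial-sequence idiom, with invented struct names:
#include <cstddef>
#include <cstdio>
// Two views of the same storage: a generic header and a specialized one.
struct Generic {
  const char* name;
  std::size_t size;
};
struct Special {
  const char* name;
  std::size_t size;
  int extra;  // trailing field the generic view never touches
};
// The leading members must line up so the union's `special` member can be
// read through `generic` (common-initial-sequence rule for standard-layout
// structs in a union), which is exactly what the file above asserts.
union Both {
  Special special;
  Generic generic;
};
static_assert(offsetof(Special, name) == offsetof(Generic, name));
static_assert(offsetof(Special, size) == offsetof(Generic, size));
// Brace-initialization of a union initializes its first named member.
constexpr Both kInstance = {{"optional_type", 1, 42}};
int main() {
  const Generic& g = kInstance.generic;  // read through the generic view
  std::printf("%s %zu\n", g.name, g.size);  // optional_type 1
}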
b428a4e6-f411-4fe5-b691-f4f37449cc1c | cpp | google/langsvr | optional | include/langsvr/optional.h | src/optional_test.cc | #ifndef LANGSVR_OPTIONAL_H_
#define LANGSVR_OPTIONAL_H_
#include <cassert>
#include <type_traits>
#include <utility>
namespace langsvr {
// Minimal heap-backed optional: the value lives behind `ptr`, which is null
// when empty.
template <typename T>
struct Optional {
Optional() = default;
~Optional() { Reset(); }
Optional(const Optional& other) { *this = other; }
Optional(Optional&& other) { *this = std::move(other); }
Optional(const T& other) { *this = other; }
Optional(T&& other) { *this = std::move(other); }
void Reset() {
if (ptr) {
delete ptr;
ptr = nullptr;
}
}
    Optional& operator=(const Optional& other) {
        // Guard self-assignment: Reset() would otherwise free the value we
        // are about to copy from.
        if (this == &other) {
            return *this;
        }
        Reset();
        if (other.ptr) {
            ptr = new T(*other);
        }
        return *this;
    }
    Optional& operator=(Optional&& other) {
        if (this != &other) {
            Reset();
            ptr = other.ptr;
            other.ptr = nullptr;
        }
        return *this;
    }
    Optional& operator=(const T& value) {
        // Copy before Reset() so that `opt = *opt` cannot read freed memory.
        T* copy = new T(value);
        Reset();
        ptr = copy;
        return *this;
    }
    Optional& operator=(T&& value) {
        T* moved = new T(std::move(value));
        Reset();
        ptr = moved;
        return *this;
    }
operator bool() const { return ptr != nullptr; }
bool operator!() const { return ptr == nullptr; }
T* operator->() { return &Get(); }
const T* operator->() const { return &Get(); }
T& operator*() { return Get(); }
const T& operator*() const { return Get(); }
template <typename V>
bool operator==(V&& value) const {
if constexpr (std::is_same_v<Optional, std::decay_t<V>>) {
return (!*this && !value) || (*this && value && (Get() == value.Get()));
} else {
if (!ptr) {
return false;
}
return Get() == std::forward<V>(value);
}
}
template <typename V>
bool operator!=(V&& value) const {
return !(*this == std::forward<V>(value));
}
private:
T& Get() {
assert(ptr);
return *ptr;
}
const T& Get() const {
assert(ptr);
return *ptr;
}
T* ptr = nullptr;
};
}
#endif | #include "langsvr/optional.h"
#include "gmock/gmock.h"
namespace langsvr {
namespace {
TEST(OptionalTest, CtorNoArgs) {
Optional<std::string> opt;
EXPECT_FALSE(opt);
EXPECT_TRUE(!opt);
EXPECT_NE(opt, "hello");
}
TEST(OptionalTest, CopyCtorWithValue) {
std::string val{"hello"};
Optional<std::string> opt{val};
EXPECT_TRUE(opt);
EXPECT_FALSE(!opt);
EXPECT_EQ(opt, "hello");
EXPECT_NE(opt, "world");
EXPECT_EQ(*opt, "hello");
}
TEST(OptionalTest, MoveCtorWithValue) {
std::string val{"hello"};
Optional<std::string> opt{std::move(val)};
EXPECT_TRUE(opt);
EXPECT_FALSE(!opt);
EXPECT_EQ(opt, "hello");
EXPECT_NE(opt, "world");
EXPECT_EQ(*opt, "hello");
}
TEST(OptionalTest, CopyCtorWithOptional) {
Optional<std::string> other{"hello"};
Optional<std::string> opt{other};
EXPECT_TRUE(opt);
EXPECT_FALSE(!opt);
EXPECT_EQ(opt, "hello");
EXPECT_NE(opt, "world");
EXPECT_EQ(*opt, "hello");
}
TEST(OptionalTest, MoveCtorWithOptional) {
Optional<std::string> other{"hello"};
Optional<std::string> opt{std::move(other)};
EXPECT_TRUE(opt);
EXPECT_FALSE(!opt);
EXPECT_EQ(opt, "hello");
EXPECT_NE(opt, "world");
EXPECT_EQ(*opt, "hello");
}
}
} | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/include/langsvr/optional.h | https://github.com/google/langsvr/blob/303c526231a90049a3e384549720f3fbd453cf66/src/optional_test.cc | 303c526231a90049a3e384549720f3fbd453cf66 |
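The assignment operators patched above can also be collapsed into a single copy-and-swap operator, which is self-assignment- and alias-safe by construction. A sketch of that alternative on an invented Box type, not part of langsvr (comparison operators omitted):
#include <cassert>
#include <string>
#include <utility>
template <typename T>
class Box {  // minimal heap-owning optional, copy-and-swap style
 public:
  Box() = default;
  Box(const Box& other) : ptr_(other.ptr_ ? new T(*other.ptr_) : nullptr) {}
  Box(Box&& other) noexcept : ptr_(other.ptr_) { other.ptr_ = nullptr; }
  Box(T value) : ptr_(new T(std::move(value))) {}
  ~Box() { delete ptr_; }
  // One assignment operator covers copy, move, and value assignment: the
  // parameter is fully built before the swap, so `b = b` and `b = *b` are
  // both safe without explicit guards.
  Box& operator=(Box other) noexcept {
    std::swap(ptr_, other.ptr_);
    return *this;
  }
  explicit operator bool() const { return ptr_ != nullptr; }
  T& operator*() { assert(ptr_); return *ptr_; }
 private:
  T* ptr_ = nullptr;
};
int main() {
  Box<std::string> b{"hello"};
  b = b;         // self-assignment keeps the value
  b = *b + "!";  // aliasing value assignment copies before freeing
  assert(*b == "hello!");
}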
d46b1e94-dd76-416b-ac17-f2a57bb63e60 | cpp | tensorflow/tensorflow | batch_op_rewriter | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.cc | tensorflow/core/grappler/optimizers/inference/batch_op_rewriter_test.cc | #include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <functional>
#include <string>
#include "google/protobuf/wrappers.pb.h"
#include "google/protobuf/map.h"
#include "google/protobuf/repeated_field.h"
#include "absl/status/status.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchFunction[] = "BatchFunction";
constexpr char kBatchOpRewriteConfigParamKey[] = "batch_op_rewrite_config";
constexpr char kNumBatchThreadsAttr[] = "num_batch_threads";
constexpr char kMaxBatchSizeAttr[] = "max_batch_size";
constexpr char kBatchTimeoutMicrosAttr[] = "batch_timeout_micros";
constexpr char kAllowedBatchSizesAttr[] = "allowed_batch_sizes";
constexpr char kMaxEnqueuedBatchesAttr[] = "max_enqueued_batches";
constexpr char kEnableLargeBatchSplitting[] = "enable_large_batch_splitting";
constexpr int64 kBoostMicrosNotSet = -1;
using BatchOpRewriteFunction = std::function<void(NodeDef* batch_op)>;
}
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::Status;
using ::tensorflow::grappler::Cluster;
using ::tensorflow::grappler::GrapplerItem;
namespace {
struct AdaptiveBatchSchedulerParams {
int32 initial_inflight_batches;
int32 min_inflight_batches;
int32 max_inflight_batches;
int32 batches_to_average_over;
int64_t full_batch_scheduling_boost_micros;
};
AdaptiveBatchSchedulerParams GetAdaptiveBatchSchedulerParams(
const BatchOpRewriteConfig::AdaptiveBatchSchedulerOption& option) {
AdaptiveBatchSchedulerParams params;
params.min_inflight_batches =
option.has_min_inflight_batches_limit()
? option.min_inflight_batches_limit().value()
: kMinInflightBatches;
params.initial_inflight_batches =
option.has_initial_inflight_batches_limit()
? option.initial_inflight_batches_limit().value()
: kInitialInflightBatches;
params.max_inflight_batches =
option.has_max_inflight_batches_limit()
? option.max_inflight_batches_limit().value()
: kMaxInflightBatches;
params.batches_to_average_over =
option.has_batches_to_average_over()
? option.batches_to_average_over().value()
: kBatchesToAverageOver;
params.full_batch_scheduling_boost_micros =
option.has_full_batch_scheduling_boost_micros()
? option.full_batch_scheduling_boost_micros().value()
: kBoostMicrosNotSet;
return params;
}
void SetNodeAttrs(const AdaptiveBatchSchedulerParams& params, NodeDef* node) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMaxInflightBatchesAttr, params.max_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kMinInflightBatchesAttr, params.min_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kInitialInflightBatchesAttr, params.initial_inflight_batches, node);
::tensorflow::graph_transforms::SetNodeAttr(
kBatchesToAverageOverAttr, params.batches_to_average_over, node);
  if (params.full_batch_scheduling_boost_micros != kBoostMicrosNotSet) {
::tensorflow::graph_transforms::SetNodeAttr(
kFullBatchSchedulingBoostMicros,
params.full_batch_scheduling_boost_micros, node);
}
}
// Applies rewrite_fn to every BatchFunction node, both in the top-level
// graph and inside every function in its library.
void UpdateBatchOps(GraphDef* graph, BatchOpRewriteFunction rewrite_fn) {
for (int i = 0; i < graph->node_size(); ++i) {
NodeDef* node = graph->mutable_node(i);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
for (int i = 0; i < graph->library().function_size(); i++) {
FunctionDef* function_def = graph->mutable_library()->mutable_function(i);
for (int j = 0; j < function_def->node_def_size(); j++) {
NodeDef* node = function_def->mutable_node_def(j);
if (node->op() == kBatchFunction) {
rewrite_fn(node);
}
}
}
}
}
Status BatchOpRewriter::Init(
const ::tensorflow::RewriterConfig_CustomGraphOptimizer* config) {
if (config->parameter_map().find(kBatchOpRewriteConfigParamKey) ==
config->parameter_map().end()) {
return absl::InternalError(
"batch_op_rewrite_config param must be set in the rewriter config "
"with a serialized/encoded BatchOpRewriteConfig.");
}
const auto& params =
config->parameter_map().at(kBatchOpRewriteConfigParamKey);
std::string unencoded;
if (params.s().empty()) {
VLOG(2) << "Empty batch-op rewrite config";
return absl::OkStatus();
}
if (!absl::Base64Unescape(params.s(), &unencoded)) {
return absl::InternalError(
"Failed to unencode batch_op_rewrite_config from params.");
}
if (!config_.ParseFromString(unencoded)) {
return absl::InternalError(
"Failed to parse batch_op_rewrite_config from params.");
}
VLOG(2) << "BatchOp Rewrite config is " << config_.DebugString();
return absl::OkStatus();
}
Status BatchOpRewriter::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
VLOG(2) << "Running BatchOp Rewriter";
*optimized_graph = item.graph;
bool asbs_overridden = false;
if (config_proto_.has_experimental() &&
config_proto_.experimental().has_session_metadata()) {
const string model_name =
config_proto_.experimental().session_metadata().name();
if (!config_.model_scheduler_options().empty()) {
return absl::InvalidArgumentError(
"model_scheduler_options is deprecated. Please use the "
"adaptive_batch_scheduler_option field in batch_options instead.");
}
auto model_batch_options = config_.batch_options().find(model_name);
if (model_batch_options != config_.batch_options().end()) {
auto& batch_options = model_batch_options->second;
VLOG(2) << "Rewriting batch_options for " << model_name << " to "
<< batch_options.DebugString();
if (batch_options.has_adaptive_batch_scheduler_option()) {
AdaptiveBatchSchedulerParams params = GetAdaptiveBatchSchedulerParams(
batch_options.adaptive_batch_scheduler_option());
if ((params.min_inflight_batches > params.max_inflight_batches) ||
(params.initial_inflight_batches < params.min_inflight_batches) ||
(params.initial_inflight_batches > params.max_inflight_batches)) {
return absl::InvalidArgumentError(absl::StrCat(
"Requires min_inflight_batches <= initial_inflight_batches "
"and initial_inflight_batches <= max_inflight_batches; Got "
"{min_inflight_batches : ",
params.min_inflight_batches,
", initial_inflight_batches : ", params.initial_inflight_batches,
", max_inflight_batches : ", params.max_inflight_batches, "}."));
}
asbs_overridden = true;
UpdateBatchOps(optimized_graph, [¶ms](NodeDef* batch_op) {
SetNodeAttrs(params, batch_op);
});
}
if (config_.enable_adaptive_shared_batching_thread_pool() &&
!asbs_overridden && batch_options.has_num_batch_threads() &&
batch_options.num_batch_threads() != 0) {
return absl::InvalidArgumentError(
"Unable to enable adapative shared batching because it requires "
"num_batch_threads=0 but the BatchOpRewriteConfig is also trying "
"to set num_batch_threads. Set either set "
"enable_adaptive_shared_batching_thread_pool or num_batch_threads "
"but not both.");
}
UpdateBatchOps(optimized_graph, [&batch_options](NodeDef* batch_op) {
if (batch_options.has_num_batch_threads()) {
::tensorflow::graph_transforms::SetNodeAttr(
kNumBatchThreadsAttr, batch_options.num_batch_threads(),
batch_op);
}
if (batch_options.has_max_batch_size()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxBatchSizeAttr, batch_options.max_batch_size(), batch_op);
}
if (batch_options.has_batch_timeout_micros()) {
::tensorflow::graph_transforms::SetNodeAttr(
kBatchTimeoutMicrosAttr, batch_options.batch_timeout_micros(),
batch_op);
}
if (!batch_options.allowed_batch_sizes().empty()) {
::tensorflow::graph_transforms::SetNodeAttr(
kAllowedBatchSizesAttr, batch_options.allowed_batch_sizes(),
batch_op);
}
if (batch_options.has_max_enqueued_batches()) {
::tensorflow::graph_transforms::SetNodeAttr(
kMaxEnqueuedBatchesAttr, batch_options.max_enqueued_batches(),
batch_op);
}
if (batch_options.has_disable_large_batch_splitting()) {
::tensorflow::graph_transforms::SetNodeAttr(
kEnableLargeBatchSplitting,
!batch_options.disable_large_batch_splitting(), batch_op);
}
});
}
}
if (asbs_overridden) {
return absl::OkStatus();
}
if (config_.enable_adaptive_shared_batching_thread_pool()) {
UpdateBatchOps(optimized_graph, [](NodeDef* batch_op) {
::tensorflow::graph_transforms::SetNodeAttr(kNumBatchThreadsAttr, 0,
batch_op);
});
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchOpRewriter, "batch_op_rewrite");
}
} | #include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.h"
#include <vector>
#include "google/protobuf/wrappers.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/escaping.h"
#include "absl/strings/substitute.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::RewriterConfig_CustomGraphOptimizer;
using ::tensorflow::Status;
using ::tensorflow::grappler::GrapplerItem;
using ::tensorflow::serving::BatchOpRewriteConfig;
void AddBatchOp(GraphDef* graph, int num_batch_threads = 16,
const absl::flat_hash_map<string, int>& reserved_int_attrs = {},
int max_batch_size = 16, int batch_timeout_micros = 10000,
const std::vector<int32>& allowed_batch_sizes = {8, 16},
int max_enqueued_batches = 1000,
bool disable_large_batch_splitting = false) {
auto set_batch_node_attribute = [&](const int32_t num_batch_threads,
NodeDef* batch_op) {
batch_op->set_name("cond/batch/BatchFunction");
batch_op->set_op("BatchFunction");
::tensorflow::graph_transforms::SetNodeAttr("num_batch_threads",
num_batch_threads, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_batch_size",
max_batch_size, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("batch_timeout_micros",
batch_timeout_micros, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("allowed_batch_sizes",
allowed_batch_sizes, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("max_enqueued_batches",
max_enqueued_batches, batch_op);
::tensorflow::graph_transforms::SetNodeAttr("enable_large_batch_splitting",
!disable_large_batch_splitting,
batch_op);
if (!reserved_int_attrs.empty()) {
::tensorflow::graph_transforms::SetNodeAttr(kEnableAdaptiveSchedulerAttr,
true, batch_op);
for (const auto& reserved_int_attr : reserved_int_attrs) {
::tensorflow::graph_transforms::SetNodeAttr(
reserved_int_attr.first, reserved_int_attr.second, batch_op);
}
}
};
set_batch_node_attribute(num_batch_threads, graph->add_node());
FunctionDefLibrary* function_def_lib = graph->mutable_library();
FunctionDef* function_def = function_def_lib->add_function();
set_batch_node_attribute(num_batch_threads, function_def->add_node_def());
}
RewriterConfig_CustomGraphOptimizer MakeConfig(
const BatchOpRewriteConfig& config) {
RewriterConfig_CustomGraphOptimizer rewriter_config;
(*rewriter_config.mutable_parameter_map())["batch_op_rewrite_config"].set_s(
absl::Base64Escape(config.SerializeAsString()));
return rewriter_config;
}
class BatchOpRewriterTest : public ::testing::TestWithParam<bool> {};
INSTANTIATE_TEST_SUITE_P(RewriteNumBatchThreads, BatchOpRewriterTest,
::testing::Bool());
TEST_P(BatchOpRewriterTest, Basic) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
GraphDef optimized_graph;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
AddBatchOp(&expected_graph, GetParam() ? 0 : 16);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_P(BatchOpRewriterTest, InvalidArgumentForAdaptiveBatchScheduler) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.Init(&rewriter_config));
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_version(123);
optimizer.config_proto_.mutable_experimental()
->mutable_session_metadata()
->set_name("model_with_override");
GraphDef optimized_graph;
Status status = optimizer.Optimize(nullptr, item, &optimized_graph);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(errors::IsInvalidArgument(status));
}
TEST_P(BatchOpRewriterTest, AdaptiveBatchScheduler) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(GetParam());
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_max_inflight_batches_limit()
->set_value(32);
(*config.mutable_batch_options())["model_with_override"]
.mutable_adaptive_batch_scheduler_option()
->mutable_full_batch_scheduling_boost_micros()
->set_value(12345);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
  AddBatchOp(&expected_graph, /*num_batch_threads=*/16,
             {{kBatchesToAverageOverAttr, 1000},
              {kInitialInflightBatchesAttr, 16},
              {kMinInflightBatchesAttr, 8},
              {kMaxInflightBatchesAttr, 32},
              {kFullBatchSchedulingBoostMicros, 12345}});
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest, UpdateModelSchedulerOptions) {
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_batches_to_average_over()
->set_value(1000);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_initial_inflight_batches_limit()
->set_value(16);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_min_inflight_batches_limit()
->set_value(8);
(*config.mutable_model_scheduler_options())["model_with_override"]
.mutable_max_inflight_batches_limit()
->set_value(32);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph, 16);
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
TEST_F(BatchOpRewriterTest, UpdateBatchOptions) {
BatchOpRewriteConfig config;
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
(*config.mutable_batch_options())["model_with_override"].set_max_batch_size(
128);
(*config.mutable_batch_options())["model_with_override"]
.set_batch_timeout_micros(5000);
const std::vector<int32> allowed_batch_sizes{4, 32};
(*config.mutable_batch_options())["model_with_override"]
.mutable_allowed_batch_sizes()
->Add(allowed_batch_sizes.begin(), allowed_batch_sizes.end());
(*config.mutable_batch_options())["model_with_override"]
.set_max_enqueued_batches(500);
(*config.mutable_batch_options())["model_with_override"]
.set_disable_large_batch_splitting(true);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
GrapplerItem item;
AddBatchOp(&item.graph);
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &optimized_graph));
GraphDef expected_graph;
  AddBatchOp(&expected_graph, 2 /* num_batch_threads */,
             {} /* no adaptive scheduler option */, 128 /* max_batch_size */,
             5000 /* batch_timeout_micros */, allowed_batch_sizes,
             500 /* max_enqueued_batches */,
             true /* disable_large_batch_splitting */);
EXPECT_EQ(optimized_graph.DebugString(), expected_graph.DebugString());
}
TEST_F(BatchOpRewriterTest,
UpdateAdaptiveSharedBatchSchedulerAndNumBatchThreads) {
GrapplerItem item;
AddBatchOp(&item.graph, 16);
BatchOpRewriteConfig config;
config.set_enable_adaptive_shared_batching_thread_pool(true);
(*config.mutable_batch_options())["model_with_override"]
.set_num_batch_threads(2);
RewriterConfig_CustomGraphOptimizer rewriter_config = MakeConfig(config);
ConfigProto config_proto;
config_proto.mutable_experimental()->mutable_session_metadata()->set_version(
123);
config_proto.mutable_experimental()->mutable_session_metadata()->set_name(
"model_with_override");
BatchOpRewriter optimizer;
TF_ASSERT_OK(optimizer.InitWithConfig(config_proto, &rewriter_config));
GraphDef optimized_graph;
ASSERT_FALSE(optimizer.Optimize(nullptr, item, &optimized_graph).ok());
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/inference/batch_op_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/inference/batch_op_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

81f07eaa-6daf-4636-87c4-9733b688408d | cpp | tensorflow/tensorflow | layout | third_party/xla/xla/layout.cc | third_party/xla/xla/layout_test.cc

#include "xla/layout.h"
#include <algorithm>  // std::max, used in the multi-attribute constructor.
#include <cstdint>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/printer.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
TileProto Tile::ToProto() const {
TileProto tile_proto;
SetProto(tile_proto);
return tile_proto;
}
void Tile::SetProto(TileProto& tile_proto) const {
tile_proto.Clear();
for (int64_t i : dimensions()) {
tile_proto.add_dimensions(i);
}
}
void Tile::Print(Printer* printer) const {
printer->Append("(");
AppendJoin(printer, dimensions(), ",", [&](Printer* printer, int64_t dim) {
if (dim >= 0) {
printer->Append(dim);
} else {
if (dim == kCombineDimension) {
printer->Append("*");
} else {
printer->Append("Invalid value ");
printer->Append(dim);
}
}
});
printer->Append(")");
}
std::string Tile::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
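// Usage note (mirrors the unit test further below): printing a Tile yields a
// parenthesized, comma-separated dimension list, with kCombineDimension
// rendered as "*":
//
//   Tile tile({7, 8});
//   tile.ToString();  // "(7,8)"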
Layout::Layout()
: index_primitive_type_(PRIMITIVE_TYPE_INVALID),
pointer_primitive_type_(PRIMITIVE_TYPE_INVALID) {}
SplitConfigProto SplitConfig::ToProto() const {
SplitConfigProto split_config_proto;
split_config_proto.set_dimension(dimension_);
for (int64_t i : split_indices_) {
split_config_proto.add_split_indices(i);
}
return split_config_proto;
}
void SplitConfig::SetProto(SplitConfigProto& split_config_proto) const {
split_config_proto.Clear();
split_config_proto.set_dimension(dimension_);
for (int64_t i : split_indices_) {
split_config_proto.add_split_indices(i);
}
}
std::string SplitConfig::ToString() const {
return absl::StrCat("(", dimension_, ":", absl::StrJoin(split_indices_, ","),
")");
}
Layout::Layout(absl::Span<const int64_t> minor_to_major)
: index_primitive_type_(PRIMITIVE_TYPE_INVALID),
pointer_primitive_type_(PRIMITIVE_TYPE_INVALID),
minor_to_major_(minor_to_major.begin(), minor_to_major.end()) {}
Layout::Layout(absl::Span<const int64_t> minor_to_major,
absl::Span<const Tile> tiles, int64_t element_size_in_bits)
: index_primitive_type_(PRIMITIVE_TYPE_INVALID),
pointer_primitive_type_(PRIMITIVE_TYPE_INVALID),
element_size_in_bits_(element_size_in_bits),
minor_to_major_(minor_to_major.begin(), minor_to_major.end()),
tiles_(tiles.begin(), tiles.end()) {}
Layout::Layout(absl::Span<const int64_t> minor_to_major,
absl::Span<const DimLevelType> dim_level_types,
absl::Span<const bool> dim_unique,
absl::Span<const bool> dim_ordered, absl::Span<const Tile> tiles,
int64_t tail_padding_alignment_in_elements,
PrimitiveType index_primitive_type,
PrimitiveType element_primitive_type,
int64_t element_size_in_bits, int64_t memory_space,
absl::Span<const SplitConfig> split_configs,
std::unique_ptr<Shape> physical_shape,
int64_t dynamic_shape_metadata_prefix_bytes)
: index_primitive_type_(index_primitive_type),
pointer_primitive_type_(element_primitive_type),
memory_space_(memory_space),
element_size_in_bits_(element_size_in_bits),
minor_to_major_(minor_to_major.begin(), minor_to_major.end()),
tiles_(tiles.begin(), tiles.end()),
split_configs_(split_configs.begin(), split_configs.end()),
tail_padding_alignment_in_elements_(tail_padding_alignment_in_elements),
physical_shape_(std::move(physical_shape)),
dynamic_shape_metadata_prefix_bytes_(
dynamic_shape_metadata_prefix_bytes) {
n_dim_level_types_ = dim_level_types.size();
n_dim_unique_ = dim_unique.size();
n_dim_ordered_ = dim_ordered.size();
const int n_attributes = std::max<int>(
n_dim_level_types_, std::max<int>(n_dim_unique_, n_dim_ordered_));
dim_attributes_.resize(n_attributes);
for (int i = 0; i < n_attributes; i++) {
if (i < n_dim_level_types_)
dim_attributes_[i].dim_level_type = dim_level_types[i];
if (i < n_dim_unique_) dim_attributes_[i].dim_unique = dim_unique[i];
if (i < n_dim_ordered_) dim_attributes_[i].dim_ordered = dim_ordered[i];
}
}
Layout::Layout(const Layout& other)
: dim_attributes_(other.dim_attributes_),
n_dim_level_types_(other.n_dim_level_types_),
n_dim_unique_(other.n_dim_unique_),
n_dim_ordered_(other.n_dim_ordered_),
index_primitive_type_(other.index_primitive_type_),
pointer_primitive_type_(other.pointer_primitive_type_),
memory_space_(other.memory_space_),
element_size_in_bits_(other.element_size_in_bits_),
minor_to_major_(other.minor_to_major_),
tiles_(other.tiles_),
split_configs_(other.split_configs_),
tail_padding_alignment_in_elements_(
other.tail_padding_alignment_in_elements_),
physical_shape_(other.physical_shape_ != nullptr
? std::make_unique<Shape>(*other.physical_shape_)
: nullptr),
dynamic_shape_metadata_prefix_bytes_(
other.dynamic_shape_metadata_prefix_bytes_) {}
Layout::Layout(Layout&& other) = default;
Layout::~Layout() = default;
Layout& Layout::operator=(const Layout& other) {
if (this != &other) {
dim_attributes_ = other.dim_attributes_;
n_dim_level_types_ = other.n_dim_level_types_;
n_dim_unique_ = other.n_dim_unique_;
n_dim_ordered_ = other.n_dim_ordered_;
minor_to_major_ = other.minor_to_major_;
tiles_ = other.tiles_;
tail_padding_alignment_in_elements_ =
other.tail_padding_alignment_in_elements_;
index_primitive_type_ = other.index_primitive_type_;
pointer_primitive_type_ = other.pointer_primitive_type_;
element_size_in_bits_ = other.element_size_in_bits_;
memory_space_ = other.memory_space_;
split_configs_ = other.split_configs_;
if (other.physical_shape_ != nullptr) {
physical_shape_ = std::make_unique<Shape>(*other.physical_shape_);
} else {
physical_shape_ = nullptr;
}
dynamic_shape_metadata_prefix_bytes_ =
other.dynamic_shape_metadata_prefix_bytes_;
}
return *this;
}
Layout& Layout::operator=(Layout&& other) = default;
Layout Layout::CreateFromProto(const LayoutProto& proto) {
Layout layout;
for (int dim_level_type : proto.dim_level_types()) {
layout.add_dim_level_type(static_cast<DimLevelType>(dim_level_type));
}
for (bool dim_unique : proto.dim_unique()) {
layout.add_dim_unique(dim_unique);
}
for (bool dim_ordered : proto.dim_ordered()) {
layout.add_dim_ordered(dim_ordered);
}
layout.minor_to_major_.reserve(proto.minor_to_major_size());
for (const int64_t dimension : proto.minor_to_major()) {
layout.add_minor_to_major(dimension);
}
for (const TileProto& tile_proto : proto.tiles()) {
*layout.add_tiles() = Tile::CreateFromProto(tile_proto);
}
if (proto.tail_padding_alignment_in_elements() != 0) {
layout.set_tail_padding_alignment_in_elements(
proto.tail_padding_alignment_in_elements());
} else {
layout.set_tail_padding_alignment_in_elements(1);
}
layout.set_index_primitive_type(proto.index_primitive_type());
layout.set_pointer_primitive_type(proto.pointer_primitive_type());
layout.set_element_size_in_bits(proto.element_size_in_bits());
layout.set_memory_space(proto.memory_space());
for (const SplitConfigProto& split_config_proto : proto.split_configs()) {
layout.add_split_configs(SplitConfig::CreateFromProto(split_config_proto));
}
if (proto.has_physical_shape()) {
*layout.mutable_physical_shape() = Shape(proto.physical_shape());
}
layout.set_dynamic_shape_metadata_prefix_bytes(
proto.dynamic_shape_metadata_prefix_bytes());
return layout;
}
LayoutProto Layout::ToProto() const {
LayoutProto proto;
SetProto(proto);
return proto;
}
void Layout::SetProto(LayoutProto& proto) const {
proto.Clear();
for (int i = 0; i < n_dim_level_types_; i++) {
proto.add_dim_level_types(dim_level_type(i));
}
for (int i = 0; i < n_dim_unique_; i++) {
proto.add_dim_unique(dim_unique(i));
}
for (int i = 0; i < n_dim_ordered_; i++) {
proto.add_dim_ordered(dim_ordered(i));
}
proto.mutable_minor_to_major()->Reserve(minor_to_major_size());
for (const int64_t dimension : minor_to_major()) {
proto.add_minor_to_major(dimension);
}
for (const Tile& tile : tiles()) {
tile.SetProto(*proto.add_tiles());
}
proto.set_tail_padding_alignment_in_elements(
tail_padding_alignment_in_elements());
proto.set_index_primitive_type(index_primitive_type());
proto.set_pointer_primitive_type(pointer_primitive_type());
proto.set_element_size_in_bits(element_size_in_bits_);
proto.set_memory_space(memory_space_);
for (const SplitConfig& split_config : split_configs()) {
split_config.SetProto(*proto.add_split_configs());
}
if (has_physical_shape()) {
*proto.mutable_physical_shape() = physical_shape_->ToProto();
}
proto.set_dynamic_shape_metadata_prefix_bytes(
dynamic_shape_metadata_prefix_bytes_);
}
namespace {
absl::string_view DimLevelTypeAbbrev(DimLevelType dim_level_type) {
switch (dim_level_type) {
case DIM_DENSE:
return "D";
case DIM_COMPRESSED:
return "C";
case DIM_SINGLETON:
return "S";
case xla::DIM_LOOSE_COMPRESSED:
return "H";
default:
LOG(FATAL) << "Invalid DimLevelType value: " << dim_level_type;
}
}
}  // namespace
void Layout::Print(Printer* printer) const {
printer->Append("{");
AppendJoin(printer, minor_to_major(), ",");
bool colon_printed = false;
auto print_colon = [&]() {
if (colon_printed) return;
printer->Append(":");
colon_printed = true;
};
if (n_dim_level_types_ > 0) {
auto print_one = [&](int i) {
printer->Append(DimLevelTypeAbbrev(dim_level_type(i)));
if (n_dim_unique_ > 0 && !dim_unique(i)) {
printer->Append("+");
}
if (n_dim_ordered_ > 0 && !dim_ordered(i)) {
printer->Append("~");
}
};
print_colon();
printer->Append("D(");
print_one(0);
for (int i = 1; i < n_dim_level_types_; ++i) {
printer->Append(",");
print_one(i);
}
printer->Append(")");
}
if (!tiles().empty()) {
print_colon();
printer->Append("T");
for (const Tile& tile : tiles()) {
tile.Print(printer);
}
}
if (tail_padding_alignment_in_elements() != 1) {
print_colon();
printer->Append("L(");
printer->Append(tail_padding_alignment_in_elements());
printer->Append(")");
}
if (index_primitive_type() != PRIMITIVE_TYPE_INVALID) {
print_colon();
if (primitive_util::IsIntegralType(index_primitive_type())) {
printer->Append("#(");
printer->Append(
primitive_util::LowercasePrimitiveTypeName(index_primitive_type()));
printer->Append(")");
} else {
printer->Append("#(invalid)");
}
}
if (pointer_primitive_type() != PRIMITIVE_TYPE_INVALID) {
print_colon();
if (primitive_util::IsIntegralType(pointer_primitive_type())) {
printer->Append("*(");
printer->Append(
primitive_util::LowercasePrimitiveTypeName(pointer_primitive_type()));
printer->Append(")");
} else {
printer->Append("*(invalid)");
}
}
if (element_size_in_bits() != 0) {
print_colon();
printer->Append("E(");
printer->Append(element_size_in_bits());
printer->Append(")");
}
if (memory_space() != 0) {
print_colon();
printer->Append("S(");
printer->Append(memory_space());
printer->Append(")");
}
if (!split_configs().empty()) {
print_colon();
printer->Append("SC");
for (const auto& split_config : split_configs()) {
printer->Append(split_config.ToString());
}
}
if (has_physical_shape()) {
print_colon();
printer->Append("P(");
    physical_shape_->Print(printer, /*print_layout=*/true);
printer->Append(")");
}
if (dynamic_shape_metadata_prefix_bytes_ > 0) {
print_colon();
printer->Append("M(");
printer->Append(dynamic_shape_metadata_prefix_bytes());
printer->Append(")");
}
printer->Append("}");
}
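// Worked example of the text form produced above, taken from the unit tests:
// minor-to-major order comes first, then optional sections after a colon,
// e.g. tiles (T), tail padding alignment (L), element size in bits (E),
// memory space (S), and split configs (SC):
//
//   Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
//       .set_tail_padding_alignment_in_elements(100)
//       .set_element_size_in_bits(42)
//       .ToString();  // "{3,2,1,0:T(42,123)(4,5)L(100)E(42)}"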
std::string Layout::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
bool Layout::Equal::operator()(const Layout& lhs, const Layout& rhs) {
if (!LayoutUtil::IsDense(lhs) || !LayoutUtil::IsDense(rhs)) {
if (lhs.dim_level_types_size() != rhs.dim_level_types_size()) {
return false;
}
for (int i = 0; i < lhs.dim_level_types_size(); i++) {
if (lhs.dim_level_type(i) != rhs.dim_level_type(i)) {
return false;
}
}
if (lhs.dim_unique_size() != rhs.dim_unique_size()) {
return false;
}
for (int i = 0; i < lhs.dim_unique_size(); i++) {
if (lhs.dim_unique(i) != rhs.dim_unique(i)) {
return false;
}
}
if (lhs.dim_ordered_size() != rhs.dim_ordered_size()) {
return false;
}
for (int i = 0; i < lhs.dim_ordered_size(); i++) {
if (lhs.dim_ordered(i) != rhs.dim_ordered(i)) {
return false;
}
}
}
if (lhs.minor_to_major() != rhs.minor_to_major()) {
return false;
}
if (!ignore_tiles_ && lhs.tiles() != rhs.tiles()) {
return false;
}
if (!ignore_tail_padding_alignment_in_elements_ &&
lhs.tail_padding_alignment_in_elements() !=
rhs.tail_padding_alignment_in_elements()) {
return false;
}
if (!ignore_index_primitive_type_ &&
lhs.index_primitive_type() != rhs.index_primitive_type()) {
return false;
}
if (!ignore_pointer_primitive_type_ &&
lhs.pointer_primitive_type() != rhs.pointer_primitive_type()) {
return false;
}
if (!ignore_element_size_ &&
lhs.element_size_in_bits() != rhs.element_size_in_bits()) {
return false;
}
if (!ignore_memory_space_ && lhs.memory_space() != rhs.memory_space()) {
return false;
}
if (!ignore_split_configs_ && lhs.split_configs() != rhs.split_configs()) {
return false;
}
if (!ignore_physical_shape_) {
if (lhs.has_physical_shape() || rhs.has_physical_shape()) {
if (!lhs.has_physical_shape() || !rhs.has_physical_shape()) {
return false;
}
if (lhs.physical_shape() != rhs.physical_shape()) {
return false;
}
}
}
return true;
}
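// Usage sketch (grounded in the equality unit test): Layout::Equal is a
// configurable comparator, so individual properties can be masked out of the
// comparison:
//
//   Layout::Equal().IgnoreTiles()(
//       Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
//       Layout({0, 1, 2}));  // true: tiles are excluded from the comparison.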
bool Layout::operator==(const Layout& other) const {
return Equal()(*this, other);
}
std::ostream& operator<<(std::ostream& out, const Tile& tile) {
out << tile.ToString();
return out;
}
std::ostream& operator<<(std::ostream& out, const Layout& layout) {
out << layout.ToString();
return out;
}
Shape* Layout::mutable_physical_shape() {
if (physical_shape_ == nullptr) {
physical_shape_ = std::make_unique<Shape>();
}
return physical_shape_.get();
}
void Layout::clear_physical_shape() { physical_shape_ = nullptr; }
Layout& Layout::DeleteDimension(int64_t dim_to_delete) {
for (int64_t i = 0; i < minor_to_major_.size();) {
if (minor_to_major_[i] == dim_to_delete) {
minor_to_major_.erase(minor_to_major_.begin() + i);
continue;
}
if (minor_to_major_[i] > dim_to_delete) {
minor_to_major_[i] -= 1;
}
++i;
}
if (LayoutUtil::IsSparse(*this)) {
if (dim_to_delete < n_dim_level_types_) n_dim_level_types_--;
if (dim_to_delete < n_dim_unique_) n_dim_unique_--;
if (dim_to_delete < n_dim_ordered_) n_dim_ordered_--;
dim_attributes_.erase(dim_attributes_.begin() + dim_to_delete);
}
return *this;
}
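// Worked example (derived from the loop above, not from the original source):
// deleting dimension 1 from a layout with minor_to_major {2, 1, 0} erases the
// entry equal to 1 and renumbers higher dimensions down by one:
//
//   Layout layout({2, 1, 0});
//   layout.DeleteDimension(1);  // minor_to_major becomes {1, 0}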
}  // namespace xla

#include "xla/layout.h"
#include <cstdint>
#include <memory>
#include <sstream>
#include <vector>
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
class LayoutTest : public ::testing::Test {};
TEST_F(LayoutTest, ToString) {
EXPECT_EQ(Layout().ToString(), "{}");
EXPECT_EQ(Layout({4, 5, 6}).ToString(), "{4,5,6}");
EXPECT_EQ(Layout({4, 5, 6}).ToString(), "{4,5,6}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.ToString(),
"{3,2,1,0:T(42,123)(4,5)}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.set_tail_padding_alignment_in_elements(100)
.set_element_size_in_bits(42)
.ToString(),
"{3,2,1,0:T(42,123)(4,5)L(100)E(42)}");
EXPECT_EQ(Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})})
.set_memory_space(3)
.ToString(),
"{3,2,1,0:T(42,123)(4,5)S(3)}");
EXPECT_EQ(Layout({0, 1}, {}, {}, {}, {Tile({123})})
.add_split_configs(SplitConfig(0, {3}))
.add_split_configs(SplitConfig(1, {0, 4}))
.ToString(),
"{0,1:T(123)SC(0:3)(1:0,4)}");
}
TEST_F(LayoutTest, StreamOut) {
{
std::ostringstream oss;
oss << Tile({7, 8});
EXPECT_EQ(oss.str(), "(7,8)");
}
{
std::ostringstream oss;
oss << Layout({0, 1, 2});
EXPECT_EQ(oss.str(), "{0,1,2}");
}
}
TEST_F(LayoutTest, Equality) {
EXPECT_EQ(Layout(), Layout());
const std::vector<int64_t> empty_dims;
EXPECT_EQ(Layout(empty_dims), Layout(empty_dims));
EXPECT_EQ(Layout(), Layout(empty_dims));
EXPECT_EQ(Layout({0, 1, 2, 3}), Layout({0, 1, 2, 3}));
EXPECT_NE(Layout({0, 1, 2, 3}), Layout({0, 1, 2}));
EXPECT_EQ(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}));
EXPECT_NE(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 45})}));
EXPECT_NE(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2, 3}));
EXPECT_EQ(Layout({0, 1, 2}).set_element_size_in_bits(33),
Layout({0, 1, 2}).set_element_size_in_bits(33));
EXPECT_NE(Layout({0, 1, 2}).set_element_size_in_bits(33),
Layout({0, 1, 2}).set_element_size_in_bits(7));
EXPECT_EQ(Layout({0, 1, 2}).set_memory_space(3),
Layout({0, 1, 2}).set_memory_space(3));
EXPECT_NE(Layout({0, 1, 2}).set_memory_space(1),
Layout({0, 1, 2}).set_memory_space(3));
EXPECT_FALSE(Layout::Equal()(Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}),
Layout({0, 1, 2})));
EXPECT_EQ(Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})));
EXPECT_NE(Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {3})));
EXPECT_TRUE(Layout::Equal().IgnoreTiles()(
Layout({0, 1, 2}, {}, {}, {}, {Tile({42, 44})}), Layout({0, 1, 2})));
EXPECT_FALSE(Layout::Equal()(
Layout({0, 1, 2}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
PRIMITIVE_TYPE_INVALID, 32),
Layout({0, 1, 2}, {}, {}, {}, {}, 1, PRIMITIVE_TYPE_INVALID,
PRIMITIVE_TYPE_INVALID, 1)));
EXPECT_TRUE(Layout::Equal().IgnoreElementSize()(
Layout({0, 1, 2}).set_element_size_in_bits(32),
Layout({0, 1, 2}).set_element_size_in_bits(1)));
EXPECT_TRUE(Layout::Equal().IgnoreMemorySpace()(
Layout({0, 1, 2}).set_memory_space(1),
Layout({0, 1, 2}).set_memory_space(3)));
EXPECT_TRUE(Layout::Equal().IgnoreSplitConfigs()(
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {2})),
Layout({0, 1, 2}).add_split_configs(SplitConfig(0, {3}))));
}
TEST_F(LayoutTest, LayoutToFromProto) {
auto expect_unchanged = [](const Layout& layout) {
EXPECT_EQ(layout, Layout::CreateFromProto(layout.ToProto()));
};
expect_unchanged(Layout());
expect_unchanged(Layout({1, 3, 2, 0}));
expect_unchanged(Layout({0, 1}).set_element_size_in_bits(42));
expect_unchanged(
Layout({3, 2, 1, 0}, {}, {}, {}, {Tile({42, 123}), Tile({4, 5})}));
expect_unchanged(Layout({1, 0}, {DIM_DENSE, DIM_COMPRESSED}, {}, {}, {}));
expect_unchanged(
Layout({1, 0}, {DIM_DENSE, DIM_COMPRESSED}, {}, {}, {}, 1,
PRIMITIVE_TYPE_INVALID, PRIMITIVE_TYPE_INVALID, 0, 0, {},
std::make_unique<Shape>(ShapeUtil::MakeShape(S32, {10, 10}))));
expect_unchanged(Layout({0, 1}, {}, {}, {}, {Tile({123})})
.add_split_configs(SplitConfig(0, {3}))
.add_split_configs(SplitConfig(1, {0, 4})));
}
}
}  // namespace xla
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/layout.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/layout_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

d43c021f-cd0c-40a6-968e-a0d750ae4d54 | cpp | google/quiche | quic_dispatcher | quiche/quic/core/quic_dispatcher.cc | quiche/quic/core/quic_dispatcher_test.cc

#include "quiche/quic/core/quic_dispatcher.h"
#include <openssl/ssl.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <list>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/chlo_extractor.h"
#include "quiche/quic/core/connection_id_generator.h"
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h"
#include "quiche/quic/core/frames/quic_connection_close_frame.h"
#include "quiche/quic/core/frames/quic_frame.h"
#include "quiche/quic/core/frames/quic_rst_stream_frame.h"
#include "quiche/quic/core/frames/quic_stop_sending_frame.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_alarm_factory.h"
#include "quiche/quic/core/quic_blocked_writer_interface.h"
#include "quiche/quic/core/quic_buffered_packet_store.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_crypto_server_stream_base.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_framer.h"
#include "quiche/quic/core/quic_packet_creator.h"
#include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/core/quic_packet_writer.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_stream_frame_data_producer.h"
#include "quiche/quic/core/quic_stream_send_buffer.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_time_wait_list_manager.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_version_manager.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/core/tls_chlo_extractor.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_stack_trace.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/print_elements.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
using BufferedPacket = QuicBufferedPacketStore::BufferedPacket;
using BufferedPacketList = QuicBufferedPacketStore::BufferedPacketList;
using EnqueuePacketResult = QuicBufferedPacketStore::EnqueuePacketResult;
namespace {
const QuicPacketLength kMinClientInitialPacketLength = 1200;
class DeleteSessionsAlarm : public QuicAlarm::DelegateWithoutContext {
public:
explicit DeleteSessionsAlarm(QuicDispatcher* dispatcher)
: dispatcher_(dispatcher) {}
DeleteSessionsAlarm(const DeleteSessionsAlarm&) = delete;
DeleteSessionsAlarm& operator=(const DeleteSessionsAlarm&) = delete;
void OnAlarm() override { dispatcher_->DeleteSessions(); }
private:
QuicDispatcher* dispatcher_;
};
class ClearStatelessResetAddressesAlarm
: public QuicAlarm::DelegateWithoutContext {
public:
explicit ClearStatelessResetAddressesAlarm(QuicDispatcher* dispatcher)
: dispatcher_(dispatcher) {}
  ClearStatelessResetAddressesAlarm(const ClearStatelessResetAddressesAlarm&) =
      delete;
  ClearStatelessResetAddressesAlarm& operator=(
      const ClearStatelessResetAddressesAlarm&) = delete;
void OnAlarm() override { dispatcher_->ClearStatelessResetAddresses(); }
private:
QuicDispatcher* dispatcher_;
};
class StatelessConnectionTerminator {
public:
StatelessConnectionTerminator(QuicConnectionId server_connection_id,
QuicConnectionId original_server_connection_id,
const ParsedQuicVersion version,
QuicPacketNumber last_sent_packet_number,
QuicConnectionHelperInterface* helper,
QuicTimeWaitListManager* time_wait_list_manager)
: server_connection_id_(server_connection_id),
framer_(ParsedQuicVersionVector{version},
QuicTime::Zero(), Perspective::IS_SERVER,
kQuicDefaultConnectionIdLength),
collector_(helper->GetStreamSendBufferAllocator()),
creator_(server_connection_id, &framer_, &collector_),
time_wait_list_manager_(time_wait_list_manager) {
framer_.set_data_producer(&collector_);
framer_.SetInitialObfuscators(original_server_connection_id);
if (last_sent_packet_number.IsInitialized()) {
QUICHE_DCHECK(
GetQuicRestartFlag(quic_dispatcher_ack_buffered_initial_packets));
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets, 3,
8);
creator_.set_packet_number(last_sent_packet_number);
}
}
~StatelessConnectionTerminator() {
framer_.set_data_producer(nullptr);
}
void CloseConnection(QuicErrorCode error_code,
const std::string& error_details, bool ietf_quic,
std::vector<QuicConnectionId> active_connection_ids) {
SerializeConnectionClosePacket(error_code, error_details);
time_wait_list_manager_->AddConnectionIdToTimeWait(
QuicTimeWaitListManager::SEND_TERMINATION_PACKETS,
TimeWaitConnectionInfo(ietf_quic, collector_.packets(),
std::move(active_connection_ids),
QuicTime::Delta::Zero()));
}
private:
void SerializeConnectionClosePacket(QuicErrorCode error_code,
const std::string& error_details) {
QuicConnectionCloseFrame* frame =
new QuicConnectionCloseFrame(framer_.transport_version(), error_code,
NO_IETF_QUIC_ERROR, error_details,
                                     /*transport_close_frame_type=*/0);
if (!creator_.AddFrame(QuicFrame(frame), NOT_RETRANSMISSION)) {
QUIC_BUG(quic_bug_10287_1) << "Unable to add frame to an empty packet";
delete frame;
return;
}
creator_.FlushCurrentPacket();
QUICHE_DCHECK_EQ(1u, collector_.packets()->size());
}
QuicConnectionId server_connection_id_;
QuicFramer framer_;
PacketCollector collector_;
QuicPacketCreator creator_;
QuicTimeWaitListManager* time_wait_list_manager_;
};
class ChloAlpnSniExtractor : public ChloExtractor::Delegate {
public:
  void OnChlo(QuicTransportVersion /*version*/,
              QuicConnectionId /*server_connection_id*/,
const CryptoHandshakeMessage& chlo) override {
absl::string_view alpn_value;
if (chlo.GetStringPiece(kALPN, &alpn_value)) {
alpn_ = std::string(alpn_value);
}
absl::string_view sni;
if (chlo.GetStringPiece(quic::kSNI, &sni)) {
sni_ = std::string(sni);
}
absl::string_view uaid_value;
if (chlo.GetStringPiece(quic::kUAID, &uaid_value)) {
uaid_ = std::string(uaid_value);
}
}
std::string&& ConsumeAlpn() { return std::move(alpn_); }
std::string&& ConsumeSni() { return std::move(sni_); }
std::string&& ConsumeUaid() { return std::move(uaid_); }
private:
std::string alpn_;
std::string sni_;
std::string uaid_;
};
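// Usage note (see TryExtractChloOrBufferEarlyPacket below): for pre-TLS
// versions the dispatcher runs a packet through ChloExtractor::Extract with
// this delegate, then moves the captured fields out:
//
//   ChloAlpnSniExtractor alpn_extractor;
//   if (ChloExtractor::Extract(packet_info.packet, packet_info.version,
//                              config_->create_session_tag_indicators(),
//                              &alpn_extractor,
//                              packet_info.destination_connection_id.length())) {
//     std::string sni = alpn_extractor.ConsumeSni();
//     std::string alpn = alpn_extractor.ConsumeAlpn();
//   }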
}  // namespace
QuicDispatcher::QuicDispatcher(
const QuicConfig* config, const QuicCryptoServerConfig* crypto_config,
QuicVersionManager* version_manager,
std::unique_ptr<QuicConnectionHelperInterface> helper,
std::unique_ptr<QuicCryptoServerStreamBase::Helper> session_helper,
std::unique_ptr<QuicAlarmFactory> alarm_factory,
uint8_t expected_server_connection_id_length,
ConnectionIdGeneratorInterface& connection_id_generator)
: config_(config),
crypto_config_(crypto_config),
compressed_certs_cache_(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize),
helper_(std::move(helper)),
session_helper_(std::move(session_helper)),
alarm_factory_(std::move(alarm_factory)),
delete_sessions_alarm_(
alarm_factory_->CreateAlarm(new DeleteSessionsAlarm(this))),
buffered_packets_(this, helper_->GetClock(), alarm_factory_.get(),
stats_),
version_manager_(version_manager),
last_error_(QUIC_NO_ERROR),
new_sessions_allowed_per_event_loop_(0u),
accept_new_connections_(true),
expected_server_connection_id_length_(
expected_server_connection_id_length),
clear_stateless_reset_addresses_alarm_(alarm_factory_->CreateAlarm(
new ClearStatelessResetAddressesAlarm(this))),
connection_id_generator_(connection_id_generator) {
QUIC_DLOG(INFO) << "Created QuicDispatcher with versions: "
<< ParsedQuicVersionVectorToString(GetSupportedVersions());
}
QuicDispatcher::~QuicDispatcher() {
if (delete_sessions_alarm_ != nullptr) {
delete_sessions_alarm_->PermanentCancel();
}
if (clear_stateless_reset_addresses_alarm_ != nullptr) {
clear_stateless_reset_addresses_alarm_->PermanentCancel();
}
reference_counted_session_map_.clear();
closed_session_list_.clear();
num_sessions_in_session_map_ = 0;
}
void QuicDispatcher::InitializeWithWriter(QuicPacketWriter* writer) {
QUICHE_DCHECK(writer_ == nullptr);
writer_.reset(writer);
buffered_packets_.set_writer(writer);
time_wait_list_manager_.reset(CreateQuicTimeWaitListManager());
}
void QuicDispatcher::ProcessPacket(const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
const QuicReceivedPacket& packet) {
QUIC_DVLOG(2) << "Dispatcher received encrypted " << packet.length()
<< " bytes:" << std::endl
<< quiche::QuicheTextUtils::HexDump(
absl::string_view(packet.data(), packet.length()));
++stats_.packets_processed;
ReceivedPacketInfo packet_info(self_address, peer_address, packet);
std::string detailed_error;
QuicErrorCode error;
error = QuicFramer::ParsePublicHeaderDispatcherShortHeaderLengthUnknown(
packet, &packet_info.form, &packet_info.long_packet_type,
&packet_info.version_flag, &packet_info.use_length_prefix,
&packet_info.version_label, &packet_info.version,
&packet_info.destination_connection_id, &packet_info.source_connection_id,
&packet_info.retry_token, &detailed_error, connection_id_generator_);
if (error != QUIC_NO_ERROR) {
SetLastError(error);
QUIC_DLOG(ERROR) << detailed_error;
return;
}
if (packet_info.destination_connection_id.length() !=
expected_server_connection_id_length_ &&
packet_info.version.IsKnown() &&
!packet_info.version.AllowsVariableLengthConnectionIds()) {
SetLastError(QUIC_INVALID_PACKET_HEADER);
QUIC_DLOG(ERROR) << "Invalid Connection Id Length";
return;
}
if (packet_info.version_flag && IsSupportedVersion(packet_info.version)) {
if (!QuicUtils::IsConnectionIdValidForVersion(
packet_info.destination_connection_id,
packet_info.version.transport_version)) {
SetLastError(QUIC_INVALID_PACKET_HEADER);
QUIC_DLOG(ERROR)
<< "Invalid destination connection ID length for version";
return;
}
if (packet_info.version.SupportsClientConnectionIds() &&
!QuicUtils::IsConnectionIdValidForVersion(
packet_info.source_connection_id,
packet_info.version.transport_version)) {
SetLastError(QUIC_INVALID_PACKET_HEADER);
QUIC_DLOG(ERROR) << "Invalid source connection ID length for version";
return;
}
}
#ifndef NDEBUG
if (ack_buffered_initial_packets()) {
const BufferedPacketList* packet_list =
buffered_packets_.GetPacketList(packet_info.destination_connection_id);
if (packet_list != nullptr &&
packet_list->replaced_connection_id.has_value() &&
*packet_list->replaced_connection_id ==
packet_info.destination_connection_id) {
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets, 4,
8);
++stats_.packets_processed_with_replaced_cid_in_store;
}
}
#endif
if (MaybeDispatchPacket(packet_info)) {
return;
}
if (!packet_info.version_flag &&
IsSupportedVersion(ParsedQuicVersion::Q046())) {
ReceivedPacketInfo gquic_packet_info(self_address, peer_address, packet);
const QuicErrorCode gquic_error = QuicFramer::ParsePublicHeaderDispatcher(
packet, expected_server_connection_id_length_, &gquic_packet_info.form,
&gquic_packet_info.long_packet_type, &gquic_packet_info.version_flag,
&gquic_packet_info.use_length_prefix, &gquic_packet_info.version_label,
&gquic_packet_info.version,
&gquic_packet_info.destination_connection_id,
&gquic_packet_info.source_connection_id, &gquic_packet_info.retry_token,
&detailed_error);
if (gquic_error == QUIC_NO_ERROR) {
if (MaybeDispatchPacket(gquic_packet_info)) {
return;
}
} else {
QUICHE_VLOG(1) << "Tried to parse short header as gQUIC packet: "
<< detailed_error;
}
}
ProcessHeader(&packet_info);
}
namespace {
constexpr bool IsSourceUdpPortBlocked(uint16_t port) {
  // Well-known UDP services that are commonly abused for traffic
  // amplification; packets whose source port matches one of these entries are
  // dropped outright.
  constexpr uint16_t blocked_ports[] = {
      0,      // Invalid source port.
      17,     // Quote of the Day.
      19,     // Chargen.
      53,     // DNS.
      111,    // Portmap.
      123,    // NTP.
      137,    // NetBIOS Name Service.
      138,    // NetBIOS Datagram Service.
      161,    // SNMP.
      389,    // CLDAP.
      500,    // IKE.
      1900,   // SSDP.
      3702,   // WS-Discovery.
      5353,   // mDNS.
      5355,   // LLMNR.
      11211,  // memcache.
  };
constexpr size_t num_blocked_ports = ABSL_ARRAYSIZE(blocked_ports);
constexpr uint16_t highest_blocked_port =
blocked_ports[num_blocked_ports - 1];
if (ABSL_PREDICT_TRUE(port > highest_blocked_port)) {
return false;
}
for (size_t i = 0; i < num_blocked_ports; i++) {
if (port == blocked_ports[i]) {
return true;
}
}
return false;
}
}  // namespace
bool QuicDispatcher::MaybeDispatchPacket(
const ReceivedPacketInfo& packet_info) {
if (IsSourceUdpPortBlocked(packet_info.peer_address.port())) {
QUIC_CODE_COUNT(quic_dropped_blocked_port);
return true;
}
const QuicConnectionId server_connection_id =
packet_info.destination_connection_id;
if (packet_info.version_flag && packet_info.version.IsKnown() &&
IsServerConnectionIdTooShort(server_connection_id)) {
QUICHE_DCHECK(packet_info.version_flag);
QUICHE_DCHECK(packet_info.version.AllowsVariableLengthConnectionIds());
QUIC_DLOG(INFO) << "Packet with short destination connection ID "
<< server_connection_id << " expected "
<< static_cast<int>(expected_server_connection_id_length_);
QUIC_CODE_COUNT(quic_dropped_invalid_small_initial_connection_id);
return true;
}
if (packet_info.version_flag && packet_info.version.IsKnown() &&
!QuicUtils::IsConnectionIdLengthValidForVersion(
server_connection_id.length(),
packet_info.version.transport_version)) {
QUIC_DLOG(INFO) << "Packet with destination connection ID "
<< server_connection_id << " is invalid with version "
<< packet_info.version;
QUIC_CODE_COUNT(quic_dropped_invalid_initial_connection_id);
return true;
}
auto it = reference_counted_session_map_.find(server_connection_id);
if (it != reference_counted_session_map_.end()) {
QUICHE_DCHECK(!buffered_packets_.HasBufferedPackets(server_connection_id));
it->second->ProcessUdpPacket(packet_info.self_address,
packet_info.peer_address, packet_info.packet);
return true;
}
if (buffered_packets_.HasChloForConnection(server_connection_id)) {
EnqueuePacketResult rs = buffered_packets_.EnqueuePacket(
packet_info,
        /*parsed_chlo=*/std::nullopt, ConnectionIdGenerator());
switch (rs) {
case EnqueuePacketResult::SUCCESS:
break;
case EnqueuePacketResult::CID_COLLISION:
QUICHE_DCHECK(false) << "Connection " << server_connection_id
<< " already has a CHLO buffered, but "
"EnqueuePacket returned CID_COLLISION.";
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_PACKETS:
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_CONNECTIONS:
OnBufferPacketFailure(rs, packet_info.destination_connection_id);
break;
}
return true;
}
if (OnFailedToDispatchPacket(packet_info)) {
return true;
}
if (time_wait_list_manager_->IsConnectionIdInTimeWait(server_connection_id)) {
time_wait_list_manager_->ProcessPacket(
packet_info.self_address, packet_info.peer_address,
packet_info.destination_connection_id, packet_info.form,
packet_info.packet.length(), GetPerPacketContext());
return true;
}
if (!accept_new_connections_ && packet_info.version_flag) {
StatelesslyTerminateConnection(
packet_info.self_address, packet_info.peer_address,
packet_info.destination_connection_id, packet_info.form,
packet_info.version_flag, packet_info.use_length_prefix,
packet_info.version, QUIC_HANDSHAKE_FAILED,
"Stop accepting new connections",
quic::QuicTimeWaitListManager::SEND_STATELESS_RESET);
time_wait_list_manager()->ProcessPacket(
packet_info.self_address, packet_info.peer_address,
packet_info.destination_connection_id, packet_info.form,
packet_info.packet.length(), GetPerPacketContext());
OnNewConnectionRejected();
return true;
}
if (packet_info.version_flag) {
if (!IsSupportedVersion(packet_info.version)) {
if (ShouldCreateSessionForUnknownVersion(packet_info)) {
return false;
}
MaybeSendVersionNegotiationPacket(packet_info);
return true;
}
if (crypto_config()->validate_chlo_size() &&
packet_info.form == IETF_QUIC_LONG_HEADER_PACKET &&
packet_info.long_packet_type == INITIAL &&
packet_info.packet.length() < kMinClientInitialPacketLength) {
QUIC_DVLOG(1) << "Dropping initial packet which is too short, length: "
<< packet_info.packet.length();
QUIC_CODE_COUNT(quic_drop_small_initial_packets);
return true;
}
}
return false;
}
void QuicDispatcher::ProcessHeader(ReceivedPacketInfo* packet_info) {
++stats_.packets_processed_with_unknown_cid;
QuicConnectionId server_connection_id =
packet_info->destination_connection_id;
QuicPacketFate fate = ValidityChecks(*packet_info);
QuicErrorCode connection_close_error_code = QUIC_HANDSHAKE_FAILED;
std::string tls_alert_error_detail;
if (fate == kFateProcess) {
ExtractChloResult extract_chlo_result =
TryExtractChloOrBufferEarlyPacket(*packet_info);
auto& parsed_chlo = extract_chlo_result.parsed_chlo;
if (extract_chlo_result.tls_alert.has_value()) {
QUIC_BUG_IF(quic_dispatcher_parsed_chlo_and_tls_alert_coexist_1,
parsed_chlo.has_value())
<< "parsed_chlo and tls_alert should not be set at the same time.";
fate = kFateTimeWait;
uint8_t tls_alert = *extract_chlo_result.tls_alert;
connection_close_error_code = TlsAlertToQuicErrorCode(tls_alert);
tls_alert_error_detail =
absl::StrCat("TLS handshake failure from dispatcher (",
EncryptionLevelToString(ENCRYPTION_INITIAL), ") ",
static_cast<int>(tls_alert), ": ",
SSL_alert_desc_string_long(tls_alert));
} else if (!parsed_chlo.has_value()) {
return;
} else {
fate = ValidityChecksOnFullChlo(*packet_info, *parsed_chlo);
if (fate == kFateProcess) {
ProcessChlo(*std::move(parsed_chlo), packet_info);
return;
}
}
}
switch (fate) {
case kFateProcess:
QUIC_BUG(quic_dispatcher_bad_packet_fate) << fate;
break;
case kFateTimeWait: {
QUIC_DLOG(INFO) << "Adding connection ID " << server_connection_id
<< " to time-wait list.";
QUIC_CODE_COUNT(quic_reject_fate_time_wait);
const std::string& connection_close_error_detail =
tls_alert_error_detail.empty() ? "Reject connection"
: tls_alert_error_detail;
StatelesslyTerminateConnection(
packet_info->self_address, packet_info->peer_address,
server_connection_id, packet_info->form, packet_info->version_flag,
packet_info->use_length_prefix, packet_info->version,
connection_close_error_code, connection_close_error_detail,
quic::QuicTimeWaitListManager::SEND_STATELESS_RESET);
QUICHE_DCHECK(time_wait_list_manager_->IsConnectionIdInTimeWait(
server_connection_id));
time_wait_list_manager_->ProcessPacket(
packet_info->self_address, packet_info->peer_address,
server_connection_id, packet_info->form, packet_info->packet.length(),
GetPerPacketContext());
buffered_packets_.DiscardPackets(server_connection_id);
} break;
case kFateDrop:
break;
}
}
QuicDispatcher::ExtractChloResult
QuicDispatcher::TryExtractChloOrBufferEarlyPacket(
const ReceivedPacketInfo& packet_info) {
ExtractChloResult result;
if (packet_info.version.UsesTls()) {
bool has_full_tls_chlo = false;
std::string sni;
std::vector<uint16_t> supported_groups;
std::vector<uint16_t> cert_compression_algos;
std::vector<std::string> alpns;
bool resumption_attempted = false, early_data_attempted = false;
if (buffered_packets_.HasBufferedPackets(
packet_info.destination_connection_id)) {
has_full_tls_chlo = buffered_packets_.IngestPacketForTlsChloExtraction(
packet_info.destination_connection_id, packet_info.version,
packet_info.packet, &supported_groups, &cert_compression_algos,
&alpns, &sni, &resumption_attempted, &early_data_attempted,
&result.tls_alert);
} else {
TlsChloExtractor tls_chlo_extractor;
tls_chlo_extractor.IngestPacket(packet_info.version, packet_info.packet);
if (tls_chlo_extractor.HasParsedFullChlo()) {
has_full_tls_chlo = true;
supported_groups = tls_chlo_extractor.supported_groups();
cert_compression_algos = tls_chlo_extractor.cert_compression_algos();
alpns = tls_chlo_extractor.alpns();
sni = tls_chlo_extractor.server_name();
resumption_attempted = tls_chlo_extractor.resumption_attempted();
early_data_attempted = tls_chlo_extractor.early_data_attempted();
} else {
result.tls_alert = tls_chlo_extractor.tls_alert();
}
}
if (result.tls_alert.has_value()) {
QUIC_BUG_IF(quic_dispatcher_parsed_chlo_and_tls_alert_coexist_2,
has_full_tls_chlo)
<< "parsed_chlo and tls_alert should not be set at the same time.";
return result;
}
if (GetQuicFlag(quic_allow_chlo_buffering) && !has_full_tls_chlo) {
EnqueuePacketResult rs = buffered_packets_.EnqueuePacket(
packet_info,
          /*parsed_chlo=*/std::nullopt, ConnectionIdGenerator());
switch (rs) {
case EnqueuePacketResult::SUCCESS:
break;
case EnqueuePacketResult::CID_COLLISION:
buffered_packets_.DiscardPackets(
packet_info.destination_connection_id);
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_PACKETS:
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_CONNECTIONS:
OnBufferPacketFailure(rs, packet_info.destination_connection_id);
break;
}
return result;
}
ParsedClientHello& parsed_chlo = result.parsed_chlo.emplace();
parsed_chlo.sni = std::move(sni);
parsed_chlo.supported_groups = std::move(supported_groups);
parsed_chlo.cert_compression_algos = std::move(cert_compression_algos);
parsed_chlo.alpns = std::move(alpns);
if (packet_info.retry_token.has_value()) {
parsed_chlo.retry_token = std::string(*packet_info.retry_token);
}
parsed_chlo.resumption_attempted = resumption_attempted;
parsed_chlo.early_data_attempted = early_data_attempted;
return result;
}
ChloAlpnSniExtractor alpn_extractor;
if (GetQuicFlag(quic_allow_chlo_buffering) &&
!ChloExtractor::Extract(packet_info.packet, packet_info.version,
config_->create_session_tag_indicators(),
&alpn_extractor,
packet_info.destination_connection_id.length())) {
EnqueuePacketResult rs = buffered_packets_.EnqueuePacket(
packet_info,
        /*parsed_chlo=*/std::nullopt, ConnectionIdGenerator());
switch (rs) {
case EnqueuePacketResult::SUCCESS:
break;
case EnqueuePacketResult::CID_COLLISION:
QUIC_BUG(quic_store_cid_collision_from_gquic_packet);
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_PACKETS:
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_CONNECTIONS:
OnBufferPacketFailure(rs, packet_info.destination_connection_id);
break;
}
return result;
}
ParsedClientHello& parsed_chlo = result.parsed_chlo.emplace();
parsed_chlo.sni = alpn_extractor.ConsumeSni();
parsed_chlo.uaid = alpn_extractor.ConsumeUaid();
parsed_chlo.alpns = {alpn_extractor.ConsumeAlpn()};
return result;
}
std::string QuicDispatcher::SelectAlpn(const std::vector<std::string>& alpns) {
if (alpns.empty()) {
return "";
}
if (alpns.size() > 1u) {
const std::vector<std::string>& supported_alpns =
version_manager_->GetSupportedAlpns();
for (const std::string& alpn : alpns) {
if (std::find(supported_alpns.begin(), supported_alpns.end(), alpn) !=
supported_alpns.end()) {
return alpn;
}
}
}
return alpns[0];
}
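// Worked example (derived from the loop above): with client ALPNs
// {"h3-29", "h3"} and server support for only "h3", SelectAlpn returns "h3";
// with no overlap it falls back to the client's first entry; an empty list
// yields "".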
QuicDispatcher::QuicPacketFate QuicDispatcher::ValidityChecks(
const ReceivedPacketInfo& packet_info) {
if (!packet_info.version_flag) {
QUIC_DLOG(INFO)
<< "Packet without version arrived for unknown connection ID "
<< packet_info.destination_connection_id;
MaybeResetPacketsWithNoVersion(packet_info);
return kFateDrop;
}
return kFateProcess;
}
void QuicDispatcher::CleanUpSession(QuicConnectionId server_connection_id,
QuicConnection* connection,
                                    QuicErrorCode /*error*/,
                                    const std::string& /*error_details*/,
                                    ConnectionCloseSource /*source*/) {
write_blocked_list_.Remove(*connection);
QuicTimeWaitListManager::TimeWaitAction action =
QuicTimeWaitListManager::SEND_STATELESS_RESET;
if (connection->termination_packets() != nullptr &&
!connection->termination_packets()->empty()) {
action = QuicTimeWaitListManager::SEND_CONNECTION_CLOSE_PACKETS;
} else {
if (!connection->IsHandshakeComplete()) {
QUIC_CODE_COUNT(quic_v44_add_to_time_wait_list_with_handshake_failed);
StatelessConnectionTerminator terminator(
server_connection_id,
connection->GetOriginalDestinationConnectionId(),
          connection->version(), /*last_sent_packet_number=*/QuicPacketNumber(),
helper_.get(), time_wait_list_manager_.get());
terminator.CloseConnection(
QUIC_HANDSHAKE_FAILED,
"Connection is closed by server before handshake confirmed",
          /*ietf_quic=*/true, connection->GetActiveServerConnectionIds());
return;
}
QUIC_CODE_COUNT(quic_v44_add_to_time_wait_list_with_stateless_reset);
}
time_wait_list_manager_->AddConnectionIdToTimeWait(
action,
TimeWaitConnectionInfo(
          /*ietf_quic=*/true, connection->termination_packets(),
connection->GetActiveServerConnectionIds(),
connection->sent_packet_manager().GetRttStats()->smoothed_rtt()));
}
void QuicDispatcher::StartAcceptingNewConnections() {
accept_new_connections_ = true;
}
void QuicDispatcher::StopAcceptingNewConnections() {
accept_new_connections_ = false;
buffered_packets_.DiscardAllPackets();
}
void QuicDispatcher::PerformActionOnActiveSessions(
quiche::UnretainedCallback<void(QuicSession*)> operation) const {
absl::flat_hash_set<QuicSession*> visited_session;
visited_session.reserve(reference_counted_session_map_.size());
for (auto const& kv : reference_counted_session_map_) {
QuicSession* session = kv.second.get();
if (visited_session.insert(session).second) {
operation(session);
}
}
}
std::vector<std::shared_ptr<QuicSession>> QuicDispatcher::GetSessionsSnapshot()
const {
std::vector<std::shared_ptr<QuicSession>> snapshot;
snapshot.reserve(reference_counted_session_map_.size());
absl::flat_hash_set<QuicSession*> visited_session;
visited_session.reserve(reference_counted_session_map_.size());
for (auto const& kv : reference_counted_session_map_) {
QuicSession* session = kv.second.get();
if (visited_session.insert(session).second) {
snapshot.push_back(kv.second);
}
}
return snapshot;
}
std::unique_ptr<QuicPerPacketContext> QuicDispatcher::GetPerPacketContext()
const {
return nullptr;
}
void QuicDispatcher::DeleteSessions() {
if (!write_blocked_list_.Empty()) {
for (const auto& session : closed_session_list_) {
if (write_blocked_list_.Remove(*session->connection())) {
QUIC_BUG(quic_bug_12724_2)
<< "QuicConnection was in WriteBlockedList before destruction "
<< session->connection()->connection_id();
}
}
}
closed_session_list_.clear();
}
void QuicDispatcher::ClearStatelessResetAddresses() {
recent_stateless_reset_addresses_.clear();
}
void QuicDispatcher::OnCanWrite() {
writer_->SetWritable();
write_blocked_list_.OnWriterUnblocked();
}
bool QuicDispatcher::HasPendingWrites() const {
return !write_blocked_list_.Empty();
}
void QuicDispatcher::Shutdown() {
while (!reference_counted_session_map_.empty()) {
QuicSession* session = reference_counted_session_map_.begin()->second.get();
session->connection()->CloseConnection(
QUIC_PEER_GOING_AWAY, "Server shutdown imminent",
ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
QUICHE_DCHECK(reference_counted_session_map_.empty() ||
reference_counted_session_map_.begin()->second.get() !=
session);
}
DeleteSessions();
}
void QuicDispatcher::OnConnectionClosed(QuicConnectionId server_connection_id,
QuicErrorCode error,
const std::string& error_details,
ConnectionCloseSource source) {
auto it = reference_counted_session_map_.find(server_connection_id);
if (it == reference_counted_session_map_.end()) {
QUIC_BUG(quic_bug_10287_3) << "ConnectionId " << server_connection_id
<< " does not exist in the session map. Error: "
<< QuicErrorCodeToString(error);
QUIC_BUG(quic_bug_10287_4) << QuicStackTrace();
return;
}
QUIC_DLOG_IF(INFO, error != QUIC_NO_ERROR)
<< "Closing connection (" << server_connection_id
<< ") due to error: " << QuicErrorCodeToString(error)
<< ", with details: " << error_details;
const QuicSession* session = it->second.get();
QuicConnection* connection = it->second->connection();
if (closed_session_list_.empty()) {
delete_sessions_alarm_->Update(helper()->GetClock()->ApproximateNow(),
QuicTime::Delta::Zero());
}
closed_session_list_.push_back(std::move(it->second));
CleanUpSession(it->first, connection, error, error_details, source);
bool session_removed = false;
for (const QuicConnectionId& cid :
connection->GetActiveServerConnectionIds()) {
auto it1 = reference_counted_session_map_.find(cid);
if (it1 != reference_counted_session_map_.end()) {
const QuicSession* session2 = it1->second.get();
if (session2 == session || cid == server_connection_id) {
reference_counted_session_map_.erase(it1);
session_removed = true;
} else {
QUIC_BUG(quic_dispatcher_session_mismatch)
<< "Session is mismatched in the map. server_connection_id: "
<< server_connection_id << ". Current cid: " << cid
<< ". Cid of the other session "
<< (session2 == nullptr
? "null"
: session2->connection()->connection_id().ToString());
}
} else {
QUIC_BUG_IF(quic_dispatcher_session_not_found,
cid != connection->GetOriginalDestinationConnectionId())
<< "Missing session for cid " << cid
<< ". server_connection_id: " << server_connection_id;
}
}
QUIC_BUG_IF(quic_session_is_not_removed, !session_removed);
--num_sessions_in_session_map_;
}
void QuicDispatcher::OnWriteBlocked(
QuicBlockedWriterInterface* blocked_writer) {
write_blocked_list_.Add(*blocked_writer);
}
void QuicDispatcher::OnRstStreamReceived(
    const QuicRstStreamFrame& /*frame*/) {}
void QuicDispatcher::OnStopSendingReceived(
    const QuicStopSendingFrame& /*frame*/) {}
bool QuicDispatcher::TryAddNewConnectionId(
const QuicConnectionId& server_connection_id,
const QuicConnectionId& new_connection_id) {
auto it = reference_counted_session_map_.find(server_connection_id);
if (it == reference_counted_session_map_.end()) {
QUIC_BUG(quic_bug_10287_7)
<< "Couldn't locate the session that issues the connection ID in "
"reference_counted_session_map_. server_connection_id:"
<< server_connection_id << " new_connection_id: " << new_connection_id;
return false;
}
auto insertion_result = reference_counted_session_map_.insert(
std::make_pair(new_connection_id, it->second));
if (!insertion_result.second) {
QUIC_CODE_COUNT(quic_cid_already_in_session_map);
}
return insertion_result.second;
}
void QuicDispatcher::OnConnectionIdRetired(
const QuicConnectionId& server_connection_id) {
reference_counted_session_map_.erase(server_connection_id);
}
void QuicDispatcher::OnConnectionAddedToTimeWaitList(
QuicConnectionId server_connection_id) {
QUIC_DLOG(INFO) << "Connection " << server_connection_id
<< " added to time wait list.";
}
void QuicDispatcher::StatelesslyTerminateConnection(
const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
QuicConnectionId server_connection_id, PacketHeaderFormat format,
bool version_flag, bool use_length_prefix, ParsedQuicVersion version,
QuicErrorCode error_code, const std::string& error_details,
QuicTimeWaitListManager::TimeWaitAction action) {
const BufferedPacketList* packet_list =
buffered_packets_.GetPacketList(server_connection_id);
if (packet_list == nullptr) {
StatelesslyTerminateConnection(
self_address, peer_address, server_connection_id, format, version_flag,
use_length_prefix, version, error_code, error_details, action,
        /*replaced_connection_id=*/std::nullopt,
        /*last_sent_packet_number=*/QuicPacketNumber());
return;
}
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets, 5, 8);
StatelesslyTerminateConnection(
self_address, peer_address, packet_list->original_connection_id, format,
version_flag, use_length_prefix, version, error_code, error_details,
action, packet_list->replaced_connection_id,
packet_list->GetLastSentPacketNumber());
}
void QuicDispatcher::StatelesslyTerminateConnection(
const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
QuicConnectionId server_connection_id, PacketHeaderFormat format,
bool version_flag, bool use_length_prefix, ParsedQuicVersion version,
QuicErrorCode error_code, const std::string& error_details,
QuicTimeWaitListManager::TimeWaitAction action,
const std::optional<QuicConnectionId>& replaced_connection_id,
QuicPacketNumber last_sent_packet_number) {
if (format != IETF_QUIC_LONG_HEADER_PACKET && !version_flag) {
QUIC_DVLOG(1) << "Statelessly terminating " << server_connection_id
<< " based on a non-ietf-long packet, action:" << action
<< ", error_code:" << error_code
<< ", error_details:" << error_details;
time_wait_list_manager_->AddConnectionIdToTimeWait(
        action,
        TimeWaitConnectionInfo(/*ietf_quic=*/format != GOOGLE_QUIC_PACKET,
                               /*termination_packets=*/nullptr,
                               {server_connection_id}));
return;
}
if (IsSupportedVersion(version)) {
QUIC_DVLOG(1)
<< "Statelessly terminating " << server_connection_id
<< " based on an ietf-long packet, which has a supported version:"
<< version << ", error_code:" << error_code
<< ", error_details:" << error_details << ", replaced_connection_id:"
<< (replaced_connection_id.has_value()
? replaced_connection_id->ToString()
: "n/a");
if (ack_buffered_initial_packets()) {
QuicConnectionId original_connection_id = server_connection_id;
if (last_sent_packet_number.IsInitialized()) {
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets,
6, 8);
}
StatelessConnectionTerminator terminator(
replaced_connection_id.value_or(original_connection_id),
original_connection_id, version, last_sent_packet_number,
helper_.get(), time_wait_list_manager_.get());
std::vector<QuicConnectionId> active_connection_ids = {
original_connection_id};
if (replaced_connection_id.has_value()) {
active_connection_ids.push_back(*replaced_connection_id);
}
terminator.CloseConnection(error_code, error_details,
format != GOOGLE_QUIC_PACKET,
std::move(active_connection_ids));
} else {
StatelessConnectionTerminator terminator(
server_connection_id, server_connection_id, version,
last_sent_packet_number, helper_.get(),
time_wait_list_manager_.get());
terminator.CloseConnection(
error_code, error_details, format != GOOGLE_QUIC_PACKET,
{server_connection_id});
}
QUIC_CODE_COUNT(quic_dispatcher_generated_connection_close);
QuicSession::RecordConnectionCloseAtServer(
error_code, ConnectionCloseSource::FROM_SELF);
OnStatelessConnectionCloseGenerated(self_address, peer_address,
server_connection_id, version,
error_code, error_details);
return;
}
QUIC_DVLOG(1)
<< "Statelessly terminating " << server_connection_id
<< " based on an ietf-long packet, which has an unsupported version:"
<< version << ", error_code:" << error_code
<< ", error_details:" << error_details;
std::vector<std::unique_ptr<QuicEncryptedPacket>> termination_packets;
termination_packets.push_back(QuicFramer::BuildVersionNegotiationPacket(
server_connection_id, EmptyQuicConnectionId(),
      /*ietf_quic=*/format != GOOGLE_QUIC_PACKET, use_length_prefix,
      /*versions=*/{}));
time_wait_list_manager()->AddConnectionIdToTimeWait(
QuicTimeWaitListManager::SEND_TERMINATION_PACKETS,
      TimeWaitConnectionInfo(/*ietf_quic=*/format != GOOGLE_QUIC_PACKET,
                             &termination_packets, {server_connection_id}));
}
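// Default implementation: the base dispatcher never creates sessions for
// packets with unrecognized versions.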
bool QuicDispatcher::ShouldCreateSessionForUnknownVersion(
    const ReceivedPacketInfo& /*packet_info*/) {
return false;
}
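// Called by the buffered packet store when packets buffered for
// |server_connection_id| expire without a complete CHLO; rejects the
// connection statelessly with
// QUIC_HANDSHAKE_FAILED_PACKETS_BUFFERED_TOO_LONG.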
void QuicDispatcher::OnExpiredPackets(
QuicConnectionId server_connection_id,
BufferedPacketList early_arrived_packets) {
QUIC_CODE_COUNT(quic_reject_buffered_packets_expired);
QuicErrorCode error_code = QUIC_HANDSHAKE_FAILED_PACKETS_BUFFERED_TOO_LONG;
QuicSocketAddress self_address, peer_address;
if (!early_arrived_packets.buffered_packets.empty()) {
self_address = early_arrived_packets.buffered_packets.front().self_address;
peer_address = early_arrived_packets.buffered_packets.front().peer_address;
}
if (ack_buffered_initial_packets()) {
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets, 7,
8);
StatelesslyTerminateConnection(
self_address, peer_address,
early_arrived_packets.original_connection_id,
early_arrived_packets.ietf_quic ? IETF_QUIC_LONG_HEADER_PACKET
: GOOGLE_QUIC_PACKET,
        /*version_flag=*/true,
early_arrived_packets.version.HasLengthPrefixedConnectionIds(),
early_arrived_packets.version, error_code,
"Packets buffered for too long",
quic::QuicTimeWaitListManager::SEND_STATELESS_RESET,
early_arrived_packets.replaced_connection_id,
early_arrived_packets.GetLastSentPacketNumber());
} else {
StatelesslyTerminateConnection(
self_address, peer_address, server_connection_id,
early_arrived_packets.ietf_quic ? IETF_QUIC_LONG_HEADER_PACKET
: GOOGLE_QUIC_PACKET,
        /*version_flag=*/true,
early_arrived_packets.version.HasLengthPrefixedConnectionIds(),
early_arrived_packets.version, error_code,
"Packets buffered for too long",
quic::QuicTimeWaitListManager::SEND_STATELESS_RESET);
}
}
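// Creates sessions for connections with buffered CHLOs, bounded by
// |max_connections_to_create| per event loop, and replays each connection's
// buffered packets into its newly created session.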
void QuicDispatcher::ProcessBufferedChlos(size_t max_connections_to_create) {
new_sessions_allowed_per_event_loop_ = max_connections_to_create;
for (; new_sessions_allowed_per_event_loop_ > 0;
--new_sessions_allowed_per_event_loop_) {
QuicConnectionId server_connection_id;
BufferedPacketList packet_list =
buffered_packets_.DeliverPacketsForNextConnection(
&server_connection_id);
const std::list<BufferedPacket>& packets = packet_list.buffered_packets;
if (packets.empty()) {
return;
}
if (!packet_list.parsed_chlo.has_value()) {
QUIC_BUG(quic_dispatcher_no_parsed_chlo_in_buffered_packets)
<< "Buffered connection has no CHLO. connection_id:"
<< server_connection_id;
continue;
}
auto session_ptr = CreateSessionFromChlo(
server_connection_id, packet_list.replaced_connection_id,
*packet_list.parsed_chlo, packet_list.version,
packets.front().self_address, packets.front().peer_address,
packet_list.tls_chlo_extractor.state(),
packet_list.connection_id_generator,
packet_list.dispatcher_sent_packets);
if (session_ptr != nullptr) {
DeliverPacketsToSession(packets, session_ptr.get());
}
}
}
bool QuicDispatcher::HasChlosBuffered() const {
return buffered_packets_.HasChlosBuffered();
}
bool QuicDispatcher::HasBufferedPackets(QuicConnectionId server_connection_id) {
return buffered_packets_.HasBufferedPackets(server_connection_id);
}
void QuicDispatcher::OnBufferPacketFailure(
EnqueuePacketResult result, QuicConnectionId server_connection_id) {
QUIC_DLOG(INFO) << "Fail to buffer packet on connection "
<< server_connection_id << " because of " << result;
}
QuicTimeWaitListManager* QuicDispatcher::CreateQuicTimeWaitListManager() {
return new QuicTimeWaitListManager(writer_.get(), this, helper_->GetClock(),
alarm_factory_.get());
}
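// Handles a fully parsed CHLO: buffers it if the per-event-loop session
// budget is exhausted, otherwise creates a session and delivers the current
// packet plus any packets previously buffered for the same connection.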
void QuicDispatcher::ProcessChlo(ParsedClientHello parsed_chlo,
ReceivedPacketInfo* packet_info) {
if (GetQuicFlag(quic_allow_chlo_buffering) &&
new_sessions_allowed_per_event_loop_ <= 0) {
QUIC_BUG_IF(quic_bug_12724_7, buffered_packets_.HasChloForConnection(
packet_info->destination_connection_id));
EnqueuePacketResult rs = buffered_packets_.EnqueuePacket(
*packet_info, std::move(parsed_chlo), ConnectionIdGenerator());
switch (rs) {
case EnqueuePacketResult::SUCCESS:
break;
case EnqueuePacketResult::CID_COLLISION:
buffered_packets_.DiscardPackets(
packet_info->destination_connection_id);
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_PACKETS:
ABSL_FALLTHROUGH_INTENDED;
case EnqueuePacketResult::TOO_MANY_CONNECTIONS:
OnBufferPacketFailure(rs, packet_info->destination_connection_id);
break;
}
return;
}
BufferedPacketList packet_list =
buffered_packets_.DeliverPackets(packet_info->destination_connection_id);
QuicConnectionId original_connection_id =
packet_list.buffered_packets.empty()
? packet_info->destination_connection_id
: packet_list.original_connection_id;
TlsChloExtractor::State chlo_extractor_state =
packet_list.buffered_packets.empty()
? TlsChloExtractor::State::kParsedFullSinglePacketChlo
: packet_list.tls_chlo_extractor.state();
auto session_ptr = CreateSessionFromChlo(
original_connection_id, packet_list.replaced_connection_id, parsed_chlo,
packet_info->version, packet_info->self_address,
packet_info->peer_address, chlo_extractor_state,
packet_list.connection_id_generator, packet_list.dispatcher_sent_packets);
if (session_ptr == nullptr) {
QUICHE_DCHECK_EQ(packet_list.connection_id_generator, nullptr);
return;
}
session_ptr->ProcessUdpPacket(packet_info->self_address,
packet_info->peer_address, packet_info->packet);
DeliverPacketsToSession(packet_list.buffered_packets, session_ptr.get());
--new_sessions_allowed_per_event_loop_;
}
void QuicDispatcher::SetLastError(QuicErrorCode error) { last_error_ = error; }
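// Default implementation: the base dispatcher does not salvage packets that
// could not be dispatched to a session or to the time wait list.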
bool QuicDispatcher::OnFailedToDispatchPacket(
    const ReceivedPacketInfo& /*packet_info*/) {
return false;
}
const ParsedQuicVersionVector& QuicDispatcher::GetSupportedVersions() {
return version_manager_->GetSupportedVersions();
}
void QuicDispatcher::DeliverPacketsToSession(
const std::list<BufferedPacket>& packets, QuicSession* session) {
for (const BufferedPacket& packet : packets) {
session->ProcessUdpPacket(packet.self_address, packet.peer_address,
*(packet.packet));
}
}
bool QuicDispatcher::IsSupportedVersion(const ParsedQuicVersion version) {
for (const ParsedQuicVersion& supported_version :
version_manager_->GetSupportedVersions()) {
if (version == supported_version) {
return true;
}
}
return false;
}
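// A connection ID counts as "too short" only if it is below the minimum
// initial length, below this dispatcher's expected length, and below the
// length the connection ID generator derives from its first byte.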
bool QuicDispatcher::IsServerConnectionIdTooShort(
QuicConnectionId connection_id) const {
if (connection_id.length() >= kQuicMinimumInitialConnectionIdLength ||
connection_id.length() >= expected_server_connection_id_length_) {
return false;
}
uint8_t generator_output =
connection_id.IsEmpty()
? connection_id_generator_.ConnectionIdLength(0x00)
: connection_id_generator_.ConnectionIdLength(
static_cast<uint8_t>(*connection_id.data()));
return connection_id.length() < generator_output;
}
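// Creates and registers a session once a complete CHLO is available. May ask
// the connection ID generator to replace the client-chosen ID, bails out on
// connection ID collisions, and, when the ID was replaced, maps both the
// original and the replacement IDs to the session.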
std::shared_ptr<QuicSession> QuicDispatcher::CreateSessionFromChlo(
const QuicConnectionId original_connection_id,
const std::optional<QuicConnectionId>& replaced_connection_id,
const ParsedClientHello& parsed_chlo, const ParsedQuicVersion version,
const QuicSocketAddress self_address, const QuicSocketAddress peer_address,
TlsChloExtractor::State chlo_extractor_state,
ConnectionIdGeneratorInterface* connection_id_generator,
absl::Span<const DispatcherSentPacket> dispatcher_sent_packets) {
bool should_generate_cid = false;
if (connection_id_generator == nullptr) {
should_generate_cid = true;
connection_id_generator = &ConnectionIdGenerator();
}
std::optional<QuicConnectionId> server_connection_id;
if (should_generate_cid) {
server_connection_id = connection_id_generator->MaybeReplaceConnectionId(
original_connection_id, version);
if (server_connection_id.has_value() &&
(server_connection_id->IsEmpty() ||
*server_connection_id == original_connection_id)) {
server_connection_id.reset();
}
QUIC_DVLOG(1) << "MaybeReplaceConnectionId(" << original_connection_id
<< ") = "
<< (server_connection_id.has_value()
? server_connection_id->ToString()
: "nullopt");
if (server_connection_id.has_value()) {
switch (HandleConnectionIdCollision(
original_connection_id, *server_connection_id, self_address,
peer_address, version, &parsed_chlo)) {
case VisitorInterface::HandleCidCollisionResult::kOk:
break;
case VisitorInterface::HandleCidCollisionResult::kCollision:
return nullptr;
}
}
} else {
server_connection_id = replaced_connection_id;
}
const bool connection_id_replaced = server_connection_id.has_value();
if (!connection_id_replaced) {
server_connection_id = original_connection_id;
}
std::string alpn = SelectAlpn(parsed_chlo.alpns);
std::unique_ptr<QuicSession> session =
CreateQuicSession(*server_connection_id, self_address, peer_address, alpn,
version, parsed_chlo, *connection_id_generator);
if (ABSL_PREDICT_FALSE(session == nullptr)) {
QUIC_BUG(quic_bug_10287_8)
<< "CreateQuicSession returned nullptr for " << *server_connection_id
<< " from " << peer_address << " to " << self_address << " ALPN \""
<< alpn << "\" version " << version;
return nullptr;
}
++stats_.sessions_created;
if (chlo_extractor_state ==
TlsChloExtractor::State::kParsedFullMultiPacketChlo) {
QUIC_CODE_COUNT(quic_connection_created_multi_packet_chlo);
session->connection()->SetMultiPacketClientHello();
} else {
QUIC_CODE_COUNT(quic_connection_created_single_packet_chlo);
}
if (ack_buffered_initial_packets() && !dispatcher_sent_packets.empty()) {
QUIC_RESTART_FLAG_COUNT_N(quic_dispatcher_ack_buffered_initial_packets, 8,
8);
session->connection()->AddDispatcherSentPackets(dispatcher_sent_packets);
}
if (connection_id_replaced) {
session->connection()->SetOriginalDestinationConnectionId(
original_connection_id);
}
session->connection()->OnParsedClientHelloInfo(parsed_chlo);
QUIC_DLOG(INFO) << "Created new session for " << *server_connection_id;
auto insertion_result = reference_counted_session_map_.insert(std::make_pair(
*server_connection_id, std::shared_ptr<QuicSession>(std::move(session))));
std::shared_ptr<QuicSession> session_ptr = insertion_result.first->second;
if (!insertion_result.second) {
QUIC_BUG(quic_bug_10287_9)
<< "Tried to add a session to session_map with existing "
"connection id: "
<< *server_connection_id;
} else {
++num_sessions_in_session_map_;
if (connection_id_replaced) {
auto insertion_result2 = reference_counted_session_map_.insert(
std::make_pair(original_connection_id, session_ptr));
QUIC_BUG_IF(quic_460317833_02, !insertion_result2.second)
<< "Original connection ID already in session_map: "
<< original_connection_id;
}
}
return session_ptr;
}
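// Detects whether |replaced_connection_id| collides with an active or
// buffered connection. On collision, logs diagnostics and statelessly closes
// the new connection so the client can retry with a fresh connection ID.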
QuicDispatcher::HandleCidCollisionResult
QuicDispatcher::HandleConnectionIdCollision(
const QuicConnectionId& original_connection_id,
const QuicConnectionId& replaced_connection_id,
const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address, ParsedQuicVersion version,
const ParsedClientHello* parsed_chlo) {
HandleCidCollisionResult result = HandleCidCollisionResult::kOk;
auto existing_session_iter =
reference_counted_session_map_.find(replaced_connection_id);
if (existing_session_iter != reference_counted_session_map_.end()) {
result = HandleCidCollisionResult::kCollision;
QUIC_CODE_COUNT(quic_connection_id_collision);
QuicConnection* other_connection =
existing_session_iter->second->connection();
if (other_connection != nullptr) {
QUIC_LOG_EVERY_N_SEC(ERROR, 10)
<< "QUIC Connection ID collision. original_connection_id:"
<< original_connection_id
<< ", replaced_connection_id:" << replaced_connection_id
<< ", version:" << version << ", self_address:" << self_address
<< ", peer_address:" << peer_address << ", parsed_chlo:"
<< (parsed_chlo == nullptr ? "null" : parsed_chlo->ToString())
<< ", other peer address: " << other_connection->peer_address()
<< ", other CIDs: "
<< quiche::PrintElements(
other_connection->GetActiveServerConnectionIds())
<< ", other stats: " << other_connection->GetStats();
}
} else if (buffered_packets_.HasBufferedPackets(replaced_connection_id)) {
result = HandleCidCollisionResult::kCollision;
QUIC_CODE_COUNT(quic_connection_id_collision_with_buffered_session);
}
if (result == HandleCidCollisionResult::kOk) {
return result;
}
const bool collide_with_active_session =
existing_session_iter != reference_counted_session_map_.end();
QUIC_DLOG(INFO) << "QUIC Connection ID collision with "
<< (collide_with_active_session ? "active session"
: "buffered session")
<< " for original_connection_id:" << original_connection_id
<< ", replaced_connection_id:" << replaced_connection_id;
StatelesslyTerminateConnection(
self_address, peer_address, original_connection_id,
IETF_QUIC_LONG_HEADER_PACKET,
      /*version_flag=*/true, version.HasLengthPrefixedConnectionIds(), version,
QUIC_HANDSHAKE_FAILED, "Connection ID collision, please retry",
QuicTimeWaitListManager::SEND_CONNECTION_CLOSE_PACKETS);
return result;
}
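// Possibly sends a public (stateless) reset for a packet without a version:
// drops packets too small to legitimately elicit a reset and rate-limits
// resets per peer address via a bounded, periodically cleared set.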
void QuicDispatcher::MaybeResetPacketsWithNoVersion(
const ReceivedPacketInfo& packet_info) {
QUICHE_DCHECK(!packet_info.version_flag);
if (recent_stateless_reset_addresses_.contains(packet_info.peer_address)) {
QUIC_CODE_COUNT(quic_donot_send_reset_repeatedly);
return;
}
if (packet_info.form != GOOGLE_QUIC_PACKET) {
if (packet_info.packet.length() <=
QuicFramer::GetMinStatelessResetPacketLength()) {
QUIC_CODE_COUNT(quic_drop_too_small_short_header_packets);
return;
}
} else {
const size_t MinValidPacketLength =
kPacketHeaderTypeSize + expected_server_connection_id_length_ +
        PACKET_1BYTE_PACKET_NUMBER + /*payload size=*/1 + /*tag size=*/12;
if (packet_info.packet.length() < MinValidPacketLength) {
QUIC_CODE_COUNT(drop_too_small_packets);
return;
}
}
if (recent_stateless_reset_addresses_.size() >=
GetQuicFlag(quic_max_recent_stateless_reset_addresses)) {
QUIC_CODE_COUNT(quic_too_many_recent_reset_addresses);
return;
}
if (recent_stateless_reset_addresses_.empty()) {
clear_stateless_reset_addresses_alarm_->Update(
helper()->GetClock()->ApproximateNow() +
QuicTime::Delta::FromMilliseconds(
GetQuicFlag(quic_recent_stateless_reset_addresses_lifetime_ms)),
QuicTime::Delta::Zero());
}
recent_stateless_reset_addresses_.emplace(packet_info.peer_address);
time_wait_list_manager()->SendPublicReset(
packet_info.self_address, packet_info.peer_address,
packet_info.destination_connection_id,
packet_info.form != GOOGLE_QUIC_PACKET, packet_info.packet.length(),
GetPerPacketContext());
}
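// Sends a version negotiation packet, unless CHLO size validation is enabled
// and the triggering packet is too small (an anti-amplification safeguard).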
void QuicDispatcher::MaybeSendVersionNegotiationPacket(
const ReceivedPacketInfo& packet_info) {
if (crypto_config()->validate_chlo_size() &&
packet_info.packet.length() < kMinPacketSizeForVersionNegotiation) {
return;
}
time_wait_list_manager()->SendVersionNegotiationPacket(
packet_info.destination_connection_id, packet_info.source_connection_id,
packet_info.form != GOOGLE_QUIC_PACKET, packet_info.use_length_prefix,
GetSupportedVersions(), packet_info.self_address,
packet_info.peer_address, GetPerPacketContext());
}
size_t QuicDispatcher::NumSessions() const {
return num_sessions_in_session_map_;
}
} | #include "quiche/quic/core/quic_dispatcher.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <list>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/chlo_extractor.h"
#include "quiche/quic/core/connection_id_generator.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h"
#include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/frames/quic_connection_close_frame.h"
#include "quiche/quic/core/http/quic_server_session_base.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_crypto_server_stream_base.h"
#include "quiche/quic/core/quic_crypto_stream.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packet_writer.h"
#include "quiche/quic/core/quic_packet_writer_wrapper.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_time_wait_list_manager.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_version_manager.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/first_flight.h"
#include "quiche/quic/test_tools/mock_connection_id_generator.h"
#include "quiche/quic/test_tools/mock_quic_time_wait_list_manager.h"
#include "quiche/quic/test_tools/quic_buffered_packet_store_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_dispatcher_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/tools/quic_simple_crypto_server_stream_helper.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
using testing::_;
using testing::AllOf;
using testing::ByMove;
using testing::ElementsAreArray;
using testing::Eq;
using testing::Field;
using testing::InSequence;
using testing::Invoke;
using testing::IsEmpty;
using testing::NiceMock;
using testing::Not;
using testing::Ref;
using testing::Return;
using testing::ReturnRef;
using testing::WithArg;
using testing::WithoutArgs;
static const size_t kDefaultMaxConnectionsInStore = 100;
static const size_t kMaxConnectionsWithoutCHLO =
kDefaultMaxConnectionsInStore / 2;
static const int16_t kMaxNumSessionsToCreate = 16;
namespace quic {
namespace test {
namespace {
const QuicConnectionId kReturnConnectionId{
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}};
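// A QuicServerSessionBase with mocked stream creation and close callbacks,
// letting tests observe how the dispatcher drives the session lifecycle.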
class TestQuicSpdyServerSession : public QuicServerSessionBase {
public:
TestQuicSpdyServerSession(const QuicConfig& config,
QuicConnection* connection,
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache)
: QuicServerSessionBase(config, CurrentSupportedVersions(), connection,
nullptr, nullptr, crypto_config,
compressed_certs_cache) {
Initialize();
}
TestQuicSpdyServerSession(const TestQuicSpdyServerSession&) = delete;
TestQuicSpdyServerSession& operator=(const TestQuicSpdyServerSession&) =
delete;
~TestQuicSpdyServerSession() override { DeleteConnection(); }
MOCK_METHOD(void, OnConnectionClosed,
(const QuicConnectionCloseFrame& frame,
ConnectionCloseSource source),
(override));
MOCK_METHOD(QuicSpdyStream*, CreateIncomingStream, (QuicStreamId id),
(override));
MOCK_METHOD(QuicSpdyStream*, CreateIncomingStream, (PendingStream*),
(override));
MOCK_METHOD(QuicSpdyStream*, CreateOutgoingBidirectionalStream, (),
(override));
MOCK_METHOD(QuicSpdyStream*, CreateOutgoingUnidirectionalStream, (),
(override));
std::unique_ptr<QuicCryptoServerStreamBase> CreateQuicCryptoServerStream(
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache) override {
return CreateCryptoServerStream(crypto_config, compressed_certs_cache, this,
stream_helper());
}
QuicCryptoServerStreamBase::Helper* stream_helper() {
return QuicServerSessionBase::stream_helper();
}
};
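// A QuicDispatcher whose session creation and connection ID generator are
// mockable, with a custom per-packet context used to exercise context
// save/restore.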
class TestDispatcher : public QuicDispatcher {
public:
TestDispatcher(const QuicConfig* config,
const QuicCryptoServerConfig* crypto_config,
QuicVersionManager* version_manager, QuicRandom* random,
ConnectionIdGeneratorInterface& generator)
: QuicDispatcher(config, crypto_config, version_manager,
std::make_unique<MockQuicConnectionHelper>(),
std::unique_ptr<QuicCryptoServerStreamBase::Helper>(
new QuicSimpleCryptoServerStreamHelper()),
std::make_unique<TestAlarmFactory>(),
kQuicDefaultConnectionIdLength, generator),
random_(random) {
EXPECT_CALL(*this, ConnectionIdGenerator())
.WillRepeatedly(ReturnRef(generator));
}
MOCK_METHOD(std::unique_ptr<QuicSession>, CreateQuicSession,
(QuicConnectionId connection_id,
const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address, absl::string_view alpn,
const ParsedQuicVersion& version,
const ParsedClientHello& parsed_chlo,
ConnectionIdGeneratorInterface& connection_id_generator),
(override));
MOCK_METHOD(ConnectionIdGeneratorInterface&, ConnectionIdGenerator, (),
(override));
struct TestQuicPerPacketContext : public QuicPerPacketContext {
std::string custom_packet_context;
};
std::unique_ptr<QuicPerPacketContext> GetPerPacketContext() const override {
auto test_context = std::make_unique<TestQuicPerPacketContext>();
test_context->custom_packet_context = custom_packet_context_;
return std::move(test_context);
}
void RestorePerPacketContext(
std::unique_ptr<QuicPerPacketContext> context) override {
TestQuicPerPacketContext* test_context =
static_cast<TestQuicPerPacketContext*>(context.get());
custom_packet_context_ = test_context->custom_packet_context;
}
std::string custom_packet_context_;
using QuicDispatcher::ConnectionIdGenerator;
using QuicDispatcher::MaybeDispatchPacket;
using QuicDispatcher::writer;
QuicRandom* random_;
};
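// A mock server connection that mirrors connection ID additions and
// retirements into the dispatcher, keeping its CID maps consistent in tests.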
class MockServerConnection : public MockQuicConnection {
public:
MockServerConnection(QuicConnectionId connection_id,
MockQuicConnectionHelper* helper,
MockAlarmFactory* alarm_factory,
QuicDispatcher* dispatcher)
: MockQuicConnection(connection_id, helper, alarm_factory,
Perspective::IS_SERVER),
dispatcher_(dispatcher),
active_connection_ids_({connection_id}) {}
void AddNewConnectionId(QuicConnectionId id) {
if (!dispatcher_->TryAddNewConnectionId(active_connection_ids_.back(),
id)) {
return;
}
QuicConnectionPeer::SetServerConnectionId(this, id);
active_connection_ids_.push_back(id);
}
void UnconditionallyAddNewConnectionIdForTest(QuicConnectionId id) {
dispatcher_->TryAddNewConnectionId(active_connection_ids_.back(), id);
active_connection_ids_.push_back(id);
}
void RetireConnectionId(QuicConnectionId id) {
auto it = std::find(active_connection_ids_.begin(),
active_connection_ids_.end(), id);
QUICHE_DCHECK(it != active_connection_ids_.end());
dispatcher_->OnConnectionIdRetired(id);
active_connection_ids_.erase(it);
}
std::vector<QuicConnectionId> GetActiveServerConnectionIds() const override {
std::vector<QuicConnectionId> result;
for (const auto& cid : active_connection_ids_) {
result.push_back(cid);
}
auto original_connection_id = GetOriginalDestinationConnectionId();
if (std::find(result.begin(), result.end(), original_connection_id) ==
result.end()) {
result.push_back(original_connection_id);
}
return result;
}
void UnregisterOnConnectionClosed() {
QUIC_LOG(ERROR) << "Unregistering " << connection_id();
dispatcher_->OnConnectionClosed(connection_id(), QUIC_NO_ERROR,
"Unregistering.",
ConnectionCloseSource::FROM_SELF);
}
private:
QuicDispatcher* dispatcher_;
std::vector<QuicConnectionId> active_connection_ids_;
};
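// Base fixture: owns a TestDispatcher plus helpers for constructing and
// feeding packets (including full client first flights) into it.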
class QuicDispatcherTestBase : public QuicTestWithParam<ParsedQuicVersion> {
public:
QuicDispatcherTestBase()
: QuicDispatcherTestBase(crypto_test_utils::ProofSourceForTesting()) {}
explicit QuicDispatcherTestBase(std::unique_ptr<ProofSource> proof_source)
: QuicDispatcherTestBase(std::move(proof_source),
AllSupportedVersions()) {}
explicit QuicDispatcherTestBase(
const ParsedQuicVersionVector& supported_versions)
: QuicDispatcherTestBase(crypto_test_utils::ProofSourceForTesting(),
supported_versions) {}
explicit QuicDispatcherTestBase(
std::unique_ptr<ProofSource> proof_source,
const ParsedQuicVersionVector& supported_versions)
: version_(GetParam()),
version_manager_(supported_versions),
crypto_config_(QuicCryptoServerConfig::TESTING,
QuicRandom::GetInstance(), std::move(proof_source),
KeyExchangeSource::Default()),
server_address_(QuicIpAddress::Any4(), 5),
dispatcher_(new NiceMock<TestDispatcher>(
&config_, &crypto_config_, &version_manager_,
mock_helper_.GetRandomGenerator(), connection_id_generator_)),
time_wait_list_manager_(nullptr),
session1_(nullptr),
session2_(nullptr),
store_(nullptr),
connection_id_(1) {}
void SetUp() override {
dispatcher_->InitializeWithWriter(new NiceMock<MockPacketWriter>());
QuicDispatcherPeer::set_new_sessions_allowed_per_event_loop(
dispatcher_.get(), kMaxNumSessionsToCreate);
}
MockQuicConnection* connection1() {
if (session1_ == nullptr) {
return nullptr;
}
return reinterpret_cast<MockQuicConnection*>(session1_->connection());
}
MockQuicConnection* connection2() {
if (session2_ == nullptr) {
return nullptr;
}
return reinterpret_cast<MockQuicConnection*>(session2_->connection());
}
void ProcessPacket(QuicSocketAddress peer_address,
QuicConnectionId server_connection_id,
bool has_version_flag, const std::string& data) {
ProcessPacket(peer_address, server_connection_id, has_version_flag, data,
CONNECTION_ID_PRESENT, PACKET_4BYTE_PACKET_NUMBER);
}
void ProcessPacket(QuicSocketAddress peer_address,
QuicConnectionId server_connection_id,
bool has_version_flag, const std::string& data,
QuicConnectionIdIncluded server_connection_id_included,
QuicPacketNumberLength packet_number_length) {
ProcessPacket(peer_address, server_connection_id, has_version_flag, data,
server_connection_id_included, packet_number_length, 1);
}
void ProcessPacket(QuicSocketAddress peer_address,
QuicConnectionId server_connection_id,
bool has_version_flag, const std::string& data,
QuicConnectionIdIncluded server_connection_id_included,
QuicPacketNumberLength packet_number_length,
uint64_t packet_number) {
ProcessPacket(peer_address, server_connection_id, has_version_flag,
version_, data, true, server_connection_id_included,
packet_number_length, packet_number);
}
void ProcessPacket(QuicSocketAddress peer_address,
QuicConnectionId server_connection_id,
bool has_version_flag, ParsedQuicVersion version,
const std::string& data, bool full_padding,
QuicConnectionIdIncluded server_connection_id_included,
QuicPacketNumberLength packet_number_length,
uint64_t packet_number) {
ProcessPacket(peer_address, server_connection_id, EmptyQuicConnectionId(),
has_version_flag, version, data, full_padding,
server_connection_id_included, CONNECTION_ID_ABSENT,
packet_number_length, packet_number);
}
void ProcessPacket(QuicSocketAddress peer_address,
QuicConnectionId server_connection_id,
QuicConnectionId client_connection_id,
bool has_version_flag, ParsedQuicVersion version,
const std::string& data, bool full_padding,
QuicConnectionIdIncluded server_connection_id_included,
QuicConnectionIdIncluded client_connection_id_included,
QuicPacketNumberLength packet_number_length,
uint64_t packet_number) {
ParsedQuicVersionVector versions(SupportedVersions(version));
std::unique_ptr<QuicEncryptedPacket> packet(ConstructEncryptedPacket(
server_connection_id, client_connection_id, has_version_flag, false,
packet_number, data, full_padding, server_connection_id_included,
client_connection_id_included, packet_number_length, &versions));
std::unique_ptr<QuicReceivedPacket> received_packet(
ConstructReceivedPacket(*packet, mock_helper_.GetClock()->Now()));
if (!has_version_flag || !version.AllowsVariableLengthConnectionIds() ||
server_connection_id.length() == 0 ||
server_connection_id_included == CONNECTION_ID_ABSENT) {
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(_))
.WillRepeatedly(Return(generated_connection_id_.has_value()
? generated_connection_id_->length()
: kQuicDefaultConnectionIdLength));
}
ProcessReceivedPacket(std::move(received_packet), peer_address, version,
server_connection_id);
}
void ProcessReceivedPacket(
std::unique_ptr<QuicReceivedPacket> received_packet,
const QuicSocketAddress& peer_address, const ParsedQuicVersion& version,
const QuicConnectionId& server_connection_id) {
if (version.UsesQuicCrypto() &&
ChloExtractor::Extract(*received_packet, version, {}, nullptr,
server_connection_id.length())) {
data_connection_map_[server_connection_id].push_front(
std::string(received_packet->data(), received_packet->length()));
} else {
data_connection_map_[server_connection_id].push_back(
std::string(received_packet->data(), received_packet->length()));
}
dispatcher_->ProcessPacket(server_address_, peer_address, *received_packet);
}
void ValidatePacket(QuicConnectionId conn_id,
const QuicEncryptedPacket& packet) {
EXPECT_EQ(data_connection_map_[conn_id].front().length(),
packet.AsStringPiece().length());
EXPECT_EQ(data_connection_map_[conn_id].front(), packet.AsStringPiece());
data_connection_map_[conn_id].pop_front();
}
std::unique_ptr<QuicSession> CreateSession(
TestDispatcher* dispatcher, const QuicConfig& config,
      QuicConnectionId connection_id,
      const QuicSocketAddress& /*peer_address*/,
MockQuicConnectionHelper* helper, MockAlarmFactory* alarm_factory,
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache,
TestQuicSpdyServerSession** session_ptr) {
MockServerConnection* connection = new MockServerConnection(
connection_id, helper, alarm_factory, dispatcher);
connection->SetQuicPacketWriter(dispatcher->writer(),
                                    /*owns_writer=*/false);
auto session = std::make_unique<TestQuicSpdyServerSession>(
config, connection, crypto_config, compressed_certs_cache);
*session_ptr = session.get();
connection->set_visitor(session.get());
ON_CALL(*connection, CloseConnection(_, _, _))
.WillByDefault(WithoutArgs(Invoke(
connection, &MockServerConnection::UnregisterOnConnectionClosed)));
return session;
}
void CreateTimeWaitListManager() {
time_wait_list_manager_ = new MockTimeWaitListManager(
QuicDispatcherPeer::GetWriter(dispatcher_.get()), dispatcher_.get(),
mock_helper_.GetClock(), &mock_alarm_factory_);
QuicDispatcherPeer::SetTimeWaitListManager(dispatcher_.get(),
time_wait_list_manager_);
}
std::string SerializeCHLO() {
CryptoHandshakeMessage client_hello;
client_hello.set_tag(kCHLO);
client_hello.SetStringPiece(kALPN, ExpectedAlpn());
return std::string(client_hello.GetSerialized().AsStringPiece());
}
void ProcessUndecryptableEarlyPacket(
const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
ProcessUndecryptableEarlyPacket(version_, peer_address,
server_connection_id);
}
void ProcessUndecryptableEarlyPacket(
const ParsedQuicVersion& version, const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
std::unique_ptr<QuicEncryptedPacket> encrypted_packet =
GetUndecryptableEarlyPacket(version, server_connection_id);
std::unique_ptr<QuicReceivedPacket> received_packet(ConstructReceivedPacket(
*encrypted_packet, mock_helper_.GetClock()->Now()));
ProcessReceivedPacket(std::move(received_packet), peer_address, version,
server_connection_id);
}
void ProcessFirstFlight(const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
ProcessFirstFlight(version_, peer_address, server_connection_id);
}
void ProcessFirstFlight(const ParsedQuicVersion& version,
const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
ProcessFirstFlight(version, peer_address, server_connection_id,
EmptyQuicConnectionId());
}
void ProcessFirstFlight(const ParsedQuicVersion& version,
const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id,
const QuicConnectionId& client_connection_id) {
ProcessFirstFlight(version, peer_address, server_connection_id,
client_connection_id, TestClientCryptoConfig());
}
void ProcessFirstFlight(
const ParsedQuicVersion& version, const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id,
const QuicConnectionId& client_connection_id,
std::unique_ptr<QuicCryptoClientConfig> client_crypto_config) {
if (expect_generator_is_called_) {
if (version.AllowsVariableLengthConnectionIds()) {
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(server_connection_id, version))
.WillOnce(Return(generated_connection_id_));
} else {
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(server_connection_id, version))
.WillOnce(Return(std::nullopt));
}
}
std::vector<std::unique_ptr<QuicReceivedPacket>> packets =
GetFirstFlightOfPackets(version, DefaultQuicConfig(),
server_connection_id, client_connection_id,
std::move(client_crypto_config));
for (auto&& packet : packets) {
ProcessReceivedPacket(std::move(packet), peer_address, version,
server_connection_id);
}
}
std::unique_ptr<QuicCryptoClientConfig> TestClientCryptoConfig() {
auto client_crypto_config = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting());
if (address_token_.has_value()) {
client_crypto_config->LookupOrCreate(TestServerId())
->set_source_address_token(*address_token_);
}
return client_crypto_config;
}
void SetAddressToken(std::string address_token) {
address_token_ = std::move(address_token);
}
std::string ExpectedAlpnForVersion(ParsedQuicVersion version) {
return AlpnForVersion(version);
}
std::string ExpectedAlpn() { return ExpectedAlpnForVersion(version_); }
auto MatchParsedClientHello() {
if (version_.UsesQuicCrypto()) {
return AllOf(
Field(&ParsedClientHello::alpns, ElementsAreArray({ExpectedAlpn()})),
Field(&ParsedClientHello::sni, Eq(TestHostname())),
Field(&ParsedClientHello::supported_groups, IsEmpty()));
}
return AllOf(
Field(&ParsedClientHello::alpns, ElementsAreArray({ExpectedAlpn()})),
Field(&ParsedClientHello::sni, Eq(TestHostname())),
Field(&ParsedClientHello::supported_groups, Not(IsEmpty())));
}
void MarkSession1Deleted() { session1_ = nullptr; }
void VerifyVersionSupported(ParsedQuicVersion version) {
expect_generator_is_called_ = true;
QuicConnectionId connection_id = TestConnectionId(++connection_id_);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(connection_id, _, client_address,
Eq(ExpectedAlpnForVersion(version)), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, connection_id, client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, connection_id](const QuicEncryptedPacket& packet) {
ValidatePacket(connection_id, packet);
})));
ProcessFirstFlight(version, client_address, connection_id);
}
void VerifyVersionNotSupported(ParsedQuicVersion version) {
QuicConnectionId connection_id = TestConnectionId(++connection_id_);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(connection_id, _, client_address, _, _, _, _))
.Times(0);
expect_generator_is_called_ = false;
ProcessFirstFlight(version, client_address, connection_id);
}
void TestTlsMultiPacketClientHello(bool add_reordering,
bool long_connection_id);
void TestVersionNegotiationForUnknownVersionInvalidShortInitialConnectionId(
const QuicConnectionId& server_connection_id,
const QuicConnectionId& client_connection_id);
TestAlarmFactory::TestAlarm* GetClearResetAddressesAlarm() {
return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
QuicDispatcherPeer::GetClearResetAddressesAlarm(dispatcher_.get()));
}
ParsedQuicVersion version_;
MockQuicConnectionHelper mock_helper_;
MockAlarmFactory mock_alarm_factory_;
QuicConfig config_;
QuicVersionManager version_manager_;
QuicCryptoServerConfig crypto_config_;
QuicSocketAddress server_address_;
bool expect_generator_is_called_ = true;
std::optional<QuicConnectionId> generated_connection_id_;
MockConnectionIdGenerator connection_id_generator_;
std::unique_ptr<NiceMock<TestDispatcher>> dispatcher_;
MockTimeWaitListManager* time_wait_list_manager_;
TestQuicSpdyServerSession* session1_;
TestQuicSpdyServerSession* session2_;
std::map<QuicConnectionId, std::list<std::string>> data_connection_map_;
QuicBufferedPacketStore* store_;
uint64_t connection_id_;
std::optional<std::string> address_token_;
};
class QuicDispatcherTestAllVersions : public QuicDispatcherTestBase {};
class QuicDispatcherTestOneVersion : public QuicDispatcherTestBase {};
class QuicDispatcherTestNoVersions : public QuicDispatcherTestBase {
public:
QuicDispatcherTestNoVersions()
: QuicDispatcherTestBase(ParsedQuicVersionVector{}) {}
};
INSTANTIATE_TEST_SUITE_P(QuicDispatcherTestsAllVersions,
QuicDispatcherTestAllVersions,
::testing::ValuesIn(CurrentSupportedVersions()),
::testing::PrintToStringParamName());
INSTANTIATE_TEST_SUITE_P(QuicDispatcherTestsOneVersion,
QuicDispatcherTestOneVersion,
::testing::Values(CurrentSupportedVersions().front()),
::testing::PrintToStringParamName());
INSTANTIATE_TEST_SUITE_P(QuicDispatcherTestsNoVersion,
QuicDispatcherTestNoVersions,
::testing::ValuesIn(AllSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(QuicDispatcherTestAllVersions, TlsClientHelloCreatesSession) {
if (version_.UsesQuicCrypto()) {
return;
}
SetAddressToken("hsdifghdsaifnasdpfjdsk");
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
OnParsedClientHelloInfo(MatchParsedClientHello()))
.Times(1);
ProcessFirstFlight(client_address, TestConnectionId(1));
}
TEST_P(QuicDispatcherTestAllVersions,
TlsClientHelloCreatesSessionWithCorrectConnectionIdGenerator) {
if (version_.UsesQuicCrypto()) {
return;
}
SetAddressToken("hsdifghdsaifnasdpfjdsk");
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
MockConnectionIdGenerator mock_connection_id_generator;
EXPECT_CALL(*dispatcher_, ConnectionIdGenerator())
.WillRepeatedly(ReturnRef(mock_connection_id_generator));
ConnectionIdGeneratorInterface& expected_generator =
mock_connection_id_generator;
EXPECT_CALL(mock_connection_id_generator,
MaybeReplaceConnectionId(TestConnectionId(1), version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(),
Ref(expected_generator)))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
expect_generator_is_called_ = false;
ProcessFirstFlight(client_address, TestConnectionId(1));
}
TEST_P(QuicDispatcherTestAllVersions, VariableServerConnectionIdLength) {
QuicConnectionId old_id = TestConnectionId(1);
if (version_.HasIetfQuicFrames()) {
generated_connection_id_ =
QuicConnectionId({0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
0x09, 0x0a, 0x0b});
}
QuicConnectionId new_id =
generated_connection_id_.has_value() ? *generated_connection_id_ : old_id;
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(new_id, _, client_address, Eq(ExpectedAlpn()),
_, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, new_id, client_address, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, old_id);
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(1);
ProcessPacket(client_address, new_id, false, "foo");
}
void QuicDispatcherTestBase::TestTlsMultiPacketClientHello(
bool add_reordering, bool long_connection_id) {
if (!version_.UsesTls()) {
return;
}
SetAddressToken("857293462398");
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId original_connection_id, new_connection_id;
if (long_connection_id) {
original_connection_id = TestConnectionIdNineBytesLong(1);
new_connection_id = kReturnConnectionId;
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(original_connection_id, version_))
.WillOnce(Return(new_connection_id));
} else {
original_connection_id = TestConnectionId();
new_connection_id = original_connection_id;
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(original_connection_id, version_))
.WillOnce(Return(std::nullopt));
}
QuicConfig client_config = DefaultQuicConfig();
constexpr auto kCustomParameterId =
static_cast<TransportParameters::TransportParameterId>(0xff33);
std::string kCustomParameterValue(2000, '-');
client_config.custom_transport_parameters_to_send()[kCustomParameterId] =
kCustomParameterValue;
std::vector<std::unique_ptr<QuicReceivedPacket>> packets =
GetFirstFlightOfPackets(version_, client_config, original_connection_id,
EmptyQuicConnectionId(),
TestClientCryptoConfig());
ASSERT_EQ(packets.size(), 2u);
if (add_reordering) {
std::swap(packets[0], packets[1]);
}
ProcessReceivedPacket(std::move(packets[0]), client_address, version_,
original_connection_id);
EXPECT_EQ(dispatcher_->NumSessions(), 0u)
<< "No session should be created before the rest of the CHLO arrives.";
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(new_connection_id, _, client_address,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, new_connection_id, client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(2);
ProcessReceivedPacket(std::move(packets[1]), client_address, version_,
original_connection_id);
EXPECT_EQ(dispatcher_->NumSessions(), 1u);
}
TEST_P(QuicDispatcherTestAllVersions, TlsMultiPacketClientHello) {
  TestTlsMultiPacketClientHello(/*add_reordering=*/false,
                                /*long_connection_id=*/false);
}
TEST_P(QuicDispatcherTestAllVersions, TlsMultiPacketClientHelloWithReordering) {
  TestTlsMultiPacketClientHello(/*add_reordering=*/true,
                                /*long_connection_id=*/false);
}
TEST_P(QuicDispatcherTestAllVersions, TlsMultiPacketClientHelloWithLongId) {
  TestTlsMultiPacketClientHello(/*add_reordering=*/false,
                                /*long_connection_id=*/true);
}
TEST_P(QuicDispatcherTestAllVersions,
TlsMultiPacketClientHelloWithReorderingAndLongId) {
  TestTlsMultiPacketClientHello(/*add_reordering=*/true,
                                /*long_connection_id=*/true);
}
TEST_P(QuicDispatcherTestAllVersions, ProcessPackets) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(2), _, client_address,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(2), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session2_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session2_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(2), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(2));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(1)
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessPacket(client_address, TestConnectionId(1), false, "data");
}
TEST_P(QuicDispatcherTestAllVersions, DispatcherDoesNotRejectPacketNumberZero) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(2)
.WillRepeatedly(
WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
ProcessPacket(client_address, TestConnectionId(1), false, version_, "", true,
CONNECTION_ID_PRESENT, PACKET_1BYTE_PACKET_NUMBER, 256);
}
TEST_P(QuicDispatcherTestOneVersion, StatelessVersionNegotiation) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
SendVersionNegotiationPacket(TestConnectionId(1), _, _, _, _, _, _, _))
.Times(1);
expect_generator_is_called_ = false;
ProcessFirstFlight(QuicVersionReservedForNegotiation(), client_address,
TestConnectionId(1));
}
TEST_P(QuicDispatcherTestOneVersion,
StatelessVersionNegotiationWithVeryLongConnectionId) {
QuicConnectionId connection_id = QuicUtils::CreateRandomConnectionId(33);
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(connection_id, _, _, _, _, _, _, _))
.Times(1);
expect_generator_is_called_ = false;
ProcessFirstFlight(QuicVersionReservedForNegotiation(), client_address,
connection_id);
}
TEST_P(QuicDispatcherTestOneVersion,
StatelessVersionNegotiationWithClientConnectionId) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(
TestConnectionId(1), TestConnectionId(2), _, _, _, _, _, _))
.Times(1);
expect_generator_is_called_ = false;
ProcessFirstFlight(QuicVersionReservedForNegotiation(), client_address,
TestConnectionId(1), TestConnectionId(2));
}
TEST_P(QuicDispatcherTestOneVersion, NoVersionNegotiationWithSmallPacket) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(_, _, _, _, _, _, _, _))
.Times(0);
std::string chlo = SerializeCHLO() + std::string(1200, 'a');
QUICHE_DCHECK_LE(1200u, chlo.length());
std::string truncated_chlo = chlo.substr(0, 1100);
QUICHE_DCHECK_EQ(1100u, truncated_chlo.length());
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/true,
                QuicVersionReservedForNegotiation(), truncated_chlo,
                /*full_padding=*/false, CONNECTION_ID_PRESENT,
                PACKET_4BYTE_PACKET_NUMBER, 1);
}
TEST_P(QuicDispatcherTestOneVersion,
VersionNegotiationWithoutChloSizeValidation) {
crypto_config_.set_validate_chlo_size(false);
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(_, _, _, _, _, _, _, _))
.Times(1);
std::string chlo = SerializeCHLO() + std::string(1200, 'a');
QUICHE_DCHECK_LE(1200u, chlo.length());
std::string truncated_chlo = chlo.substr(0, 1100);
QUICHE_DCHECK_EQ(1100u, truncated_chlo.length());
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/true,
                QuicVersionReservedForNegotiation(), truncated_chlo,
                /*full_padding=*/true, CONNECTION_ID_PRESENT,
                PACKET_4BYTE_PACKET_NUMBER, 1);
}
TEST_P(QuicDispatcherTestAllVersions, Shutdown) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
dispatcher_->Shutdown();
}
TEST_P(QuicDispatcherTestAllVersions, TimeWaitListManager) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId connection_id = TestConnectionId(1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(connection_id, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, connection_id, client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, connection_id);
session1_->connection()->CloseConnection(
QUIC_INVALID_VERSION,
"Server: Packet 2 without version flag before version negotiated.",
ConnectionCloseBehavior::SILENT_CLOSE);
EXPECT_TRUE(time_wait_list_manager_->IsConnectionIdInTimeWait(connection_id));
EXPECT_CALL(*time_wait_list_manager_,
ProcessPacket(_, _, connection_id, _, _, _))
.Times(1);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
ProcessPacket(client_address, connection_id, true, "data");
}
TEST_P(QuicDispatcherTestAllVersions, NoVersionPacketToTimeWaitListManager) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId connection_id = TestConnectionId(1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
ProcessPacket(_, _, connection_id, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(1);
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data");
}
TEST_P(QuicDispatcherTestAllVersions,
DonotTimeWaitPacketsWithUnknownConnectionIdAndNoVersion) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
uint8_t short_packet[22] = {0x70, 0xa7, 0x02, 0x6b};
uint8_t valid_size_packet[23] = {0x70, 0xa7, 0x02, 0x6c};
size_t short_packet_len = 21;
QuicReceivedPacket packet(reinterpret_cast<char*>(short_packet),
short_packet_len, QuicTime::Zero());
QuicReceivedPacket packet2(reinterpret_cast<char*>(valid_size_packet),
short_packet_len + 1, QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(0xa7))
.WillOnce(Return(kQuicDefaultConnectionIdLength));
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(0);
dispatcher_->ProcessPacket(server_address_, client_address, packet);
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(0xa7))
.WillOnce(Return(kQuicDefaultConnectionIdLength));
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, packet2);
}
TEST_P(QuicDispatcherTestOneVersion, DropPacketWithInvalidFlags) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
uint8_t all_zero_packet[1200] = {};
QuicReceivedPacket packet(reinterpret_cast<char*>(all_zero_packet),
sizeof(all_zero_packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(_))
.WillOnce(Return(kQuicDefaultConnectionIdLength));
dispatcher_->ProcessPacket(server_address_, client_address, packet);
}
TEST_P(QuicDispatcherTestAllVersions, LimitResetsToSameClientAddress) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress client_address2(QuicIpAddress::Loopback4(), 2);
QuicSocketAddress client_address3(QuicIpAddress::Loopback6(), 1);
QuicConnectionId connection_id = TestConnectionId(1);
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(1);
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data");
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data2");
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data3");
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(2);
  ProcessPacket(client_address2, connection_id, /*has_version_flag=*/false,
                "data");
  ProcessPacket(client_address3, connection_id, /*has_version_flag=*/false,
                "data");
}
TEST_P(QuicDispatcherTestAllVersions,
StopSendingResetOnTooManyRecentAddresses) {
SetQuicFlag(quic_max_recent_stateless_reset_addresses, 2);
const size_t kTestLifeTimeMs = 10;
SetQuicFlag(quic_recent_stateless_reset_addresses_lifetime_ms,
kTestLifeTimeMs);
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicSocketAddress client_address2(QuicIpAddress::Loopback4(), 2);
QuicSocketAddress client_address3(QuicIpAddress::Loopback6(), 1);
QuicConnectionId connection_id = TestConnectionId(1);
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(2);
EXPECT_FALSE(GetClearResetAddressesAlarm()->IsSet());
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data");
const QuicTime expected_deadline =
mock_helper_.GetClock()->Now() +
QuicTime::Delta::FromMilliseconds(kTestLifeTimeMs);
ASSERT_TRUE(GetClearResetAddressesAlarm()->IsSet());
EXPECT_EQ(expected_deadline, GetClearResetAddressesAlarm()->deadline());
mock_helper_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  ProcessPacket(client_address2, connection_id, /*has_version_flag=*/false,
                "data");
ASSERT_TRUE(GetClearResetAddressesAlarm()->IsSet());
EXPECT_EQ(expected_deadline, GetClearResetAddressesAlarm()->deadline());
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(0);
  ProcessPacket(client_address3, connection_id, /*has_version_flag=*/false,
                "data");
mock_helper_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
GetClearResetAddressesAlarm()->Fire();
EXPECT_CALL(*time_wait_list_manager_, SendPublicReset(_, _, _, _, _, _))
.Times(2);
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/false,
                "data");
  ProcessPacket(client_address2, connection_id, /*has_version_flag=*/false,
                "data");
  ProcessPacket(client_address3, connection_id, /*has_version_flag=*/false,
                "data");
}
TEST_P(QuicDispatcherTestAllVersions, LongConnectionIdLengthReplaced) {
if (!version_.AllowsVariableLengthConnectionIds()) {
return;
}
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId bad_connection_id = TestConnectionIdNineBytesLong(2);
generated_connection_id_ = kReturnConnectionId;
EXPECT_CALL(*dispatcher_,
CreateQuicSession(*generated_connection_id_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, *generated_connection_id_, client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, bad_connection_id](const QuicEncryptedPacket& packet) {
ValidatePacket(bad_connection_id, packet);
})));
ProcessFirstFlight(client_address, bad_connection_id);
}
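// Packets carrying a supported-length connection ID and packets carrying an
// overlong one arrive interleaved; each must be routed to its own session.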
TEST_P(QuicDispatcherTestAllVersions, MixGoodAndBadConnectionIdLengthPackets) {
if (!version_.AllowsVariableLengthConnectionIds()) {
return;
}
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId bad_connection_id = TestConnectionIdNineBytesLong(2);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
generated_connection_id_ = kReturnConnectionId;
EXPECT_CALL(*dispatcher_,
CreateQuicSession(*generated_connection_id_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, *generated_connection_id_, client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session2_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session2_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, bad_connection_id](const QuicEncryptedPacket& packet) {
ValidatePacket(bad_connection_id, packet);
})));
ProcessFirstFlight(client_address, bad_connection_id);
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(1)
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/false,
                "data");
}
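// Packets with source port 0 are dropped outright: no session is created and
// the time-wait list is never consulted.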
TEST_P(QuicDispatcherTestAllVersions, ProcessPacketWithZeroPort) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 0);
EXPECT_CALL(*dispatcher_, CreateQuicSession(TestConnectionId(1), _,
client_address, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/true,
                "data");
}
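// Source port 17 is treated as a blocked port, so the packet is dropped the
// same way as one from port 0.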
TEST_P(QuicDispatcherTestAllVersions, ProcessPacketWithBlockedPort) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 17);
EXPECT_CALL(*dispatcher_, CreateQuicSession(TestConnectionId(1), _,
client_address, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/true,
                "data");
}
TEST_P(QuicDispatcherTestAllVersions, ProcessPacketWithNonBlockedPort) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 443);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
ProcessFirstFlight(client_address, TestConnectionId(1));
}
TEST_P(QuicDispatcherTestAllVersions,
DropPacketWithKnownVersionAndInvalidShortInitialConnectionId) {
if (!version_.AllowsVariableLengthConnectionIds()) {
return;
}
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(0x00))
.WillOnce(Return(10));
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
expect_generator_is_called_ = false;
ProcessFirstFlight(client_address, EmptyQuicConnectionId());
}
TEST_P(QuicDispatcherTestAllVersions,
DropPacketWithKnownVersionAndInvalidInitialConnectionId) {
CreateTimeWaitListManager();
QuicSocketAddress server_address;
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
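  // Use a 32-byte connection ID, longer than any version accepts.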
absl::string_view cid_str = "123456789abcdefg123456789abcdefg";
QuicConnectionId invalid_connection_id(cid_str.data(), cid_str.length());
QuicReceivedPacket packet("packet", 6, QuicTime::Zero());
ReceivedPacketInfo packet_info(server_address, client_address, packet);
packet_info.version_flag = true;
packet_info.version = version_;
packet_info.destination_connection_id = invalid_connection_id;
ASSERT_TRUE(dispatcher_->MaybeDispatchPacket(packet_info));
}
void QuicDispatcherTestBase::
TestVersionNegotiationForUnknownVersionInvalidShortInitialConnectionId(
const QuicConnectionId& server_connection_id,
const QuicConnectionId& client_connection_id) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(
server_connection_id, client_connection_id,
                  /*ietf_quic=*/true,
                  /*use_length_prefix=*/true, _, _, client_address, _))
.Times(1);
expect_generator_is_called_ = false;
EXPECT_CALL(connection_id_generator_, ConnectionIdLength(_)).Times(0);
ProcessFirstFlight(ParsedQuicVersion::ReservedForNegotiation(),
client_address, server_connection_id,
client_connection_id);
}
TEST_P(QuicDispatcherTestOneVersion,
VersionNegotiationForUnknownVersionInvalidShortInitialConnectionId) {
TestVersionNegotiationForUnknownVersionInvalidShortInitialConnectionId(
EmptyQuicConnectionId(), EmptyQuicConnectionId());
}
TEST_P(QuicDispatcherTestOneVersion,
VersionNegotiationForUnknownVersionInvalidShortInitialConnectionId2) {
char server_connection_id_bytes[3] = {1, 2, 3};
QuicConnectionId server_connection_id(server_connection_id_bytes,
sizeof(server_connection_id_bytes));
TestVersionNegotiationForUnknownVersionInvalidShortInitialConnectionId(
server_connection_id, EmptyQuicConnectionId());
}
TEST_P(QuicDispatcherTestOneVersion,
VersionNegotiationForUnknownVersionInvalidShortInitialConnectionId3) {
char client_connection_id_bytes[8] = {1, 2, 3, 4, 5, 6, 7, 8};
QuicConnectionId client_connection_id(client_connection_id_bytes,
sizeof(client_connection_id_bytes));
TestVersionNegotiationForUnknownVersionInvalidShortInitialConnectionId(
EmptyQuicConnectionId(), client_connection_id);
}
TEST_P(QuicDispatcherTestOneVersion, VersionsChangeInFlight) {
VerifyVersionNotSupported(QuicVersionReservedForNegotiation());
for (ParsedQuicVersion version : CurrentSupportedVersions()) {
VerifyVersionSupported(version);
QuicDisableVersion(version);
VerifyVersionNotSupported(version);
QuicEnableVersion(version);
VerifyVersionSupported(version);
}
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionDraft28WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 0xFF, 0x00, 0x00, 28, /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionDraft27WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 0xFF, 0x00, 0x00, 27, /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionDraft25WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 0xFF, 0x00, 0x00, 25, /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionT050WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 'T', '0', '5', '0', /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ049WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 'Q', '0', '4', '9', /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ048WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 'Q', '0', '4', '8', /*connection ID length byte*/ 0x50};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/false, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ047WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 'Q', '0', '4', '7', /*connection ID length byte*/ 0x50};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/false, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ045WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xC0, 'Q', '0', '4', '5', /*connection ID length byte*/ 0x50};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
ABSL_ARRAYSIZE(packet), QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/false, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ044WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet44[kMinPacketSizeForVersionNegotiation] = {
      0xFF, 'Q', '0', '4', '4', /*connection ID length byte*/ 0x50};
QuicReceivedPacket received_packet44(reinterpret_cast<char*>(packet44),
kMinPacketSizeForVersionNegotiation,
QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/false, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address,
received_packet44);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionQ050WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xFF, 'Q', '0', '5', '0', /*connection ID length byte*/ 0x50};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
kMinPacketSizeForVersionNegotiation,
QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
TEST_P(QuicDispatcherTestOneVersion,
RejectDeprecatedVersionT051WithVersionNegotiation) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
  uint8_t packet[kMinPacketSizeForVersionNegotiation] = {
      0xFF, 'T', '0', '5', '1', /*destination connection ID length*/ 0x08};
QuicReceivedPacket received_packet(reinterpret_cast<char*>(packet),
kMinPacketSizeForVersionNegotiation,
QuicTime::Zero());
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
      SendVersionNegotiationPacket(_, _, /*ietf_quic=*/true,
                                   /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
dispatcher_->ProcessPacket(server_address_, client_address, received_packet);
}
static_assert(quic::SupportedVersions().size() == 4u,
"Please add new RejectDeprecatedVersion tests above this assert "
"when deprecating versions");
TEST_P(QuicDispatcherTestOneVersion, VersionNegotiationProbe) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
CreateTimeWaitListManager();
char packet[1200];
  // The connection ID bytes spell "VN plz !" in ASCII.
  char destination_connection_id_bytes[] = {0x56, 0x4e, 0x20, 0x70,
                                            0x6c, 0x7a, 0x20, 0x21};
EXPECT_TRUE(QuicFramer::WriteClientVersionNegotiationProbePacket(
packet, sizeof(packet), destination_connection_id_bytes,
sizeof(destination_connection_id_bytes)));
QuicEncryptedPacket encrypted(packet, sizeof(packet), false);
std::unique_ptr<QuicReceivedPacket> received_packet(
ConstructReceivedPacket(encrypted, mock_helper_.GetClock()->Now()));
QuicConnectionId client_connection_id = EmptyQuicConnectionId();
QuicConnectionId server_connection_id(
destination_connection_id_bytes, sizeof(destination_connection_id_bytes));
EXPECT_CALL(*time_wait_list_manager_,
SendVersionNegotiationPacket(
server_connection_id, client_connection_id,
                  /*ietf_quic=*/true, /*use_length_prefix=*/true, _, _, _, _))
.Times(1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
dispatcher_->ProcessPacket(server_address_, client_address, *received_packet);
}
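// Writer that records every packet it is asked to send instead of sending it,
// so tests can parse the dispatcher's responses directly.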
class SavingWriter : public QuicPacketWriterWrapper {
public:
bool IsWriteBlocked() const override { return false; }
  WriteResult WritePacket(const char* buffer, size_t buf_len,
                          const QuicIpAddress& /*self_address*/,
                          const QuicSocketAddress& /*peer_address*/,
                          PerPacketOptions* /*options*/,
                          const QuicPacketWriterParams& /*params*/) override {
    packets_.push_back(
        QuicEncryptedPacket(buffer, buf_len, /*owns_buffer=*/false).Clone());
return WriteResult(WRITE_STATUS_OK, buf_len);
}
std::vector<std::unique_ptr<QuicEncryptedPacket>>* packets() {
return &packets_;
}
private:
std::vector<std::unique_ptr<QuicEncryptedPacket>> packets_;
};
TEST_P(QuicDispatcherTestOneVersion, VersionNegotiationProbeEndToEnd) {
SavingWriter* saving_writer = new SavingWriter();
QuicDispatcherPeer::UseWriter(dispatcher_.get(), saving_writer);
QuicTimeWaitListManager* time_wait_list_manager = new QuicTimeWaitListManager(
saving_writer, dispatcher_.get(), mock_helper_.GetClock(),
&mock_alarm_factory_);
QuicDispatcherPeer::SetTimeWaitListManager(dispatcher_.get(),
time_wait_list_manager);
char packet[1200] = {};
  // The connection ID bytes spell "VN plz !" in ASCII.
  char destination_connection_id_bytes[] = {0x56, 0x4e, 0x20, 0x70,
                                            0x6c, 0x7a, 0x20, 0x21};
EXPECT_TRUE(QuicFramer::WriteClientVersionNegotiationProbePacket(
packet, sizeof(packet), destination_connection_id_bytes,
sizeof(destination_connection_id_bytes)));
QuicEncryptedPacket encrypted(packet, sizeof(packet), false);
std::unique_ptr<QuicReceivedPacket> received_packet(
ConstructReceivedPacket(encrypted, mock_helper_.GetClock()->Now()));
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
dispatcher_->ProcessPacket(server_address_, client_address, *received_packet);
ASSERT_EQ(1u, saving_writer->packets()->size());
char source_connection_id_bytes[255] = {};
uint8_t source_connection_id_length = sizeof(source_connection_id_bytes);
std::string detailed_error = "foobar";
EXPECT_TRUE(QuicFramer::ParseServerVersionNegotiationProbeResponse(
(*(saving_writer->packets()))[0]->data(),
(*(saving_writer->packets()))[0]->length(), source_connection_id_bytes,
&source_connection_id_length, &detailed_error));
EXPECT_EQ("", detailed_error);
quiche::test::CompareCharArraysWithHexError(
"parsed probe", source_connection_id_bytes, source_connection_id_length,
destination_connection_id_bytes, sizeof(destination_connection_id_bytes));
}
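// End-to-end version of the conformance check: the version negotiation
// response to an Android UDP network conformance test probe must echo the
// probe's 8-byte connection ID at the expected offset.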
TEST_P(QuicDispatcherTestOneVersion, AndroidConformanceTest) {
SavingWriter* saving_writer = new SavingWriter();
QuicDispatcherPeer::UseWriter(dispatcher_.get(), saving_writer);
QuicTimeWaitListManager* time_wait_list_manager = new QuicTimeWaitListManager(
saving_writer, dispatcher_.get(), mock_helper_.GetClock(),
&mock_alarm_factory_);
QuicDispatcherPeer::SetTimeWaitListManager(dispatcher_.get(),
time_wait_list_manager);
  static const unsigned char packet[1200] = {
      // IETF-invariants long header carrying a reserved (unknown) version,
      // mirroring the Android UDP network conformance test probe:
      0xc0,  // long header
      0xaa, 0xda, 0xca, 0xca,  // reserved version
      0x08,  // destination connection ID length
      0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,  // destination CID
      0x00,  // source connection ID length
  };
QuicEncryptedPacket encrypted(reinterpret_cast<const char*>(packet),
sizeof(packet), false);
std::unique_ptr<QuicReceivedPacket> received_packet(
ConstructReceivedPacket(encrypted, mock_helper_.GetClock()->Now()));
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
dispatcher_->ProcessPacket(server_address_, client_address, *received_packet);
ASSERT_EQ(1u, saving_writer->packets()->size());
ASSERT_GE((*(saving_writer->packets()))[0]->length(), 15u);
quiche::test::CompareCharArraysWithHexError(
"response connection ID", &(*(saving_writer->packets()))[0]->data()[7], 8,
reinterpret_cast<const char*>(&packet[6]), 8);
}
TEST_P(QuicDispatcherTestOneVersion, AndroidConformanceTestOld) {
SavingWriter* saving_writer = new SavingWriter();
QuicDispatcherPeer::UseWriter(dispatcher_.get(), saving_writer);
QuicTimeWaitListManager* time_wait_list_manager = new QuicTimeWaitListManager(
saving_writer, dispatcher_.get(), mock_helper_.GetClock(),
&mock_alarm_factory_);
QuicDispatcherPeer::SetTimeWaitListManager(dispatcher_.get(),
time_wait_list_manager);
  static const unsigned char packet[1200] = {
      // gQUIC (Q043-and-below) header carrying an unknown version, as sent by
      // older Android UDP network conformance tests:
      0x0d,  // public flags: version present, 8-byte connection ID
      0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,  // connection ID
      0xaa, 0xda, 0xca, 0xaa,  // reserved version
      0x01,  // 1-byte packet number
      0x00, 0x07,  // payload (never parsed; the version is unknown)
  };
QuicEncryptedPacket encrypted(reinterpret_cast<const char*>(packet),
sizeof(packet), false);
std::unique_ptr<QuicReceivedPacket> received_packet(
ConstructReceivedPacket(encrypted, mock_helper_.GetClock()->Now()));
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
dispatcher_->ProcessPacket(server_address_, client_address, *received_packet);
ASSERT_EQ(1u, saving_writer->packets()->size());
static const char connection_id_bytes[] = {0x71, 0x72, 0x73, 0x74,
0x75, 0x76, 0x77, 0x78};
ASSERT_GE((*(saving_writer->packets()))[0]->length(),
1u + sizeof(connection_id_bytes));
quiche::test::CompareCharArraysWithHexError(
"response connection ID", &(*(saving_writer->packets()))[0]->data()[1],
sizeof(connection_id_bytes), connection_id_bytes,
sizeof(connection_id_bytes));
}
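// A first packet without full padding is too small to be a valid initial and
// must be dropped without any response.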
TEST_P(QuicDispatcherTestAllVersions, DoNotProcessSmallPacket) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, SendPacket(_, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/true,
                version_, SerializeCHLO(), /*full_padding=*/false,
                CONNECTION_ID_PRESENT, PACKET_4BYTE_PACKET_NUMBER, 1);
}
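// A coalesced packet whose trailing part is undersized must not trigger a
// time-wait response for that small fragment.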
TEST_P(QuicDispatcherTestAllVersions, ProcessSmallCoalescedPacket) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*time_wait_list_manager_, SendPacket(_, _, _)).Times(0);
  uint8_t coalesced_packet[1200] = {
      // First coalesced packet:
      0xC3,  // long header with 4-byte packet number
      'Q', '0', '9', '9',  // unsupported version
      0x08,  // destination connection ID length
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,  // destination CID
      0x00,  // source connection ID length
      0x05,  // packet length
      0x12, 0x34, 0x56, 0x78,  // packet number
      0x00,  // padding
      // Second (small) coalesced packet:
      0xC3,  // long header with 4-byte packet number
      'Q', '0', '9', '9',  // unsupported version
      0x08,  // destination connection ID length
      0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,  // destination CID
      0x00,  // source connection ID length
      0x1E,  // packet length (exceeds the bytes that actually follow)
      0x12, 0x34, 0x56, 0x79,  // packet number
  };
QuicReceivedPacket packet(reinterpret_cast<char*>(coalesced_packet), 1200,
QuicTime::Zero());
dispatcher_->ProcessPacket(server_address_, client_address, packet);
}
TEST_P(QuicDispatcherTestAllVersions, StopAcceptingNewConnections) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
dispatcher_->StopAcceptingNewConnections();
EXPECT_FALSE(dispatcher_->accept_new_connections());
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(2), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.Times(0u);
expect_generator_is_called_ = false;
ProcessFirstFlight(client_address, TestConnectionId(2));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(1u)
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
  ProcessPacket(client_address, TestConnectionId(1), /*has_version_flag=*/false,
                "data");
}
TEST_P(QuicDispatcherTestAllVersions, StartAcceptingNewConnections) {
dispatcher_->StopAcceptingNewConnections();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(2), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.Times(0u);
expect_generator_is_called_ = false;
ProcessFirstFlight(client_address, TestConnectionId(2));
dispatcher_->StartAcceptingNewConnections();
EXPECT_TRUE(dispatcher_->accept_new_connections());
expect_generator_is_called_ = true;
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(1), _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
}
TEST_P(QuicDispatcherTestOneVersion, SelectAlpn) {
EXPECT_EQ(QuicDispatcherPeer::SelectAlpn(dispatcher_.get(), {}), "");
EXPECT_EQ(QuicDispatcherPeer::SelectAlpn(dispatcher_.get(), {""}), "");
EXPECT_EQ(QuicDispatcherPeer::SelectAlpn(dispatcher_.get(), {"hq"}), "hq");
QuicEnableVersion(ParsedQuicVersion::Q046());
EXPECT_EQ(
QuicDispatcherPeer::SelectAlpn(dispatcher_.get(), {"h3-Q033", "h3-Q046"}),
"h3-Q046");
}
TEST_P(QuicDispatcherTestNoVersions, VersionNegotiationFromReservedVersion) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
SendVersionNegotiationPacket(TestConnectionId(1), _, _, _, _, _, _, _))
.Times(1);
expect_generator_is_called_ = false;
ProcessFirstFlight(QuicVersionReservedForNegotiation(), client_address,
TestConnectionId(1));
}
TEST_P(QuicDispatcherTestNoVersions, VersionNegotiationFromRealVersion) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(
*time_wait_list_manager_,
SendVersionNegotiationPacket(TestConnectionId(1), _, _, _, _, _, _, _))
.Times(1);
expect_generator_is_called_ = false;
ProcessFirstFlight(version_, client_address, TestConnectionId(1));
}
class QuicDispatcherTestStrayPacketConnectionId
: public QuicDispatcherTestBase {};
INSTANTIATE_TEST_SUITE_P(QuicDispatcherTestsStrayPacketConnectionId,
QuicDispatcherTestStrayPacketConnectionId,
::testing::ValuesIn(CurrentSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(QuicDispatcherTestStrayPacketConnectionId,
StrayPacketTruncatedConnectionId) {
CreateTimeWaitListManager();
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
QuicConnectionId connection_id = TestConnectionId(1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, _, _, _, _, _)).Times(0);
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, _, _, _, _))
.Times(0);
EXPECT_CALL(*time_wait_list_manager_, AddConnectionIdToTimeWait(_, _))
.Times(0);
  ProcessPacket(client_address, connection_id, /*has_version_flag=*/true,
                "data", CONNECTION_ID_ABSENT, PACKET_4BYTE_PACKET_NUMBER);
}
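// Writer whose blocked state tests flip by hand; actual writes should never
// reach it.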
class BlockingWriter : public QuicPacketWriterWrapper {
public:
BlockingWriter() : write_blocked_(false) {}
bool IsWriteBlocked() const override { return write_blocked_; }
void SetWritable() override { write_blocked_ = false; }
  WriteResult WritePacket(const char* /*buffer*/, size_t /*buf_len*/,
                          const QuicIpAddress& /*self_address*/,
                          const QuicSocketAddress& /*peer_address*/,
                          PerPacketOptions* /*options*/,
                          const QuicPacketWriterParams& /*params*/) override {
QUIC_LOG(DFATAL) << "Not supported";
return WriteResult();
}
bool write_blocked_;
};
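// Fixture that establishes two connections sharing the dispatcher's blocking
// writer and exposes the dispatcher's write-blocked list for direct
// manipulation by the tests.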
class QuicDispatcherWriteBlockedListTest : public QuicDispatcherTestBase {
public:
void SetUp() override {
QuicDispatcherTestBase::SetUp();
writer_ = new BlockingWriter;
QuicDispatcherPeer::UseWriter(dispatcher_.get(), writer_);
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&helper_, &alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(2), client_address,
&helper_, &alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session2_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session2_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(2), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(2));
blocked_list_ = QuicDispatcherPeer::GetWriteBlockedList(dispatcher_.get());
}
void TearDown() override {
if (connection1() != nullptr) {
EXPECT_CALL(*connection1(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
}
if (connection2() != nullptr) {
EXPECT_CALL(*connection2(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
}
dispatcher_->Shutdown();
}
void SetBlocked() {
QUIC_LOG(INFO) << "set writer " << writer_ << " to blocked";
writer_->write_blocked_ = true;
}
void BlockConnection1() {
Connection1Writer()->write_blocked_ = true;
dispatcher_->OnWriteBlocked(connection1());
}
BlockingWriter* Connection1Writer() {
return static_cast<BlockingWriter*>(connection1()->writer());
}
void BlockConnection2() {
Connection2Writer()->write_blocked_ = true;
dispatcher_->OnWriteBlocked(connection2());
}
BlockingWriter* Connection2Writer() {
return static_cast<BlockingWriter*>(connection2()->writer());
}
protected:
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
BlockingWriter* writer_;
QuicBlockedWriterList* blocked_list_;
};
INSTANTIATE_TEST_SUITE_P(QuicDispatcherWriteBlockedListTests,
QuicDispatcherWriteBlockedListTest,
::testing::Values(CurrentSupportedVersions().front()),
::testing::PrintToStringParamName());
TEST_P(QuicDispatcherWriteBlockedListTest, BasicOnCanWrite) {
dispatcher_->OnCanWrite();
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
EXPECT_CALL(*connection1(), OnCanWrite());
dispatcher_->OnCanWrite();
EXPECT_CALL(*connection1(), OnCanWrite()).Times(0);
dispatcher_->OnCanWrite();
EXPECT_FALSE(dispatcher_->HasPendingWrites());
}
TEST_P(QuicDispatcherWriteBlockedListTest, OnCanWriteOrder) {
InSequence s;
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection2());
EXPECT_CALL(*connection1(), OnCanWrite());
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
SetBlocked();
dispatcher_->OnWriteBlocked(connection2());
dispatcher_->OnWriteBlocked(connection1());
EXPECT_CALL(*connection2(), OnCanWrite());
EXPECT_CALL(*connection1(), OnCanWrite());
dispatcher_->OnCanWrite();
}
TEST_P(QuicDispatcherWriteBlockedListTest, OnCanWriteRemove) {
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
blocked_list_->Remove(*connection1());
EXPECT_CALL(*connection1(), OnCanWrite()).Times(0);
dispatcher_->OnCanWrite();
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection2());
blocked_list_->Remove(*connection1());
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
blocked_list_->Remove(*connection1());
dispatcher_->OnWriteBlocked(connection1());
EXPECT_CALL(*connection1(), OnCanWrite()).Times(1);
dispatcher_->OnCanWrite();
}
TEST_P(QuicDispatcherWriteBlockedListTest, DoubleAdd) {
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection1());
blocked_list_->Remove(*connection1());
EXPECT_CALL(*connection1(), OnCanWrite()).Times(0);
dispatcher_->OnCanWrite();
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection1());
EXPECT_CALL(*connection1(), OnCanWrite()).Times(1);
dispatcher_->OnCanWrite();
}
TEST_P(QuicDispatcherWriteBlockedListTest, OnCanWriteHandleBlockConnection1) {
InSequence s;
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection2());
EXPECT_CALL(*connection1(), OnCanWrite())
.WillOnce(
Invoke(this, &QuicDispatcherWriteBlockedListTest::BlockConnection1));
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
EXPECT_TRUE(dispatcher_->HasPendingWrites());
EXPECT_CALL(*connection1(), OnCanWrite());
EXPECT_CALL(*connection2(), OnCanWrite()).Times(0);
dispatcher_->OnCanWrite();
EXPECT_FALSE(dispatcher_->HasPendingWrites());
}
TEST_P(QuicDispatcherWriteBlockedListTest, OnCanWriteHandleBlockConnection2) {
InSequence s;
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection2());
EXPECT_CALL(*connection1(), OnCanWrite());
EXPECT_CALL(*connection2(), OnCanWrite())
.WillOnce(
Invoke(this, &QuicDispatcherWriteBlockedListTest::BlockConnection2));
dispatcher_->OnCanWrite();
EXPECT_TRUE(dispatcher_->HasPendingWrites());
EXPECT_CALL(*connection1(), OnCanWrite()).Times(0);
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
EXPECT_FALSE(dispatcher_->HasPendingWrites());
}
TEST_P(QuicDispatcherWriteBlockedListTest,
OnCanWriteHandleBlockBothConnections) {
InSequence s;
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
dispatcher_->OnWriteBlocked(connection2());
EXPECT_CALL(*connection1(), OnCanWrite())
.WillOnce(
Invoke(this, &QuicDispatcherWriteBlockedListTest::BlockConnection1));
EXPECT_CALL(*connection2(), OnCanWrite())
.WillOnce(
Invoke(this, &QuicDispatcherWriteBlockedListTest::BlockConnection2));
dispatcher_->OnCanWrite();
EXPECT_TRUE(dispatcher_->HasPendingWrites());
EXPECT_CALL(*connection1(), OnCanWrite());
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
EXPECT_FALSE(dispatcher_->HasPendingWrites());
}
TEST_P(QuicDispatcherWriteBlockedListTest, PerConnectionWriterBlocked) {
EXPECT_EQ(dispatcher_->writer(), connection1()->writer());
EXPECT_EQ(dispatcher_->writer(), connection2()->writer());
connection2()->SetQuicPacketWriter(new BlockingWriter, true);
EXPECT_NE(dispatcher_->writer(), connection2()->writer());
BlockConnection2();
EXPECT_TRUE(dispatcher_->HasPendingWrites());
EXPECT_CALL(*connection2(), OnCanWrite());
dispatcher_->OnCanWrite();
EXPECT_FALSE(dispatcher_->HasPendingWrites());
}
TEST_P(QuicDispatcherWriteBlockedListTest,
RemoveConnectionFromWriteBlockedListWhenDeletingSessions) {
EXPECT_QUIC_BUG(
{
dispatcher_->OnConnectionClosed(
connection1()->connection_id(), QUIC_PACKET_WRITE_ERROR,
"Closed by test.", ConnectionCloseSource::FROM_SELF);
SetBlocked();
ASSERT_FALSE(dispatcher_->HasPendingWrites());
SetBlocked();
dispatcher_->OnWriteBlocked(connection1());
ASSERT_TRUE(dispatcher_->HasPendingWrites());
dispatcher_->DeleteSessions();
MarkSession1Deleted();
},
"QuicConnection was in WriteBlockedList before destruction");
}
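// Fixture for connections that own several connection IDs at once; the
// helpers below create two sessions whose IDs the tests then add to, collide,
// and retire.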
class QuicDispatcherSupportMultipleConnectionIdPerConnectionTest
: public QuicDispatcherTestBase {
public:
QuicDispatcherSupportMultipleConnectionIdPerConnectionTest()
: QuicDispatcherTestBase(crypto_test_utils::ProofSourceForTesting()) {
dispatcher_ = std::make_unique<NiceMock<TestDispatcher>>(
&config_, &crypto_config_, &version_manager_,
mock_helper_.GetRandomGenerator(), connection_id_generator_);
}
void AddConnection1() {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(1), client_address,
&helper_, &alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(1), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(1));
}
void AddConnection2() {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 2);
EXPECT_CALL(*dispatcher_, CreateQuicSession(_, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(2), client_address,
&helper_, &alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session2_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session2_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(Invoke([this](const QuicEncryptedPacket& packet) {
ValidatePacket(TestConnectionId(2), packet);
})));
ProcessFirstFlight(client_address, TestConnectionId(2));
}
protected:
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
};
INSTANTIATE_TEST_SUITE_P(
QuicDispatcherSupportMultipleConnectionIdPerConnectionTests,
QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
::testing::Values(CurrentSupportedVersions().front()),
::testing::PrintToStringParamName());
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
FailToAddExistingConnectionId) {
AddConnection1();
EXPECT_FALSE(dispatcher_->TryAddNewConnectionId(TestConnectionId(1),
TestConnectionId(1)));
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
TryAddNewConnectionId) {
AddConnection1();
ASSERT_EQ(dispatcher_->NumSessions(), 1u);
ASSERT_THAT(session1_, testing::NotNull());
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
{
mock_server_connection1->AddNewConnectionId(TestConnectionId(3));
EXPECT_EQ(dispatcher_->NumSessions(), 1u);
auto* session =
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(3));
ASSERT_EQ(session, session1_);
}
{
mock_server_connection1->AddNewConnectionId(TestConnectionId(4));
EXPECT_EQ(dispatcher_->NumSessions(), 1u);
auto* session =
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(4));
ASSERT_EQ(session, session1_);
}
EXPECT_CALL(*connection1(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
dispatcher_->Shutdown();
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
TryAddNewConnectionIdWithCollision) {
AddConnection1();
AddConnection2();
ASSERT_EQ(dispatcher_->NumSessions(), 2u);
ASSERT_THAT(session1_, testing::NotNull());
ASSERT_THAT(session2_, testing::NotNull());
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
MockServerConnection* mock_server_connection2 =
reinterpret_cast<MockServerConnection*>(connection2());
{
mock_server_connection1->UnconditionallyAddNewConnectionIdForTest(
TestConnectionId(2));
EXPECT_EQ(dispatcher_->NumSessions(), 2u);
auto* session =
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(2));
ASSERT_EQ(session, session2_);
EXPECT_THAT(mock_server_connection1->GetActiveServerConnectionIds(),
testing::ElementsAre(TestConnectionId(1), TestConnectionId(2)));
}
{
mock_server_connection2->AddNewConnectionId(TestConnectionId(3));
EXPECT_EQ(dispatcher_->NumSessions(), 2u);
auto* session =
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(3));
ASSERT_EQ(session, session2_);
EXPECT_THAT(mock_server_connection2->GetActiveServerConnectionIds(),
testing::ElementsAre(TestConnectionId(2), TestConnectionId(3)));
}
dispatcher_->OnConnectionClosed(TestConnectionId(2),
QuicErrorCode::QUIC_NO_ERROR, "detail",
quic::ConnectionCloseSource::FROM_SELF);
EXPECT_QUICHE_BUG(dispatcher_->OnConnectionClosed(
TestConnectionId(1), QuicErrorCode::QUIC_NO_ERROR,
"detail", quic::ConnectionCloseSource::FROM_SELF),
"Missing session for cid");
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
MismatchedSessionAfterAddingCollidedConnectionId) {
AddConnection1();
AddConnection2();
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
{
mock_server_connection1->UnconditionallyAddNewConnectionIdForTest(
TestConnectionId(2));
EXPECT_EQ(dispatcher_->NumSessions(), 2u);
auto* session =
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(2));
ASSERT_EQ(session, session2_);
EXPECT_THAT(mock_server_connection1->GetActiveServerConnectionIds(),
testing::ElementsAre(TestConnectionId(1), TestConnectionId(2)));
}
EXPECT_QUIC_BUG(dispatcher_->OnConnectionClosed(
TestConnectionId(1), QuicErrorCode::QUIC_NO_ERROR,
"detail", quic::ConnectionCloseSource::FROM_SELF),
"Session is mismatched in the map");
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
RetireConnectionIdFromSingleConnection) {
AddConnection1();
ASSERT_EQ(dispatcher_->NumSessions(), 1u);
ASSERT_THAT(session1_, testing::NotNull());
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
for (int i = 2; i < 10; ++i) {
mock_server_connection1->AddNewConnectionId(TestConnectionId(i));
ASSERT_EQ(
QuicDispatcherPeer::FindSession(dispatcher_.get(), TestConnectionId(i)),
session1_);
ASSERT_EQ(QuicDispatcherPeer::FindSession(dispatcher_.get(),
TestConnectionId(i - 1)),
session1_);
EXPECT_EQ(dispatcher_->NumSessions(), 1u);
if (i % 2 == 1) {
mock_server_connection1->RetireConnectionId(TestConnectionId(i - 2));
mock_server_connection1->RetireConnectionId(TestConnectionId(i - 1));
}
}
EXPECT_CALL(*connection1(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
dispatcher_->Shutdown();
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
RetireConnectionIdFromMultipleConnections) {
AddConnection1();
AddConnection2();
ASSERT_EQ(dispatcher_->NumSessions(), 2u);
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
MockServerConnection* mock_server_connection2 =
reinterpret_cast<MockServerConnection*>(connection2());
for (int i = 2; i < 10; ++i) {
mock_server_connection1->AddNewConnectionId(TestConnectionId(2 * i - 1));
mock_server_connection2->AddNewConnectionId(TestConnectionId(2 * i));
ASSERT_EQ(QuicDispatcherPeer::FindSession(dispatcher_.get(),
TestConnectionId(2 * i - 1)),
session1_);
ASSERT_EQ(QuicDispatcherPeer::FindSession(dispatcher_.get(),
TestConnectionId(2 * i)),
session2_);
EXPECT_EQ(dispatcher_->NumSessions(), 2u);
mock_server_connection1->RetireConnectionId(TestConnectionId(2 * i - 3));
mock_server_connection2->RetireConnectionId(TestConnectionId(2 * i - 2));
}
mock_server_connection1->AddNewConnectionId(TestConnectionId(19));
mock_server_connection2->AddNewConnectionId(TestConnectionId(20));
EXPECT_CALL(*connection1(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
EXPECT_CALL(*connection2(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
dispatcher_->Shutdown();
}
TEST_P(QuicDispatcherSupportMultipleConnectionIdPerConnectionTest,
       TimeWaitListPopulateCorrectly) {
QuicTimeWaitListManager* time_wait_list_manager =
QuicDispatcherPeer::GetTimeWaitListManager(dispatcher_.get());
AddConnection1();
MockServerConnection* mock_server_connection1 =
reinterpret_cast<MockServerConnection*>(connection1());
mock_server_connection1->AddNewConnectionId(TestConnectionId(2));
mock_server_connection1->AddNewConnectionId(TestConnectionId(3));
mock_server_connection1->AddNewConnectionId(TestConnectionId(4));
mock_server_connection1->RetireConnectionId(TestConnectionId(1));
mock_server_connection1->RetireConnectionId(TestConnectionId(2));
EXPECT_CALL(*connection1(), CloseConnection(QUIC_PEER_GOING_AWAY, _, _));
connection1()->CloseConnection(
QUIC_PEER_GOING_AWAY, "Close for testing",
ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
EXPECT_FALSE(
time_wait_list_manager->IsConnectionIdInTimeWait(TestConnectionId(1)));
EXPECT_FALSE(
time_wait_list_manager->IsConnectionIdInTimeWait(TestConnectionId(2)));
EXPECT_TRUE(
time_wait_list_manager->IsConnectionIdInTimeWait(TestConnectionId(3)));
EXPECT_TRUE(
time_wait_list_manager->IsConnectionIdInTimeWait(TestConnectionId(4)));
dispatcher_->Shutdown();
}
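// Tests for the dispatcher's buffered packet store: packets that arrive before
// a decryptable CHLO are buffered and replayed to the session that is created
// once the CHLO shows up.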
class BufferedPacketStoreTest : public QuicDispatcherTestBase {
public:
BufferedPacketStoreTest()
: QuicDispatcherTestBase(),
client_addr_(QuicIpAddress::Loopback4(), 1234) {}
void ProcessFirstFlight(const ParsedQuicVersion& version,
const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
QuicDispatcherTestBase::ProcessFirstFlight(version, peer_address,
server_connection_id);
}
void ProcessFirstFlight(const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
ProcessFirstFlight(version_, peer_address, server_connection_id);
}
void ProcessFirstFlight(const QuicConnectionId& server_connection_id) {
ProcessFirstFlight(client_addr_, server_connection_id);
}
void ProcessFirstFlight(const ParsedQuicVersion& version,
const QuicConnectionId& server_connection_id) {
ProcessFirstFlight(version, client_addr_, server_connection_id);
}
void ProcessUndecryptableEarlyPacket(
const ParsedQuicVersion& version, const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
QuicDispatcherTestBase::ProcessUndecryptableEarlyPacket(
version, peer_address, server_connection_id);
}
void ProcessUndecryptableEarlyPacket(
const QuicSocketAddress& peer_address,
const QuicConnectionId& server_connection_id) {
ProcessUndecryptableEarlyPacket(version_, peer_address,
server_connection_id);
}
void ProcessUndecryptableEarlyPacket(
const QuicConnectionId& server_connection_id) {
ProcessUndecryptableEarlyPacket(version_, client_addr_,
server_connection_id);
}
protected:
QuicSocketAddress client_addr_;
};
INSTANTIATE_TEST_SUITE_P(BufferedPacketStoreTests, BufferedPacketStoreTest,
::testing::ValuesIn(CurrentSupportedVersions()),
::testing::PrintToStringParamName());
TEST_P(BufferedPacketStoreTest, ProcessNonChloPacketBeforeChlo) {
InSequence s;
QuicConnectionId conn_id = TestConnectionId(1);
ProcessUndecryptableEarlyPacket(conn_id);
EXPECT_EQ(0u, dispatcher_->NumSessions())
<< "No session should be created before CHLO arrives.";
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(conn_id, version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_,
CreateQuicSession(conn_id, _, client_addr_, Eq(ExpectedAlpn()), _,
MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_addr_, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(2)
.WillRepeatedly(
WithArg<2>(Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(conn_id, packet);
}
})));
expect_generator_is_called_ = false;
ProcessFirstFlight(conn_id);
}
TEST_P(BufferedPacketStoreTest, ProcessNonChloPacketsUptoLimitAndProcessChlo) {
InSequence s;
QuicConnectionId conn_id = TestConnectionId(1);
for (size_t i = 1; i <= kDefaultMaxUndecryptablePackets + 1; ++i) {
ProcessUndecryptableEarlyPacket(conn_id);
}
EXPECT_EQ(0u, dispatcher_->NumSessions())
<< "No session should be created before CHLO arrives.";
data_connection_map_[conn_id].pop_back();
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(conn_id, version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_, CreateQuicSession(conn_id, _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_addr_, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(kDefaultMaxUndecryptablePackets + 1)
.WillRepeatedly(
WithArg<2>(Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(conn_id, packet);
}
})));
expect_generator_is_called_ = false;
ProcessFirstFlight(conn_id);
}
TEST_P(BufferedPacketStoreTest,
ProcessNonChloPacketsForDifferentConnectionsUptoLimit) {
InSequence s;
size_t kNumConnections = kMaxConnectionsWithoutCHLO + 1;
for (size_t i = 1; i <= kNumConnections; ++i) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 20000 + i);
QuicConnectionId conn_id = TestConnectionId(i);
ProcessUndecryptableEarlyPacket(client_address, conn_id);
}
data_connection_map_[TestConnectionId(kNumConnections)].pop_front();
QuicDispatcherPeer::set_new_sessions_allowed_per_event_loop(dispatcher_.get(),
kNumConnections);
expect_generator_is_called_ = false;
for (size_t i = 1; i <= kNumConnections; ++i) {
QuicSocketAddress client_address(QuicIpAddress::Loopback4(), 20000 + i);
QuicConnectionId conn_id = TestConnectionId(i);
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(conn_id, version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_, CreateQuicSession(conn_id, _, client_address,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_address, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
size_t num_packet_to_process = i <= kMaxConnectionsWithoutCHLO ? 2u : 1u;
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, client_address, _))
.Times(num_packet_to_process)
.WillRepeatedly(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(conn_id, packet);
}
})));
ProcessFirstFlight(client_address, conn_id);
}
}
TEST_P(BufferedPacketStoreTest, DeliverEmptyPackets) {
QuicConnectionId conn_id = TestConnectionId(1);
EXPECT_CALL(*dispatcher_, CreateQuicSession(conn_id, _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_addr_, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, client_addr_, _));
ProcessFirstFlight(conn_id);
}
TEST_P(BufferedPacketStoreTest, ReceiveRetransmittedCHLO) {
InSequence s;
QuicConnectionId conn_id = TestConnectionId(1);
ProcessUndecryptableEarlyPacket(conn_id);
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(conn_id, version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_, CreateQuicSession(conn_id, _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.Times(1)
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_addr_, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(3)
.WillRepeatedly(
WithArg<2>(Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(conn_id, packet);
}
})));
std::vector<std::unique_ptr<QuicReceivedPacket>> packets =
GetFirstFlightOfPackets(version_, conn_id);
ASSERT_EQ(packets.size(), 1u);
ProcessReceivedPacket(packets[0]->Clone(), client_addr_, version_, conn_id);
ProcessReceivedPacket(std::move(packets[0]), client_addr_, version_, conn_id);
}
TEST_P(BufferedPacketStoreTest, ReceiveCHLOAfterExpiration) {
InSequence s;
CreateTimeWaitListManager();
QuicBufferedPacketStore* store =
QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
QuicBufferedPacketStorePeer::set_clock(store, mock_helper_.GetClock());
QuicConnectionId conn_id = TestConnectionId(1);
  ProcessPacket(client_addr_, conn_id, /*has_version_flag=*/true,
                absl::StrCat("data packet ", 2), CONNECTION_ID_PRESENT,
                PACKET_4BYTE_PACKET_NUMBER, /*packet_number=*/2);
mock_helper_.AdvanceTime(
QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs));
QuicAlarm* alarm = QuicBufferedPacketStorePeer::expiration_alarm(store);
alarm->Cancel();
store->OnExpirationTimeout();
ASSERT_TRUE(time_wait_list_manager_->IsConnectionIdInTimeWait(conn_id));
EXPECT_CALL(*time_wait_list_manager_, ProcessPacket(_, _, conn_id, _, _, _));
expect_generator_is_called_ = false;
ProcessFirstFlight(conn_id);
}
TEST_P(BufferedPacketStoreTest, ProcessCHLOsUptoLimitAndBufferTheRest) {
QuicBufferedPacketStore* store =
QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
const size_t kNumCHLOs =
kMaxNumSessionsToCreate + kDefaultMaxConnectionsInStore + 1;
for (uint64_t conn_id = 1; conn_id <= kNumCHLOs; ++conn_id) {
const bool should_drop =
(conn_id > kMaxNumSessionsToCreate + kDefaultMaxConnectionsInStore);
if (!should_drop) {
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(TestConnectionId(conn_id), version_))
.WillOnce(Return(std::nullopt));
}
if (conn_id <= kMaxNumSessionsToCreate) {
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id),
client_addr_, &mock_helper_, &mock_alarm_factory_,
&crypto_config_, QuicDispatcherPeer::GetCache(dispatcher_.get()),
&session1_))));
EXPECT_CALL(
*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
expect_generator_is_called_ = false;
ProcessFirstFlight(TestConnectionId(conn_id));
if (conn_id <= kMaxNumSessionsToCreate + kDefaultMaxConnectionsInStore &&
conn_id > kMaxNumSessionsToCreate) {
EXPECT_TRUE(store->HasChloForConnection(TestConnectionId(conn_id)));
} else {
EXPECT_FALSE(store->HasChloForConnection(TestConnectionId(conn_id)));
}
}
for (uint64_t conn_id = kMaxNumSessionsToCreate + 1;
conn_id <= kMaxNumSessionsToCreate + kDefaultMaxConnectionsInStore;
++conn_id) {
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id), client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(TestConnectionId(kNumCHLOs), version_))
.Times(0);
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(kNumCHLOs), _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.Times(0);
while (store->HasChlosBuffered()) {
dispatcher_->ProcessBufferedChlos(kMaxNumSessionsToCreate);
}
EXPECT_EQ(TestConnectionId(static_cast<size_t>(kMaxNumSessionsToCreate) +
kDefaultMaxConnectionsInStore),
session1_->connection_id());
}
TEST_P(BufferedPacketStoreTest,
ProcessCHLOsUptoLimitAndBufferWithDifferentConnectionIdGenerator) {
QuicBufferedPacketStore* store =
QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
const size_t kNumCHLOs = kMaxNumSessionsToCreate + 1;
for (uint64_t conn_id = 1; conn_id < kNumCHLOs; ++conn_id) {
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id), client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
ProcessFirstFlight(TestConnectionId(conn_id));
}
uint64_t conn_id = kNumCHLOs;
expect_generator_is_called_ = false;
MockConnectionIdGenerator generator2;
EXPECT_CALL(*dispatcher_, ConnectionIdGenerator())
.WillRepeatedly(ReturnRef(generator2));
const bool buffered_store_replace_cid = version_.UsesTls();
if (buffered_store_replace_cid) {
EXPECT_CALL(generator2,
MaybeReplaceConnectionId(TestConnectionId(conn_id), version_))
.WillOnce(Return(std::nullopt));
}
ProcessFirstFlight(TestConnectionId(conn_id));
EXPECT_TRUE(store->HasChloForConnection(TestConnectionId(conn_id)));
EXPECT_CALL(*dispatcher_, ConnectionIdGenerator())
.WillRepeatedly(ReturnRef(connection_id_generator_));
if (!buffered_store_replace_cid) {
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(TestConnectionId(conn_id), version_))
.WillOnce(Return(std::nullopt));
}
EXPECT_CALL(*dispatcher_, CreateQuicSession(TestConnectionId(conn_id), _,
client_addr_, Eq(ExpectedAlpn()),
_, MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id), client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(
WithArg<2>(Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
while (store->HasChlosBuffered()) {
dispatcher_->ProcessBufferedChlos(kMaxNumSessionsToCreate);
}
}
TEST_P(BufferedPacketStoreTest, BufferDuplicatedCHLO) {
for (uint64_t conn_id = 1; conn_id <= kMaxNumSessionsToCreate + 1;
++conn_id) {
if (conn_id <= kMaxNumSessionsToCreate) {
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id),
client_addr_, &mock_helper_, &mock_alarm_factory_,
&crypto_config_, QuicDispatcherPeer::GetCache(dispatcher_.get()),
&session1_))));
EXPECT_CALL(
*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
ProcessFirstFlight(TestConnectionId(conn_id));
}
QuicConnectionId last_connection =
TestConnectionId(kMaxNumSessionsToCreate + 1);
expect_generator_is_called_ = false;
ProcessFirstFlight(last_connection);
size_t packets_buffered = 2;
EXPECT_CALL(*dispatcher_, CreateQuicSession(last_connection, _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, last_connection, client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(packets_buffered)
.WillRepeatedly(WithArg<2>(
Invoke([this, last_connection](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(last_connection, packet);
}
})));
dispatcher_->ProcessBufferedChlos(kMaxNumSessionsToCreate);
}
TEST_P(BufferedPacketStoreTest, BufferNonChloPacketsUptoLimitWithChloBuffered) {
uint64_t last_conn_id = kMaxNumSessionsToCreate + 1;
QuicConnectionId last_connection_id = TestConnectionId(last_conn_id);
for (uint64_t conn_id = 1; conn_id <= last_conn_id; ++conn_id) {
if (conn_id <= kMaxNumSessionsToCreate) {
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id),
client_addr_, &mock_helper_, &mock_alarm_factory_,
&crypto_config_, QuicDispatcherPeer::GetCache(dispatcher_.get()),
&session1_))));
EXPECT_CALL(
*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillRepeatedly(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
ProcessFirstFlight(TestConnectionId(conn_id));
}
for (uint64_t i = 0; i <= kDefaultMaxUndecryptablePackets; ++i) {
ProcessPacket(client_addr_, last_connection_id, false, "data packet");
}
EXPECT_CALL(*dispatcher_,
CreateQuicSession(last_connection_id, _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, last_connection_id, client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
const QuicBufferedPacketStore* store =
QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
const QuicBufferedPacketStore::BufferedPacketList*
last_connection_buffered_packets =
QuicBufferedPacketStorePeer::FindBufferedPackets(store,
last_connection_id);
ASSERT_NE(last_connection_buffered_packets, nullptr);
ASSERT_EQ(last_connection_buffered_packets->buffered_packets.size(),
kDefaultMaxUndecryptablePackets);
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(last_connection_buffered_packets->buffered_packets.size())
.WillRepeatedly(WithArg<2>(
Invoke([this, last_connection_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(last_connection_id, packet);
}
})));
dispatcher_->ProcessBufferedChlos(kMaxNumSessionsToCreate);
}
TEST_P(BufferedPacketStoreTest, ReceiveCHLOForBufferedConnection) {
QuicBufferedPacketStore* store =
QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
uint64_t conn_id = 1;
ProcessUndecryptableEarlyPacket(TestConnectionId(conn_id));
for (conn_id = 2;
conn_id <= kDefaultMaxConnectionsInStore + kMaxNumSessionsToCreate;
++conn_id) {
if (conn_id <= kMaxNumSessionsToCreate + 1) {
EXPECT_CALL(*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpn()), _, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id),
client_addr_, &mock_helper_, &mock_alarm_factory_,
&crypto_config_, QuicDispatcherPeer::GetCache(dispatcher_.get()),
&session1_))));
EXPECT_CALL(
*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillOnce(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
} else if (!version_.UsesTls()) {
expect_generator_is_called_ = false;
}
ProcessFirstFlight(TestConnectionId(conn_id));
}
EXPECT_FALSE(store->HasChloForConnection(
TestConnectionId(1)));
ProcessFirstFlight(TestConnectionId(1));
EXPECT_TRUE(store->HasChloForConnection(
TestConnectionId(1)));
}
TEST_P(BufferedPacketStoreTest, ProcessBufferedChloWithDifferentVersion) {
QuicDisableVersion(AllSupportedVersions().front());
uint64_t last_connection_id = kMaxNumSessionsToCreate + 5;
ParsedQuicVersionVector supported_versions = CurrentSupportedVersions();
for (uint64_t conn_id = 1; conn_id <= last_connection_id; ++conn_id) {
ParsedQuicVersion version =
supported_versions[(conn_id - 1) % supported_versions.size()];
if (conn_id <= kMaxNumSessionsToCreate) {
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpnForVersion(version)), version, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id),
client_addr_, &mock_helper_, &mock_alarm_factory_,
&crypto_config_, QuicDispatcherPeer::GetCache(dispatcher_.get()),
&session1_))));
EXPECT_CALL(
*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillRepeatedly(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
ProcessFirstFlight(version, TestConnectionId(conn_id));
}
for (uint64_t conn_id = kMaxNumSessionsToCreate + 1;
conn_id <= last_connection_id; ++conn_id) {
ParsedQuicVersion version =
supported_versions[(conn_id - 1) % supported_versions.size()];
EXPECT_CALL(
*dispatcher_,
CreateQuicSession(TestConnectionId(conn_id), _, client_addr_,
Eq(ExpectedAlpnForVersion(version)), version, _, _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, TestConnectionId(conn_id), client_addr_,
&mock_helper_, &mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.WillRepeatedly(WithArg<2>(
Invoke([this, conn_id](const QuicEncryptedPacket& packet) {
if (version_.UsesQuicCrypto()) {
ValidatePacket(TestConnectionId(conn_id), packet);
}
})));
}
dispatcher_->ProcessBufferedChlos(kMaxNumSessionsToCreate);
}
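// Verifies that ECN codepoints recorded on buffered packets (ECT(1) on the
// early undecryptable packet, CE on the CHLO flight) survive buffering and
// reach the session's packet processor.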
TEST_P(BufferedPacketStoreTest, BufferedChloWithEcn) {
if (!version_.HasIetfQuicFrames()) {
return;
}
SetQuicRestartFlag(quic_support_ect1, true);
InSequence s;
QuicConnectionId conn_id = TestConnectionId(1);
std::unique_ptr<QuicEncryptedPacket> encrypted_packet =
GetUndecryptableEarlyPacket(version_, conn_id);
std::unique_ptr<QuicReceivedPacket> received_packet(ConstructReceivedPacket(
*encrypted_packet, mock_helper_.GetClock()->Now(), ECN_ECT1));
ProcessReceivedPacket(std::move(received_packet), client_addr_, version_,
conn_id);
EXPECT_EQ(0u, dispatcher_->NumSessions())
<< "No session should be created before CHLO arrives.";
EXPECT_CALL(connection_id_generator_,
MaybeReplaceConnectionId(conn_id, version_))
.WillOnce(Return(std::nullopt));
EXPECT_CALL(*dispatcher_,
CreateQuicSession(conn_id, _, client_addr_, Eq(ExpectedAlpn()), _,
MatchParsedClientHello(), _))
.WillOnce(Return(ByMove(CreateSession(
dispatcher_.get(), config_, conn_id, client_addr_, &mock_helper_,
&mock_alarm_factory_, &crypto_config_,
QuicDispatcherPeer::GetCache(dispatcher_.get()), &session1_))));
bool got_ect1 = false;
bool got_ce = false;
EXPECT_CALL(*reinterpret_cast<MockQuicConnection*>(session1_->connection()),
ProcessUdpPacket(_, _, _))
.Times(2)
.WillRepeatedly(WithArg<2>(Invoke([&](const QuicReceivedPacket& packet) {
switch (packet.ecn_codepoint()) {
case ECN_ECT1:
got_ect1 = true;
break;
case ECN_CE:
got_ce = true;
break;
default:
break;
}
})));
QuicConnectionId client_connection_id = TestConnectionId(2);
std::vector<std::unique_ptr<QuicReceivedPacket>> packets =
GetFirstFlightOfPackets(version_, DefaultQuicConfig(), conn_id,
client_connection_id, TestClientCryptoConfig(),
ECN_CE);
for (auto&& packet : packets) {
ProcessReceivedPacket(std::move(packet), client_addr_, version_, conn_id);
}
EXPECT_TRUE(got_ect1);
EXPECT_TRUE(got_ce);
}
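// Fixture for the dual-CID cases below: every CHLO's connection ID is
// replaced via replaced_cid_map_, so buffered packets become reachable under
// both the original and the replaced connection ID.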
class DualCIDBufferedPacketStoreTest : public BufferedPacketStoreTest {
protected:
void SetUp() override {
BufferedPacketStoreTest::SetUp();
QuicDispatcherPeer::set_new_sessions_allowed_per_event_loop(
dispatcher_.get(), 0);
expect_generator_is_called_ = false;
EXPECT_CALL(connection_id_generator_, MaybeReplaceConnectionId(_, _))
.WillRepeatedly(Invoke(
this, &DualCIDBufferedPacketStoreTest::ReplaceConnectionIdInTest));
}
std::optional<QuicConnectionId> ReplaceConnectionIdInTest(
const QuicConnectionId& original, const ParsedQuicVersion& version) {
auto it = replaced_cid_map_.find(original);
if (it == replaced_cid_map_.end()) {
ADD_FAILURE() << "Bad test setup: no replacement CID for " << original
<< ", version " << version;
return std::nullopt;
}
return it->second;
}
QuicBufferedPacketStore& store() {
return *QuicDispatcherPeer::GetBufferedPackets(dispatcher_.get());
}
using BufferedPacketList = QuicBufferedPacketStore::BufferedPacketList;
const BufferedPacketList* FindBufferedPackets(
QuicConnectionId connection_id) {
return QuicBufferedPacketStorePeer::FindBufferedPackets(&store(),
connection_id);
}
absl::flat_hash_map<QuicConnectionId, std::optional<QuicConnectionId>>
replaced_cid_map_;
private:
using BufferedPacketStoreTest::expect_generator_is_called_;
};
INSTANTIATE_TEST_SUITE_P(DualCIDBufferedPacketStoreTests,
DualCIDBufferedPacketStoreTest,
::testing::ValuesIn(CurrentSupportedVersionsWithTls()),
::testing::PrintToStringParamName());
TEST_P(DualCIDBufferedPacketStoreTest, CanLookUpByBothCIDs) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
ProcessFirstFlight(TestConnectionId(1));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(1)));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(2)));
const BufferedPacketList* packets1 = FindBufferedPackets(TestConnectionId(1));
const BufferedPacketList* packets2 = FindBufferedPackets(TestConnectionId(2));
EXPECT_EQ(packets1, packets2);
EXPECT_EQ(packets1->original_connection_id, TestConnectionId(1));
EXPECT_EQ(packets1->replaced_connection_id, TestConnectionId(2));
}
TEST_P(DualCIDBufferedPacketStoreTest, DeliverPacketsByOriginalCID) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
ProcessFirstFlight(TestConnectionId(1));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(1)));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(2)));
ASSERT_TRUE(store().HasChloForConnection(TestConnectionId(1)));
ASSERT_TRUE(store().HasChloForConnection(TestConnectionId(2)));
ASSERT_TRUE(store().HasChlosBuffered());
BufferedPacketList packets = store().DeliverPackets(TestConnectionId(1));
EXPECT_EQ(packets.original_connection_id, TestConnectionId(1));
EXPECT_EQ(packets.replaced_connection_id, TestConnectionId(2));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(1)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(2)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(1)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(2)));
EXPECT_FALSE(store().HasChlosBuffered());
}
TEST_P(DualCIDBufferedPacketStoreTest, DeliverPacketsByReplacedCID) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
replaced_cid_map_[TestConnectionId(3)] = TestConnectionId(4);
ProcessFirstFlight(TestConnectionId(1));
ProcessFirstFlight(TestConnectionId(3));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(1)));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(3)));
ASSERT_TRUE(store().HasChloForConnection(TestConnectionId(1)));
ASSERT_TRUE(store().HasChloForConnection(TestConnectionId(3)));
ASSERT_TRUE(store().HasChlosBuffered());
BufferedPacketList packets2 = store().DeliverPackets(TestConnectionId(2));
EXPECT_EQ(packets2.original_connection_id, TestConnectionId(1));
EXPECT_EQ(packets2.replaced_connection_id, TestConnectionId(2));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(1)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(2)));
EXPECT_TRUE(store().HasBufferedPackets(TestConnectionId(3)));
EXPECT_TRUE(store().HasBufferedPackets(TestConnectionId(4)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(1)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(2)));
EXPECT_TRUE(store().HasChloForConnection(TestConnectionId(3)));
EXPECT_TRUE(store().HasChloForConnection(TestConnectionId(4)));
EXPECT_TRUE(store().HasChlosBuffered());
BufferedPacketList packets4 = store().DeliverPackets(TestConnectionId(4));
EXPECT_EQ(packets4.original_connection_id, TestConnectionId(3));
EXPECT_EQ(packets4.replaced_connection_id, TestConnectionId(4));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(3)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(4)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(3)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(4)));
EXPECT_FALSE(store().HasChlosBuffered());
}
TEST_P(DualCIDBufferedPacketStoreTest, DiscardPacketsByOriginalCID) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
ProcessFirstFlight(TestConnectionId(1));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(1)));
store().DiscardPackets(TestConnectionId(1));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(1)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(2)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(1)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(2)));
EXPECT_FALSE(store().HasChlosBuffered());
}
TEST_P(DualCIDBufferedPacketStoreTest, DiscardPacketsByReplacedCID) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
replaced_cid_map_[TestConnectionId(3)] = TestConnectionId(4);
ProcessFirstFlight(TestConnectionId(1));
ProcessFirstFlight(TestConnectionId(3));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(2)));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(4)));
store().DiscardPackets(TestConnectionId(2));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(1)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(2)));
EXPECT_TRUE(store().HasBufferedPackets(TestConnectionId(3)));
EXPECT_TRUE(store().HasBufferedPackets(TestConnectionId(4)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(1)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(2)));
EXPECT_TRUE(store().HasChloForConnection(TestConnectionId(3)));
EXPECT_TRUE(store().HasChloForConnection(TestConnectionId(4)));
EXPECT_TRUE(store().HasChlosBuffered());
store().DiscardPackets(TestConnectionId(4));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(3)));
EXPECT_FALSE(store().HasBufferedPackets(TestConnectionId(4)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(3)));
EXPECT_FALSE(store().HasChloForConnection(TestConnectionId(4)));
EXPECT_FALSE(store().HasChlosBuffered());
}
TEST_P(DualCIDBufferedPacketStoreTest, CIDCollision) {
replaced_cid_map_[TestConnectionId(1)] = TestConnectionId(2);
replaced_cid_map_[TestConnectionId(3)] = TestConnectionId(2);
ProcessFirstFlight(TestConnectionId(1));
ProcessFirstFlight(TestConnectionId(3));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(1)));
ASSERT_TRUE(store().HasBufferedPackets(TestConnectionId(2)));
ASSERT_FALSE(store().HasBufferedPackets(TestConnectionId(3)));
}
}
}
}
| https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_dispatcher.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_dispatcher_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6
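A minimal sketch of the dual-CID indexing scheme that the DualCIDBufferedPacketStoreTest cases above exercise: one buffered-packet list is reachable under both the client's original connection ID and the generator-replaced one, and removal through either key erases both entries. All type and function names here are invented for illustration; this is not the QUICHE implementation.
#include <cstdint>
#include <iostream>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <vector>

using ConnectionId = uint64_t;  // illustrative stand-in for QuicConnectionId

struct PacketList {
  ConnectionId original_connection_id = 0;
  std::optional<ConnectionId> replaced_connection_id;
  std::vector<std::string> packets;
};

class DualCidStore {
 public:
  // Indexes one shared list under the original CID and, when the generator
  // replaced it, under the replaced CID as well.
  void Buffer(ConnectionId original, std::optional<ConnectionId> replaced) {
    auto list = std::make_shared<PacketList>();
    list->original_connection_id = original;
    list->replaced_connection_id = replaced;
    index_[original] = list;
    if (replaced.has_value()) index_[*replaced] = list;
  }

  bool Has(ConnectionId cid) const { return index_.count(cid) > 0; }

  // Delivering or discarding through either CID erases both index entries,
  // which is exactly what the tests above assert.
  std::shared_ptr<PacketList> Remove(ConnectionId cid) {
    auto it = index_.find(cid);
    if (it == index_.end()) return nullptr;
    std::shared_ptr<PacketList> list = it->second;
    index_.erase(list->original_connection_id);
    if (list->replaced_connection_id.has_value()) {
      index_.erase(*list->replaced_connection_id);
    }
    return list;
  }

 private:
  std::map<ConnectionId, std::shared_ptr<PacketList>> index_;
};

int main() {
  DualCidStore store;
  store.Buffer(/*original=*/1, /*replaced=*/2);
  std::cout << store.Has(1) << store.Has(2) << '\n';  // prints 11
  store.Remove(2);                                    // remove via replaced CID
  std::cout << store.Has(1) << store.Has(2) << '\n';  // prints 00
}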
568d4113-9530-4715-a653-8c08fbdd2dff | cpp | google/tsl | statusor | tsl/platform/default/statusor.h | tsl/platform/statusor_test.cc |
#ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUSOR_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_STATUSOR_H_
#include "absl/status/statusor.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/status.h"
#define TF_ASSIGN_OR_RETURN(lhs, rexpr) \
TF_ASSIGN_OR_RETURN_IMPL( \
TF_STATUS_MACROS_CONCAT_NAME(_status_or_value, __COUNTER__), lhs, rexpr)
#define TF_ASSIGN_OR_RETURN_IMPL(statusor, lhs, rexpr) \
auto statusor = (rexpr); \
if (TF_PREDICT_FALSE(!statusor.ok())) { \
return statusor.status(); \
} \
lhs = std::move(statusor).value()
#endif
|
#include "tsl/platform/statusor.h"
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/config.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace tsl {
namespace {
class Base1 {
public:
virtual ~Base1() {}
int pad_;
};
class Base2 {
public:
virtual ~Base2() {}
int yetotherpad_;
};
class Derived : public Base1, public Base2 {
public:
~Derived() override {}
int evenmorepad_;
};
class CopyNoAssign {
public:
explicit CopyNoAssign(int value) : foo_(value) {}
CopyNoAssign(const CopyNoAssign& other) : foo_(other.foo_) {}
int foo_;
private:
const CopyNoAssign& operator=(const CopyNoAssign&);
};
class NoDefaultConstructor {
public:
explicit NoDefaultConstructor(int foo);
};
static_assert(!std::is_default_constructible<NoDefaultConstructor>(),
"Should not be default-constructible.");
absl::StatusOr<std::unique_ptr<int>> ReturnUniquePtr() {
return std::unique_ptr<int>(new int(0));
}
TEST(StatusOr, NullPointerStatusOr) {
absl::StatusOr<int*> null_status(nullptr);
EXPECT_TRUE(null_status.ok());
EXPECT_EQ(null_status.value(), nullptr);
}
TEST(StatusOr, TestNoDefaultConstructorInitialization) {
absl::StatusOr<NoDefaultConstructor> statusor(errors::Cancelled(""));
EXPECT_FALSE(statusor.ok());
EXPECT_EQ(statusor.status().code(), absl::StatusCode::kCancelled);
absl::StatusOr<NoDefaultConstructor> statusor2;
EXPECT_FALSE(statusor2.ok());
EXPECT_EQ(statusor2.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOr, TestMoveOnlyInitialization) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
int* previous = thing.value().get();
thing = ReturnUniquePtr();
EXPECT_TRUE(thing.ok());
EXPECT_EQ(0, *thing.value());
EXPECT_NE(previous, thing.value().get());
}
TEST(StatusOr, TestMoveOnlyStatusCtr) {
absl::StatusOr<std::unique_ptr<int>> thing(errors::Cancelled(""));
ASSERT_FALSE(thing.ok());
}
TEST(StatusOr, TestMoveOnlyValueExtraction) {
absl::StatusOr<std::unique_ptr<int>> thing(ReturnUniquePtr());
ASSERT_TRUE(thing.ok());
std::unique_ptr<int> ptr = std::move(thing).value();
EXPECT_EQ(0, *ptr);
thing = std::move(ptr);
ptr = std::move(thing.value());
EXPECT_EQ(0, *ptr);
}
TEST(StatusOr, TestMoveOnlyConversion) {
absl::StatusOr<std::unique_ptr<const int>> const_thing(ReturnUniquePtr());
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
const int* const_previous = const_thing.value().get();
const_thing = ReturnUniquePtr();
EXPECT_TRUE(const_thing.ok());
EXPECT_EQ(0, *const_thing.value());
EXPECT_NE(const_previous, const_thing.value().get());
}
TEST(StatusOr, TestMoveOnlyVector) {
std::vector<absl::StatusOr<std::unique_ptr<int>>> vec;
vec.push_back(ReturnUniquePtr());
vec.resize(2);
auto another_vec = std::move(vec);
EXPECT_EQ(0, *another_vec[0].value());
EXPECT_EQ(absl::StatusCode::kUnknown, another_vec[1].status().code());
}
TEST(StatusOr, TestMoveWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = std::move(value1);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = std::move(error1);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = std::move(error2);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = std::move(value2);
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
}
TEST(StatusOr, TestCopyWithValuesAndErrors) {
absl::StatusOr<std::string> status_or(std::string(1000, '0'));
absl::StatusOr<std::string> value1(std::string(1000, '1'));
absl::StatusOr<std::string> value2(std::string(1000, '2'));
absl::StatusOr<std::string> error1(
absl::Status(absl::StatusCode::kUnknown, "error1"));
absl::StatusOr<std::string> error2(
absl::Status(absl::StatusCode::kUnknown, "error2"));
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '0'), status_or.value());
status_or = value1;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '1'), status_or.value());
status_or = error1;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error1", status_or.status().message());
status_or = error2;
ASSERT_FALSE(status_or.ok());
EXPECT_EQ("error2", status_or.status().message());
status_or = value2;
ASSERT_TRUE(status_or.ok());
EXPECT_EQ(std::string(1000, '2'), status_or.value());
EXPECT_EQ(std::string(1000, '1'), value1.value());
EXPECT_EQ("error1", error1.status().message());
EXPECT_EQ("error2", error2.status().message());
EXPECT_EQ(std::string(1000, '2'), value2.value());
}
TEST(StatusOr, TestDefaultCtor) {
absl::StatusOr<int> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestDefaultCtorValue) {
absl::StatusOr<int> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
  const absl::StatusOr<int> thing2;
#ifdef ABSL_HAVE_EXCEPTIONS
  try {
    thing2.value();
    ADD_FAILURE()
        << "value() returned successfully while the access is illegal";
  } catch (absl::BadStatusOrAccess& ex) {
  }
#else
  EXPECT_DEATH(thing2.value(), "");
#endif
}
TEST(StatusOr, TestStatusCtor) {
absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kCancelled);
}
TEST(StatusOr, TestValueCtor) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestCopyCtorStatusOk) {
const int kI = 4;
const absl::StatusOr<int> original(kI);
const absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOk) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestCopyCtorNonAssignable) {
const int kI = 4;
CopyNoAssign value(kI);
absl::StatusOr<CopyNoAssign> original(value);
absl::StatusOr<CopyNoAssign> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value().foo_, copy.value().foo_);
}
TEST(StatusOr, TestCopyCtorStatusOKConverting) {
const int kI = 4;
absl::StatusOr<int> original(kI);
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_DOUBLE_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestCopyCtorStatusNotOkConverting) {
absl::StatusOr<int> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<double> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestAssignmentStatusOk) {
const int kI = 4;
absl::StatusOr<int> source(kI);
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestAssignmentStatusNotOk) {
absl::StatusOr<int> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestStatus) {
absl::StatusOr<int> good(4);
EXPECT_TRUE(good.ok());
absl::StatusOr<int> bad(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(bad.ok());
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestValue) {
const int kI = 4;
absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOr, TestValueConst) {
const int kI = 4;
const absl::StatusOr<int> thing(kI);
EXPECT_EQ(kI, thing.value());
}
TEST(StatusOrDeathTest, TestValueNotOk) {
absl::StatusOr<int> thing(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "cancelled");
#endif
}
TEST(StatusOrDeathTest, TestValueNotOkConst) {
const absl::StatusOr<int> thing(absl::Status(absl::StatusCode::kUnknown, ""));
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerDefaultCtor) {
absl::StatusOr<int*> thing;
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status().code(), absl::StatusCode::kUnknown);
}
TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) {
absl::StatusOr<int*> thing;
#ifdef ABSL_HAVE_EXCEPTIONS
try {
thing.value();
ADD_FAILURE()
<< "value() returned successfully while the access is illegal";
} catch (absl::BadStatusOrAccess& ex) {
}
#else
EXPECT_DEATH(thing.value(), "");
#endif
}
TEST(StatusOr, TestPointerStatusCtor) {
absl::StatusOr<int*> thing(absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_FALSE(thing.ok());
EXPECT_EQ(thing.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValueCtor) {
const int kI = 4;
absl::StatusOr<const int*> thing(&kI);
EXPECT_TRUE(thing.ok());
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> original(&kI);
absl::StatusOr<const int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(original.value(), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOk) {
absl::StatusOr<int*> original(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) {
Derived derived;
absl::StatusOr<Derived*> original(&derived);
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
EXPECT_EQ(static_cast<const Base2*>(original.value()), copy.value());
}
TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) {
absl::StatusOr<Derived*> original(
absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<Base2*> copy(original);
EXPECT_EQ(copy.status(), original.status());
}
TEST(StatusOr, TestPointerAssignmentStatusOk) {
const int kI = 0;
absl::StatusOr<const int*> source(&kI);
absl::StatusOr<const int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
EXPECT_EQ(source.value(), target.value());
}
TEST(StatusOr, TestPointerAssignmentStatusNotOk) {
absl::StatusOr<int*> source(absl::Status(absl::StatusCode::kCancelled, ""));
absl::StatusOr<int*> target;
target = source;
EXPECT_EQ(target.status(), source.status());
}
TEST(StatusOr, TestPointerStatus) {
const int kI = 0;
absl::StatusOr<const int*> good(&kI);
EXPECT_TRUE(good.ok());
absl::StatusOr<const int*> bad(
absl::Status(absl::StatusCode::kCancelled, ""));
EXPECT_EQ(bad.status(), absl::Status(absl::StatusCode::kCancelled, ""));
}
TEST(StatusOr, TestPointerValue) {
const int kI = 0;
absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestPointerValueConst) {
const int kI = 0;
const absl::StatusOr<const int*> thing(&kI);
EXPECT_EQ(&kI, thing.value());
}
TEST(StatusOr, TestArrowOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(*uptr->get(), 0);
}
TEST(StatusOr, TestStarOperator) {
absl::StatusOr<std::unique_ptr<int>> uptr = ReturnUniquePtr();
EXPECT_EQ(**uptr, 0);
}
TEST(StatusOr, TestStarOperatorDeath) {
absl::StatusOr<Base1> error(
absl::Status(absl::StatusCode::kCancelled, "cancelled"));
EXPECT_DEATH(*error, "cancelled");
}
static absl::StatusOr<int> MakeStatus() { return 100; }
template <typename T>
class BenchmarkFactory {
public:
BenchmarkFactory() : value_(new T) {}
~BenchmarkFactory() { delete value_; }
T* TrivialFactory() TF_ATTRIBUTE_NOINLINE { return value_; }
absl::Status ArgumentFactory(T** result) TF_ATTRIBUTE_NOINLINE {
*result = value_;
return absl::OkStatus();
}
absl::Status ArgumentFactoryFail(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kCancelled, "");
}
absl::Status ArgumentFactoryFailShortMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal, "");
}
absl::Status ArgumentFactoryFailLongMsg(T** result) TF_ATTRIBUTE_NOINLINE {
*result = nullptr;
return absl::Status(absl::StatusCode::kInternal,
"a big string of message junk that will never be read");
}
StatusOr<T*> StatusOrFactory() TF_ATTRIBUTE_NOINLINE {
return static_cast<T*>(value_);
}
StatusOr<T*> StatusOrFactoryFail() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kCancelled, "");
}
StatusOr<T*> StatusOrFactoryFailShortMsg() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kInternal, "");
}
StatusOr<T*> StatusOrFactoryFailLongMsg() TF_ATTRIBUTE_NOINLINE {
return absl::Status(absl::StatusCode::kInternal,
"a big string of message junk that will never be read");
}
private:
T* volatile value_;
BenchmarkFactory(const BenchmarkFactory&) = delete;
void operator=(const BenchmarkFactory&) = delete;
};
class BenchmarkType {
public:
BenchmarkType() {}
virtual ~BenchmarkType() {}
virtual void DoWork() TF_ATTRIBUTE_NOINLINE {}
private:
BenchmarkType(const BenchmarkType&) = delete;
void operator=(const BenchmarkType&) = delete;
};
void BM_CalibrateWorkLoop(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
BenchmarkType* result = factory.TrivialFactory();
for (auto s : state) {
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_CalibrateWorkLoop);
void BM_TrivialFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = factory.TrivialFactory();
if (result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_TrivialFactory);
void BM_ArgumentFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactory(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactory);
void BM_StatusOrFactory(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactory();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactory);
void BM_ArgumentFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFail(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFail);
void BM_StatusOrFactoryFail(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result = factory.StatusOrFactoryFail();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFail);
void BM_ArgumentFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailShortMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailShortMsg);
void BM_StatusOrFactoryFailShortMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailShortMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailShortMsg);
void BM_ArgumentFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
BenchmarkType* result = nullptr;
absl::Status status = factory.ArgumentFactoryFailLongMsg(&result);
if (status.ok() && result != nullptr) {
result->DoWork();
}
}
}
BENCHMARK(BM_ArgumentFactoryFailLongMsg);
void BM_StatusOrFactoryFailLongMsg(::testing::benchmark::State& state) {
BenchmarkFactory<BenchmarkType> factory;
for (auto s : state) {
absl::StatusOr<BenchmarkType*> result =
factory.StatusOrFactoryFailLongMsg();
if (result.ok()) {
result.value()->DoWork();
}
}
}
BENCHMARK(BM_StatusOrFactoryFailLongMsg);
#if defined(PLATFORM_GOOGLE)
absl::StatusOr<int> GetError() {
return absl::InvalidArgumentError("An invalid argument error");
}
absl::StatusOr<int> PropagateError() {
TF_ASSIGN_OR_RETURN(int a, GetError());
return a;
}
absl::StatusOr<int> PropagateError2() {
TF_ASSIGN_OR_RETURN(int a, PropagateError());
return a;
}
TEST(Status, StackTracePropagation) {
absl::StatusOr<int> s = PropagateError2();
auto sources = s.status().GetSourceLocations();
ASSERT_EQ(sources.size(), 3);
for (int i = 0; i < 3; ++i) {
ASSERT_EQ(sources[i].file_name(),
"third_party/tensorflow/tsl/platform/statusor_test.cc");
}
}
#endif
}
}
| https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/default/statusor.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/statusor_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423
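The TF_ASSIGN_OR_RETURN macro defined in the header above expands to a uniquely named temporary StatusOr plus an early return on failure, which is what the PropagateError tests rely on. A hand-expanded sketch under assumed names (LoadConfig and the temporary's name are illustrative; the real macro derives the name from __COUNTER__ and wraps the check in TF_PREDICT_FALSE):
#include <iostream>
#include <string>
#include <utility>

#include "absl/status/status.h"
#include "absl/status/statusor.h"

// Hypothetical fallible producer, for illustration only.
absl::StatusOr<std::string> LoadConfig() { return std::string("config"); }

absl::Status UseConfig() {
  // Roughly what TF_ASSIGN_OR_RETURN(std::string config, LoadConfig());
  // expands to:
  auto _status_or_value0 = LoadConfig();
  if (!_status_or_value0.ok()) {
    return _status_or_value0.status();  // propagate the error upward
  }
  std::string config = std::move(_status_or_value0).value();
  std::cout << config << '\n';
  return absl::OkStatus();
}

int main() { return UseConfig().ok() ? 0 : 1; }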
9686cbe2-1b68-4171-9e71-4db9ddcde1c8 | cpp | tensorflow/tensorflow | matchers | tensorflow/lite/testing/matchers.h | tensorflow/lite/testing/matchers_test.cc | #ifndef TENSORFLOW_LITE_TESTING_MATCHERS_H_
#define TENSORFLOW_LITE_TESTING_MATCHERS_H_
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
inline void PrintTo(const TfLiteTensor& tensor, std::ostream* os) {
*os << "\n" << ::tflite::GetTensorDebugString(&tensor);
}
namespace testing {
namespace tflite {
namespace internal {
enum class FloatComparison { kExact, kApproximate };
struct TensorComparison {
FloatComparison float_comp = FloatComparison::kExact;
bool custom_margin = false;
bool custom_fraction = false;
double margin = 0.0;
double fraction = 0.0;
};
class TensorMatcher {
public:
TensorMatcher(const TensorComparison& comp, const TfLiteTensor& expected)
: comp_(comp), expected_(expected) {}
bool MatchAndExplain(const TfLiteTensor& actual,
MatchResultListener* listener) const {
const bool match = Match(actual);
if (listener->IsInterested() && !match) *listener << DescribeDiff(actual);
return match;
}
void DescribeTo(std::ostream* os) const { Describe(os, "is "); }
void DescribeNegationTo(std::ostream* os) const { Describe(os, "is not "); }
void SetCompareApproximately() {
comp_.float_comp = FloatComparison::kApproximate;
}
void SetMargin(double margin) {
ABSL_QCHECK_GE(margin, 0.0)
<< "Using a negative margin for Approximately";
comp_.custom_margin = true;
comp_.margin = margin;
}
void SetFraction(double fraction) {
ABSL_QCHECK(0.0 <= fraction && fraction < 1.0)
<< "Fraction for Approximately must be >= 0.0 and < 1.0";
comp_.custom_fraction = true;
comp_.fraction = fraction;
}
private:
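  // Formats a flat element index as "[i][j]..." using the tensor's dims.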
static std::string TensorIndex(int index, const TfLiteIntArray* dims) {
if (!dims->size) return "";
std::vector<int> index_nd(dims->size);
for (int i = dims->size - 1; i >= 0; --i) {
index_nd[i] = index % dims->data[i];
index /= dims->data[i];
}
return absl::StrCat("[", absl::StrJoin(index_nd, "]["), "]");
}
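  // Compares two floats either exactly or approximately, where
  // "approximately" accepts a difference within the larger of an absolute
  // margin and a relative margin scaled by the bigger operand.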
bool CompareFloat(float x, float y) const {
switch (comp_.float_comp) {
case FloatComparison::kExact:
return x == y;
case FloatComparison::kApproximate:
if (x == y) return true;
float fraction, margin;
if (comp_.custom_margin || comp_.custom_fraction) {
fraction = comp_.fraction;
margin = comp_.margin;
} else {
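          // No custom tolerance was supplied: fall back to a small multiple
          // of FLT_EPSILON and treat two near-zero values as equal.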
constexpr float kEpsilon = 32 * FLT_EPSILON;
if (std::fabs(x) <= kEpsilon && std::fabs(y) <= kEpsilon) return true;
fraction = kEpsilon;
margin = kEpsilon;
}
if (!std::isfinite(x) || !std::isfinite(y)) return false;
float relative_margin = fraction * std::max(std::fabs(x), std::fabs(y));
return std::fabs(x - y) <= std::max(margin, relative_margin);
}
return false;
}
void Describe(std::ostream* os, std::string_view prefix) const {
*os << prefix;
if (comp_.float_comp == FloatComparison::kApproximate) {
*os << "approximately ";
if (comp_.custom_margin || comp_.custom_fraction) {
*os << "(";
if (comp_.custom_margin) {
std::stringstream ss;
ss << std::setprecision(std::numeric_limits<double>::digits10 + 2)
<< comp_.margin;
*os << "absolute error of float values <= " << ss.str();
}
if (comp_.custom_margin && comp_.custom_fraction) {
*os << " or ";
}
if (comp_.custom_fraction) {
std::stringstream ss;
ss << std::setprecision(std::numeric_limits<double>::digits10 + 2)
<< comp_.fraction;
*os << "relative error of float values <= " << ss.str();
}
*os << ") ";
}
}
*os << "equal to ";
PrintTo(expected_, os);
}
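  // Explains a mismatch field by field, listing at most the first 20
  // differing elements.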
std::string DescribeDiff(const TfLiteTensor& actual) const {
if (actual.type != expected_.type) {
return absl::StrCat(
"dtypes don't match: ", TfLiteTypeGetName(actual.type), " vs ",
TfLiteTypeGetName(expected_.type));
}
if (!actual.dims) return "actual.dims is null.";
if (!expected_.dims) return "expected.dims is null.";
if (actual.dims->size != expected_.dims->size) {
return absl::StrCat("dims don't match: ", actual.dims->size, "D vs ",
expected_.dims->size, "D");
}
if (int n = actual.dims->size;
std::memcmp(actual.dims->data, expected_.dims->data, n * sizeof(int))) {
return absl::StrCat(
"shapes don't match: ", ::tflite::GetShapeDebugString(actual.dims),
" vs ", ::tflite::GetShapeDebugString(expected_.dims));
}
if (!actual.data.raw) return "actual.data is null.";
if (!expected_.data.raw) return "expected.data is null.";
if (actual.bytes != expected_.bytes) {
return absl::StrCat("bytes don't match: ", actual.bytes, " vs ",
expected_.bytes);
}
std::string error = "\n";
TfLiteIntArray* dims = actual.dims;
int n = ::tflite::NumElements(dims);
constexpr int kMaxMismatches = 20;
for (int i = 0, j = 0; i < n; ++i) {
if (!CompareFloat(actual.data.f[i], expected_.data.f[i])) {
absl::StrAppend(&error, "data", TensorIndex(i, dims),
" don't match: ", actual.data.f[i], " vs ",
expected_.data.f[i], "\n");
++j;
}
if (j == kMaxMismatches) {
absl::StrAppend(&error, "Too many mismatches; stopping after ", j,
".\n");
break;
}
}
return error;
}
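  // Returns true iff dtype, rank, shape, byte size, and element values all
  // match under the configured float comparison.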
bool Match(const TfLiteTensor& actual) const {
if (actual.type != expected_.type) return false;
if (!actual.dims) return false;
if (!expected_.dims) return false;
if (actual.dims->size != expected_.dims->size) return false;
if (int n = actual.dims->size;
std::memcmp(actual.dims->data, expected_.dims->data, n * sizeof(int))) {
return false;
}
if (!actual.data.raw) return false;
if (!expected_.data.raw) return false;
if (actual.bytes != expected_.bytes) return false;
switch (comp_.float_comp) {
case FloatComparison::kExact:
if (int n = actual.bytes;
std::memcmp(actual.data.raw, expected_.data.raw, n)) {
return false;
}
break;
case FloatComparison::kApproximate:
for (int i = 0, n = ::tflite::NumElements(actual.dims); i < n; ++i) {
if (!CompareFloat(actual.data.f[i], expected_.data.f[i])) {
return false;
}
}
break;
    }
return true;
}
TensorComparison comp_;
TfLiteTensor expected_;
};
}
struct SimpleConstTensor : public TfLiteTensor {
template <typename T>
SimpleConstTensor(TfLiteType dtype, const std::vector<int>& shape,
absl::Span<T> buf) {
type = dtype;
dims = TfLiteIntArrayCreate(shape.size());
std::memcpy(dims->data, shape.data(), shape.size() * sizeof(int));
data = {.data = buf.data()};
bytes = buf.size() * sizeof(T);
sparsity = nullptr;
}
~SimpleConstTensor() { TfLiteIntArrayFree(dims); }
};
inline void PrintTo(const SimpleConstTensor& tensor,
std::ostream* os) {
PrintTo(absl::implicit_cast<const TfLiteTensor&>(tensor), os);
}
inline PolymorphicMatcher<internal::TensorMatcher> EqualsTensor(
const TfLiteTensor& expected) {
internal::TensorComparison comp;
return MakePolymorphicMatcher(internal::TensorMatcher(comp, expected));
}
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m) {
m.mutable_impl().SetCompareApproximately();
return m;
}
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m, double margin) {
m.mutable_impl().SetCompareApproximately();
m.mutable_impl().SetMargin(margin);
return m;
}
template <class InnerTensorMatcherT>
inline InnerTensorMatcherT Approximately(InnerTensorMatcherT m, double margin,
double fraction) {
m.mutable_impl().SetCompareApproximately();
m.mutable_impl().SetMargin(margin);
m.mutable_impl().SetFraction(fraction);
return m;
}
}
}
#endif
|
#include "tensorflow/lite/testing/matchers.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
namespace tflite {
namespace {
using ::testing::tflite::Approximately;
using ::testing::tflite::EqualsTensor;
using ::testing::tflite::SimpleConstTensor;
TEST(TensorMatcherTest, ExactlyEqualsSelf) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
EXPECT_THAT(a, EqualsTensor(a));
}
TEST(TensorMatcherTest, ExactlyEqualsSame) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.71828f, 3.14159f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, EqualsTensor(b));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentType) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
SimpleConstTensor b(TfLiteType::kTfLiteInt32, {1, 2}, absl::MakeSpan(data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentDims) {
float data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2}, absl::MakeSpan(data));
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {2, 1}, absl::MakeSpan(data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
TEST(TensorMatcherTest, DoesNotExactlyEqualDifferentData) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {3.14159f, 2.71828f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Not(EqualsTensor(b)));
}
TEST(TensorMatcherTest, ApproximatelyEqualsDefaultMargin) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.718277f, 3.141593f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Approximately(EqualsTensor(b)));
}
TEST(TensorMatcherTest, ApproximatelyEqualsWithLooseMargin) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.72f, 3.14f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Approximately(EqualsTensor(b), 0.01));
}
TEST(TensorMatcherTest, DoesNotApproximatelyEqualWithTightMargin) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.72f, 3.14f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Not(Approximately(EqualsTensor(b), 0.001)));
}
TEST(TensorMatcherTest, ApproximatelyEqualsWithLooseFraction) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.72f, 3.14f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(
a, Approximately(EqualsTensor(b), 0.0, 0.999));
}
TEST(TensorMatcherTest, DoesNotApproximatelyEqualWithTightFraction) {
float a_data[] = {2.71828f, 3.14159f};
SimpleConstTensor a(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(a_data));
float b_data[] = {2.72f, 3.14f};
SimpleConstTensor b(TfLiteType::kTfLiteFloat32, {1, 2},
absl::MakeSpan(b_data));
EXPECT_THAT(a, Not(Approximately(EqualsTensor(b), 0.0,
0.0001)));
}
}
}
| https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/matchers.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
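The approximate comparison in the matcher above reduces to one rule: two floats match when their difference is within the larger of an absolute margin and a fraction of the bigger magnitude. A standalone sketch with the matcher's default tolerances (the matcher additionally short-circuits two values within 32 * FLT_EPSILON of zero when no custom tolerance is set; ApproximatelyEqual is an illustrative name):
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <iostream>

bool ApproximatelyEqual(float x, float y, float margin = 32 * FLT_EPSILON,
                        float fraction = 32 * FLT_EPSILON) {
  if (x == y) return true;
  if (!std::isfinite(x) || !std::isfinite(y)) return false;
  // The relative tolerance scales with the larger operand.
  float relative_margin = fraction * std::max(std::fabs(x), std::fabs(y));
  return std::fabs(x - y) <= std::max(margin, relative_margin);
}

int main() {
  std::cout << std::boolalpha
            << ApproximatelyEqual(2.71828f, 2.718277f) << '\n'      // true
            << ApproximatelyEqual(2.71828f, 2.72f) << '\n'          // false
            << ApproximatelyEqual(2.71828f, 2.72f, 0.01f) << '\n';  // true
}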
1560fb68-ac27-4a30-ae6c-29abc590bc38 | cpp | google/cel-cpp | create_list_step | eval/eval/create_list_step.cc | eval/eval/create_list_step_test.cc | #include "eval/eval/create_list_step.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/optional.h"
#include "base/ast_internal/expr.h"
#include "common/casting.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/attribute_utility.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "internal/status_macros.h"
#include "runtime/internal/mutable_list_impl.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::ListValueBuilderInterface;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::runtime_internal::MutableListValue;
class CreateListStep : public ExpressionStepBase {
public:
CreateListStep(int64_t expr_id, int list_size,
absl::flat_hash_set<int> optional_indices)
: ExpressionStepBase(expr_id),
list_size_(list_size),
optional_indices_(std::move(optional_indices)) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
private:
int list_size_;
absl::flat_hash_set<int32_t> optional_indices_;
};
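// Pops the evaluated elements off the stack; short-circuits on error values,
// merges unknowns when enabled, skips empty optional elements, and pushes the
// resulting list.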
absl::Status CreateListStep::Evaluate(ExecutionFrame* frame) const {
if (list_size_ < 0) {
return absl::Status(absl::StatusCode::kInternal,
"CreateListStep: list size is <0");
}
if (!frame->value_stack().HasEnough(list_size_)) {
return absl::Status(absl::StatusCode::kInternal,
"CreateListStep: stack underflow");
}
auto args = frame->value_stack().GetSpan(list_size_);
cel::Value result;
for (const auto& arg : args) {
if (arg->Is<cel::ErrorValue>()) {
result = arg;
frame->value_stack().Pop(list_size_);
frame->value_stack().Push(std::move(result));
return absl::OkStatus();
}
}
if (frame->enable_unknowns()) {
absl::optional<UnknownValue> unknown_set =
frame->attribute_utility().IdentifyAndMergeUnknowns(
args, frame->value_stack().GetAttributeSpan(list_size_),
true);
if (unknown_set.has_value()) {
frame->value_stack().Pop(list_size_);
frame->value_stack().Push(std::move(unknown_set).value());
return absl::OkStatus();
}
}
CEL_ASSIGN_OR_RETURN(auto builder,
frame->value_manager().NewListValueBuilder(
frame->value_manager().GetDynListType()));
builder->Reserve(args.size());
for (size_t i = 0; i < args.size(); ++i) {
auto& arg = args[i];
if (optional_indices_.contains(static_cast<int32_t>(i))) {
if (auto optional_arg = cel::As<cel::OptionalValue>(arg); optional_arg) {
if (!optional_arg->HasValue()) {
continue;
}
CEL_RETURN_IF_ERROR(builder->Add(optional_arg->Value()));
} else {
return cel::TypeConversionError(arg.GetTypeName(), "optional_type")
.NativeValue();
}
} else {
CEL_RETURN_IF_ERROR(builder->Add(std::move(arg)));
}
}
frame->value_stack().PopAndPush(list_size_, std::move(*builder).Build());
return absl::OkStatus();
}
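// Collects the indices of list elements that the AST marks as optional.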
absl::flat_hash_set<int32_t> MakeOptionalIndicesSet(
const cel::ast_internal::CreateList& create_list_expr) {
absl::flat_hash_set<int32_t> optional_indices;
for (size_t i = 0; i < create_list_expr.elements().size(); ++i) {
if (create_list_expr.elements()[i].optional()) {
optional_indices.insert(static_cast<int32_t>(i));
}
}
return optional_indices;
}
class CreateListDirectStep : public DirectExpressionStep {
public:
CreateListDirectStep(
std::vector<std::unique_ptr<DirectExpressionStep>> elements,
absl::flat_hash_set<int32_t> optional_indices, int64_t expr_id)
: DirectExpressionStep(expr_id),
elements_(std::move(elements)),
optional_indices_(std::move(optional_indices)) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute_trail) const override {
CEL_ASSIGN_OR_RETURN(auto builder,
frame.value_manager().NewListValueBuilder(
frame.value_manager().GetDynListType()));
builder->Reserve(elements_.size());
AttributeUtility::Accumulator unknowns =
frame.attribute_utility().CreateAccumulator();
AttributeTrail tmp_attr;
for (size_t i = 0; i < elements_.size(); ++i) {
const auto& element = elements_[i];
CEL_RETURN_IF_ERROR(element->Evaluate(frame, result, tmp_attr));
if (cel::InstanceOf<ErrorValue>(result)) return absl::OkStatus();
if (frame.attribute_tracking_enabled()) {
if (frame.missing_attribute_errors_enabled()) {
if (frame.attribute_utility().CheckForMissingAttribute(tmp_attr)) {
CEL_ASSIGN_OR_RETURN(
result, frame.attribute_utility().CreateMissingAttributeError(
tmp_attr.attribute()));
return absl::OkStatus();
}
}
if (frame.unknown_processing_enabled()) {
if (InstanceOf<UnknownValue>(result)) {
unknowns.Add(Cast<UnknownValue>(result));
}
if (frame.attribute_utility().CheckForUnknown(tmp_attr,
true)) {
unknowns.Add(tmp_attr);
}
}
}
if (optional_indices_.contains(static_cast<int32_t>(i))) {
if (auto optional_arg =
cel::As<cel::OptionalValue>(static_cast<const Value&>(result));
optional_arg) {
if (!optional_arg->HasValue()) {
continue;
}
CEL_RETURN_IF_ERROR(builder->Add(optional_arg->Value()));
continue;
}
return cel::TypeConversionError(result.GetTypeName(), "optional_type")
.NativeValue();
}
CEL_RETURN_IF_ERROR(builder->Add(std::move(result)));
}
if (!unknowns.IsEmpty()) {
result = std::move(unknowns).Build();
return absl::OkStatus();
}
result = std::move(*builder).Build();
return absl::OkStatus();
}
private:
std::vector<std::unique_ptr<DirectExpressionStep>> elements_;
absl::flat_hash_set<int32_t> optional_indices_;
};
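// Pushes an empty mutable list, used as a build-in-place accumulator (for
// example, by list-building comprehensions).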
class MutableListStep : public ExpressionStepBase {
public:
explicit MutableListStep(int64_t expr_id) : ExpressionStepBase(expr_id) {}
absl::Status Evaluate(ExecutionFrame* frame) const override;
};
absl::Status MutableListStep::Evaluate(ExecutionFrame* frame) const {
CEL_ASSIGN_OR_RETURN(auto builder,
frame->value_manager().NewListValueBuilder(
frame->value_manager().GetDynListType()));
frame->value_stack().Push(cel::OpaqueValue{
frame->value_manager().GetMemoryManager().MakeShared<MutableListValue>(
std::move(builder))});
return absl::OkStatus();
}
class DirectMutableListStep : public DirectExpressionStep {
public:
explicit DirectMutableListStep(int64_t expr_id)
: DirectExpressionStep(expr_id) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override;
};
absl::Status DirectMutableListStep::Evaluate(
ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute_trail) const {
CEL_ASSIGN_OR_RETURN(auto builder,
frame.value_manager().NewListValueBuilder(
frame.value_manager().GetDynListType()));
result = cel::OpaqueValue{
frame.value_manager().GetMemoryManager().MakeShared<MutableListValue>(
std::move(builder))};
return absl::OkStatus();
}
}
std::unique_ptr<DirectExpressionStep> CreateDirectListStep(
std::vector<std::unique_ptr<DirectExpressionStep>> deps,
absl::flat_hash_set<int32_t> optional_indices, int64_t expr_id) {
return std::make_unique<CreateListDirectStep>(
std::move(deps), std::move(optional_indices), expr_id);
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateCreateListStep(
const cel::ast_internal::CreateList& create_list_expr, int64_t expr_id) {
return std::make_unique<CreateListStep>(
expr_id, create_list_expr.elements().size(),
MakeOptionalIndicesSet(create_list_expr));
}
std::unique_ptr<ExpressionStep> CreateMutableListStep(int64_t expr_id) {
return std::make_unique<MutableListStep>(expr_id);
}
std::unique_ptr<DirectExpressionStep> CreateDirectMutableListStep(
int64_t expr_id) {
return std::make_unique<DirectMutableListStep>(expr_id);
}
}
|
#include "eval/eval/create_list_step.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "base/ast_internal/expr.h"
#include "base/attribute.h"
#include "base/attribute_set.h"
#include "base/type_provider.h"
#include "common/casting.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/const_value_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/ident_step.h"
#include "eval/internal/interop.h"
#include "eval/public/activation.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/testing/matchers.h"
#include "eval/public/unknown_attribute_set.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::Attribute;
using ::cel::AttributeQualifier;
using ::cel::AttributeSet;
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::IntValue;
using ::cel::ListValue;
using ::cel::TypeProvider;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::ast_internal::Expr;
using ::cel::test::IntValueIs;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
absl::StatusOr<CelValue> RunExpression(const std::vector<int64_t>& values,
google::protobuf::Arena* arena,
bool enable_unknowns) {
ExecutionPath path;
Expr dummy_expr;
auto& create_list = dummy_expr.mutable_list_expr();
for (auto value : values) {
auto& expr0 = create_list.mutable_elements().emplace_back().mutable_expr();
expr0.mutable_const_expr().set_int64_value(value);
CEL_ASSIGN_OR_RETURN(
auto const_step,
CreateConstValueStep(cel::interop_internal::CreateIntValue(value),
-1));
path.push_back(std::move(const_step));
}
CEL_ASSIGN_OR_RETURN(auto step,
CreateCreateListStep(create_list, dummy_expr.id()));
path.push_back(std::move(step));
cel::RuntimeOptions options;
if (enable_unknowns) {
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
}
CelExpressionFlatImpl cel_expr(
      FlatExpression(std::move(path), 0, TypeProvider::Builtin(), options));
Activation activation;
return cel_expr.Evaluate(activation, arena);
}
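// Variant of RunExpression that binds each CelValue to an ident in the
// activation, allowing errors and unknown sets to flow into list elements.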
absl::StatusOr<CelValue> RunExpressionWithCelValues(
const std::vector<CelValue>& values, google::protobuf::Arena* arena,
bool enable_unknowns) {
ExecutionPath path;
Expr dummy_expr;
Activation activation;
auto& create_list = dummy_expr.mutable_list_expr();
int ind = 0;
for (auto value : values) {
std::string var_name = absl::StrCat("name_", ind++);
auto& expr0 = create_list.mutable_elements().emplace_back().mutable_expr();
expr0.set_id(ind);
expr0.mutable_ident_expr().set_name(var_name);
CEL_ASSIGN_OR_RETURN(auto ident_step,
CreateIdentStep(expr0.ident_expr(), expr0.id()));
path.push_back(std::move(ident_step));
activation.InsertValue(var_name, value);
}
CEL_ASSIGN_OR_RETURN(auto step0,
CreateCreateListStep(create_list, dummy_expr.id()));
path.push_back(std::move(step0));
cel::RuntimeOptions options;
if (enable_unknowns) {
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
}
CelExpressionFlatImpl cel_expr(
      FlatExpression(std::move(path), 0, TypeProvider::Builtin(), options));
return cel_expr.Evaluate(activation, arena);
}
class CreateListStepTest : public testing::TestWithParam<bool> {};
TEST(CreateListStepTest, TestCreateListStackUnderflow) {
ExecutionPath path;
Expr dummy_expr;
auto& create_list = dummy_expr.mutable_list_expr();
auto& expr0 = create_list.mutable_elements().emplace_back().mutable_expr();
expr0.mutable_const_expr().set_int64_value(1);
ASSERT_OK_AND_ASSIGN(auto step0,
CreateCreateListStep(create_list, dummy_expr.id()));
path.push_back(std::move(step0));
CelExpressionFlatImpl cel_expr(
      FlatExpression(std::move(path), 0, TypeProvider::Builtin(),
                     cel::RuntimeOptions{}));
Activation activation;
google::protobuf::Arena arena;
auto status = cel_expr.Evaluate(activation, &arena);
ASSERT_THAT(status, Not(IsOk()));
}
TEST_P(CreateListStepTest, CreateListEmpty) {
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result, RunExpression({}, &arena, GetParam()));
ASSERT_TRUE(result.IsList());
EXPECT_THAT(result.ListOrDie()->size(), Eq(0));
}
TEST_P(CreateListStepTest, CreateListOne) {
google::protobuf::Arena arena;
ASSERT_OK_AND_ASSIGN(CelValue result,
RunExpression({100}, &arena, GetParam()));
ASSERT_TRUE(result.IsList());
const auto& list = *result.ListOrDie();
ASSERT_THAT(list.size(), Eq(1));
const CelValue& value = list.Get(&arena, 0);
EXPECT_THAT(value, test::IsCelInt64(100));
}
TEST_P(CreateListStepTest, CreateListWithError) {
google::protobuf::Arena arena;
std::vector<CelValue> values;
CelError error = absl::InvalidArgumentError("bad arg");
values.push_back(CelValue::CreateError(&error));
ASSERT_OK_AND_ASSIGN(CelValue result,
RunExpressionWithCelValues(values, &arena, GetParam()));
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*result.ErrorOrDie(), Eq(absl::InvalidArgumentError("bad arg")));
}
TEST_P(CreateListStepTest, CreateListWithErrorAndUnknown) {
google::protobuf::Arena arena;
std::vector<CelValue> values;
Expr expr0;
expr0.mutable_ident_expr().set_name("name0");
CelAttribute attr0(expr0.ident_expr().name(), {});
UnknownSet unknown_set0(UnknownAttributeSet({attr0}));
values.push_back(CelValue::CreateUnknownSet(&unknown_set0));
CelError error = absl::InvalidArgumentError("bad arg");
values.push_back(CelValue::CreateError(&error));
ASSERT_OK_AND_ASSIGN(CelValue result,
RunExpressionWithCelValues(values, &arena, GetParam()));
ASSERT_TRUE(result.IsError());
EXPECT_THAT(*result.ErrorOrDie(), Eq(absl::InvalidArgumentError("bad arg")));
}
TEST_P(CreateListStepTest, CreateListHundred) {
google::protobuf::Arena arena;
std::vector<int64_t> values;
for (size_t i = 0; i < 100; i++) {
values.push_back(i);
}
ASSERT_OK_AND_ASSIGN(CelValue result,
RunExpression(values, &arena, GetParam()));
ASSERT_TRUE(result.IsList());
const auto& list = *result.ListOrDie();
EXPECT_THAT(list.size(), Eq(static_cast<int>(values.size())));
for (size_t i = 0; i < values.size(); i++) {
EXPECT_THAT(list.Get(&arena, i), test::IsCelInt64(values[i]));
}
}
INSTANTIATE_TEST_SUITE_P(CombinedCreateListTest, CreateListStepTest,
testing::Bool());
TEST(CreateListStepTest, CreateListHundredAnd2Unknowns) {
google::protobuf::Arena arena;
std::vector<CelValue> values;
Expr expr0;
expr0.mutable_ident_expr().set_name("name0");
CelAttribute attr0(expr0.ident_expr().name(), {});
Expr expr1;
expr1.mutable_ident_expr().set_name("name1");
CelAttribute attr1(expr1.ident_expr().name(), {});
UnknownSet unknown_set0(UnknownAttributeSet({attr0}));
UnknownSet unknown_set1(UnknownAttributeSet({attr1}));
for (size_t i = 0; i < 100; i++) {
values.push_back(CelValue::CreateInt64(i));
}
values.push_back(CelValue::CreateUnknownSet(&unknown_set0));
values.push_back(CelValue::CreateUnknownSet(&unknown_set1));
ASSERT_OK_AND_ASSIGN(CelValue result,
RunExpressionWithCelValues(values, &arena, true));
ASSERT_TRUE(result.IsUnknownSet());
const UnknownSet* result_set = result.UnknownSetOrDie();
EXPECT_THAT(result_set->unknown_attributes().size(), Eq(2));
}
TEST(CreateDirectListStep, Basic) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(IntValue(1), -1));
deps.push_back(CreateConstValueDirectStep(IntValue(2), -1));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ListValue>(result));
EXPECT_THAT(Cast<ListValue>(result).Size(), IsOkAndHolds(2));
}
TEST(CreateDirectListStep, ForwardFirstError) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateErrorValue(absl::InternalError("test1")), -1));
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateErrorValue(absl::InternalError("test2")), -1));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ErrorValue>(result));
EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(),
StatusIs(absl::StatusCode::kInternal, "test1"));
}
std::vector<std::string> UnknownAttrNames(const UnknownValue& v) {
std::vector<std::string> names;
names.reserve(v.attribute_set().size());
for (const auto& attr : v.attribute_set()) {
EXPECT_OK(attr.AsString().status());
names.push_back(attr.AsString().value_or("<empty>"));
}
return names;
}
TEST(CreateDirectListStep, MergeUnknowns) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
ExecutionFrameBase frame(activation, options, value_factory.get());
AttributeSet attr_set1({Attribute("var1")});
AttributeSet attr_set2({Attribute("var2")});
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateUnknownValue(std::move(attr_set1)), -1));
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateUnknownValue(std::move(attr_set2)), -1));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<UnknownValue>(result));
EXPECT_THAT(UnknownAttrNames(Cast<UnknownValue>(result)),
UnorderedElementsAre("var1", "var2"));
}
TEST(CreateDirectListStep, ErrorBeforeUnknown) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
AttributeSet attr_set1({Attribute("var1")});
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateErrorValue(absl::InternalError("test1")), -1));
deps.push_back(CreateConstValueDirectStep(
value_factory.get().CreateErrorValue(absl::InternalError("test2")), -1));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ErrorValue>(result));
EXPECT_THAT(Cast<ErrorValue>(result).NativeValue(),
StatusIs(absl::StatusCode::kInternal, "test1"));
}
class SetAttrDirectStep : public DirectExpressionStep {
public:
explicit SetAttrDirectStep(Attribute attr)
: DirectExpressionStep(-1), attr_(std::move(attr)) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attr) const override {
result = frame.value_manager().GetNullValue();
attr = AttributeTrail(attr_);
return absl::OkStatus();
}
private:
cel::Attribute attr_;
};
TEST(CreateDirectListStep, MissingAttribute) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
options.enable_missing_attribute_errors = true;
activation.SetMissingPatterns({cel::AttributePattern(
"var1", {cel::AttributeQualifierPattern::OfString("field1")})});
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(
CreateConstValueDirectStep(value_factory.get().GetNullValue(), -1));
deps.push_back(std::make_unique<SetAttrDirectStep>(
Attribute("var1", {AttributeQualifier::OfString("field1")})));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ErrorValue>(result));
EXPECT_THAT(
Cast<ErrorValue>(result).NativeValue(),
StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("var1.field1")));
}
TEST(CreateDirectListStep, OptionalPresentSet) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(IntValue(1), -1));
deps.push_back(CreateConstValueDirectStep(
cel::OptionalValue::Of(value_factory.get().GetMemoryManager(),
IntValue(2)),
-1));
auto step = CreateDirectListStep(std::move(deps), {1}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ListValue>(result));
auto list = Cast<ListValue>(result);
EXPECT_THAT(list.Size(), IsOkAndHolds(2));
EXPECT_THAT(list.Get(value_factory.get(), 0), IsOkAndHolds(IntValueIs(1)));
EXPECT_THAT(list.Get(value_factory.get(), 1), IsOkAndHolds(IntValueIs(2)));
}
TEST(CreateDirectListStep, OptionalAbsentNotSet) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(CreateConstValueDirectStep(IntValue(1), -1));
deps.push_back(CreateConstValueDirectStep(cel::OptionalValue::None(), -1));
auto step = CreateDirectListStep(std::move(deps), {1}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<ListValue>(result));
auto list = Cast<ListValue>(result);
EXPECT_THAT(list.Size(), IsOkAndHolds(1));
EXPECT_THAT(list.Get(value_factory.get(), 0), IsOkAndHolds(IntValueIs(1)));
}
TEST(CreateDirectListStep, PartialUnknown) {
cel::ManagedValueFactory value_factory(
cel::TypeProvider::Builtin(), cel::MemoryManagerRef::ReferenceCounting());
cel::Activation activation;
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kAttributeOnly;
activation.SetUnknownPatterns({cel::AttributePattern(
"var1", {cel::AttributeQualifierPattern::OfString("field1")})});
ExecutionFrameBase frame(activation, options, value_factory.get());
std::vector<std::unique_ptr<DirectExpressionStep>> deps;
deps.push_back(
CreateConstValueDirectStep(value_factory.get().CreateIntValue(1), -1));
deps.push_back(std::make_unique<SetAttrDirectStep>(Attribute("var1", {})));
auto step = CreateDirectListStep(std::move(deps), {}, -1);
cel::Value result;
AttributeTrail attr;
ASSERT_OK(step->Evaluate(frame, result, attr));
ASSERT_TRUE(InstanceOf<UnknownValue>(result));
EXPECT_THAT(UnknownAttrNames(Cast<UnknownValue>(result)),
UnorderedElementsAre("var1"));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/create_list_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/create_list_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
d43e90d3-982d-493d-b392-cc60f915b654 | cpp | tensorflow/tensorflow | sparsify_model | tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc | tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc | #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <cstdint>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h"
#include "tensorflow/compiler/mlir/lite/transforms/dense_to_sparse_pass.h"
#include "tensorflow/compiler/mlir/lite/transforms/pass_registry_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir {
namespace lite {
absl::Status SparsifyModel(const tflite::ModelT& input_model,
flatbuffers::FlatBufferBuilder* builder) {
MLIRContext context;
  StatusScopedDiagnosticHandler statusHandler(&context, true);
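  // Round-trip the unpacked model through a serialized flatbuffer so it can
  // be imported as an MLIR module for the dense-to-sparse pass.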
flatbuffers::FlatBufferBuilder input_builder;
flatbuffers::Offset<tflite::Model> input_model_location =
tflite::Model::Pack(input_builder, &input_model);
tflite::FinishModelBuffer(input_builder, input_model_location);
std::string serialized_model(
reinterpret_cast<const char*>(input_builder.GetBufferPointer()),
input_builder.GetSize());
OwningOpRef<mlir::ModuleOp> module = tflite::FlatBufferToMlir(
serialized_model, &context, UnknownLoc::get(&context));
if (!module) {
LOG(ERROR) << "Couldn't import flatbuffer to MLIR.";
return absl::InternalError("Couldn't import flatbuffer to MLIR.");
}
PassManager pm((*module)->getName(), OpPassManager::Nesting::Implicit);
pm.addPass(TFL::Create<TFL::DenseToSparsePass>());
if (failed(pm.run(module.get()))) {
LOG(ERROR) << "Failed to sparsify: "
<< statusHandler.ConsumeStatus().message();
return absl::InternalError(absl::StrCat(
"Failed to sparsify: ", statusHandler.ConsumeStatus().message()));
}
std::string result;
tflite::FlatbufferExportOptions options;
options.converter_flags.set_force_select_tf_ops(false);
options.converter_flags.set_enable_select_tf_ops(true);
options.converter_flags.set_allow_custom_ops(true);
for (const auto& metadata : input_model.metadata) {
if (metadata->name != tflite::optimize::kTfLiteReducedPrecisionKey) {
continue;
}
const auto& data = input_model.buffers[metadata->buffer]->data;
options.metadata[metadata->name] = std::string(data.begin(), data.end());
break;
}
if (!tflite::MlirToFlatBufferTranslateFunction(module.get(), options,
&result)) {
LOG(ERROR) << "Failed to export MLIR to flatbuffer.";
return absl::InternalError("Failed to export MLIR to flatbuffer.");
}
builder->PushFlatBuffer(reinterpret_cast<const uint8_t*>(result.data()),
result.size());
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/lite/sparsity/sparsify_model.h"
#include <stdint.h>
#include <cstdarg>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/tools/optimize/reduced_precision_metadata.h"
namespace mlir {
namespace lite {
namespace {
TEST(SparsifyModelTest, MetadataIsAddedToOutputModel) {
std::string expected_key = tflite::optimize::kTfLiteReducedPrecisionKey;
std::string expected_value = "test_data";
auto input_fbm = mlir::TFL::FlatBufferModelAbslError::BuildFromFile(
"tensorflow/compiler/mlir/lite/sparsity/testdata/"
"sparse_tensor.bin");
tflite::ModelT input_model;
input_fbm->GetModel()->UnPackTo(&input_model);
auto model_metadata_buffer = std::make_unique<tflite::BufferT>();
model_metadata_buffer->data =
std::vector<uint8_t>(expected_value.begin(), expected_value.end());
input_model.buffers.push_back(std::move(model_metadata_buffer));
auto metadata_t = std::make_unique<tflite::MetadataT>();
metadata_t->name = tflite::optimize::kTfLiteReducedPrecisionKey;
metadata_t->buffer = input_model.buffers.size() - 1;
input_model.metadata.push_back(std::move(metadata_t));
flatbuffers::FlatBufferBuilder output_builder;
ASSERT_TRUE(SparsifyModel(input_model, &output_builder).ok());
auto output_fbm = mlir::TFL::FlatBufferModelAbslError::BuildFromBuffer(
reinterpret_cast<const char*>(output_builder.GetCurrentBufferPointer()),
output_builder.GetSize());
tflite::ModelT output_model;
output_fbm->GetModel()->UnPackTo(&output_model);
std::map<std::string, std::string> output_metadata;
for (const auto& metadata : output_model.metadata) {
const auto& data = output_model.buffers[metadata->buffer]->data;
output_metadata[metadata->name] = std::string(data.begin(), data.end());
}
EXPECT_THAT(output_metadata,
testing::Contains(testing::Pair(expected_key, expected_value)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/sparsity/sparsify_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/sparsity/sparsify_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5302a1e6-ab88-4a50-a323-5dace045e104 | cpp | tensorflow/tensorflow | input_lowering_metrics_pass | tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass.cc | tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass_test.cc | #include <memory>
#include "llvm/ADT/DenseSet.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::Operation;
using mlir::WalkResult;
#define GEN_PASS_DEF_INPUTLOWERINGMETRICSPASS
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h.inc"
auto* dynamism_op_counter = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/dynamism_op_counter",
"Counts how many ops are dynamic", "op_name");
auto* dynamism_function_counter = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter",
"Counts how many functions are dynamic", "has_dynamism");
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
constexpr char kDynamicFunctionName[] = "kDynamicFunction";
class InputMetricsLoweringPass
: public impl::InputLoweringMetricsPassBase<InputMetricsLoweringPass> {
public:
void runOnOperation() override;
};
void InputMetricsLoweringPass::runOnOperation() {
bool has_dynamic_op = false;
Operation* func_op = getOperation();
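  // Count every registered op that needs the XLA dynamic padder, then record
  // whether the function as a whole contained any such op.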
func_op->walk([&](Operation* op) {
auto abstractOp = op->getRegisteredInfo();
if (!abstractOp) return WalkResult::advance();
if (mlir::mhlo::IsDynamicPadderOp(abstractOp->getTypeID())) {
has_dynamic_op = true;
dynamism_op_counter->GetCell(op->getName().getStringRef().str())
->IncrementBy(1);
}
return WalkResult::advance();
});
if (has_dynamic_op) {
dynamism_function_counter->GetCell(kDynamicFunctionName)->IncrementBy(1);
} else {
dynamism_function_counter->GetCell(kNotDynamicFunctionName)->IncrementBy(1);
}
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateInputLoweringMetricsPass() {
return std::make_unique<InputMetricsLoweringPass>();
}
}
}
} | #include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
constexpr char kDynamicFunctionName[] = "kDynamicFunction";
static constexpr char kDynamismOpCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_op_counter";
static constexpr char kDynamismFunctionCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter";
class InputLoweringMetricsPassTest : public testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(CreateInputLoweringMetricsPass());
}
bool ModulesEqual(const ModuleOp& module_before,
const ModuleOp& module_after) {
return mlir::OperationEquivalence::isEquivalentTo(
module_before, module_after, mlir::OperationEquivalence::None);
}
mlir::LogicalResult Run() {
mlir::OwningOpRef<mlir::ModuleOp> module_before = module_->clone();
LogicalResult run_result = pm_->run(module_.get());
EXPECT_TRUE(ModulesEqual(*module_before, *module_));
return run_result;
}
private:
mlir::MLIRContext context_;
mlir::OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(InputLoweringMetricsPassTest, CountsNoDynamicOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> dynamism_op_counter(kDynamismOpCounterStreamzName);
CellReader<int64_t> dynamism_function_counter(
kDynamismFunctionCounterStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(dynamism_function_counter.Delta(kNotDynamicFunctionName), 1);
}
TEST_F(InputLoweringMetricsPassTest, CountsDynamicOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%cst0 = "tf.Const"(){ value = dense<0> : tensor<3x5xi1>} : () -> tensor<3x5xi1>
%0 = "tf.Where"(%cst0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
func.return
}
})";
CellReader<int64_t> dynamism_counter(kDynamismOpCounterStreamzName);
CellReader<int64_t> dynamism_function_counter(
kDynamismFunctionCounterStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(dynamism_counter.Delta("tf.Where"), 1);
EXPECT_EQ(dynamism_function_counter.Delta(kDynamicFunctionName), 1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
868102d5-3fce-4df6-ae58-7cc321b9294f | cpp | google/leveldb | bloom | util/bloom.cc | util/bloom_test.cc | #include "leveldb/filter_policy.h"
#include "leveldb/slice.h"
#include "util/hash.h"
namespace leveldb {
namespace {
static uint32_t BloomHash(const Slice& key) {
return Hash(key.data(), key.size(), 0xbc9f1d34);
}
class BloomFilterPolicy : public FilterPolicy {
public:
explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
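    // 0.69 =~ ln(2): the probe count that minimizes the false-positive rate
    // for a given bits-per-key budget; clamp to a practical range.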
k_ = static_cast<size_t>(bits_per_key * 0.69);
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
size_t bits = n * bits_per_key_;
if (bits < 64) bits = 64;
size_t bytes = (bits + 7) / 8;
bits = bytes * 8;
const size_t init_size = dst->size();
dst->resize(init_size + bytes, 0);
dst->push_back(static_cast<char>(k_));
char* array = &(*dst)[init_size];
for (int i = 0; i < n; i++) {
uint32_t h = BloomHash(keys[i]);
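      // Double hashing: derive k probe positions from one base hash by
      // repeatedly adding a rotated delta.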
const uint32_t delta = (h >> 17) | (h << 15);
for (size_t j = 0; j < k_; j++) {
const uint32_t bitpos = h % bits;
array[bitpos / 8] |= (1 << (bitpos % 8));
h += delta;
}
}
}
bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
const size_t len = bloom_filter.size();
if (len < 2) return false;
const char* array = bloom_filter.data();
const size_t bits = (len - 1) * 8;
const size_t k = array[len - 1];
if (k > 30) {
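      // Values of k above 30 are reserved for potential future filter
      // encodings, so conservatively treat the key as a possible match.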
return true;
}
uint32_t h = BloomHash(key);
const uint32_t delta = (h >> 17) | (h << 15);
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
private:
size_t bits_per_key_;
size_t k_;
};
}
const FilterPolicy* NewBloomFilterPolicy(int bits_per_key) {
return new BloomFilterPolicy(bits_per_key);
}
} | #include "gtest/gtest.h"
#include "leveldb/filter_policy.h"
#include "util/coding.h"
#include "util/logging.h"
#include "util/testutil.h"
namespace leveldb {
static const int kVerbose = 1;
static Slice Key(int i, char* buffer) {
EncodeFixed32(buffer, i);
return Slice(buffer, sizeof(uint32_t));
}
class BloomTest : public testing::Test {
public:
BloomTest() : policy_(NewBloomFilterPolicy(10)) {}
~BloomTest() { delete policy_; }
void Reset() {
keys_.clear();
filter_.clear();
}
void Add(const Slice& s) { keys_.push_back(s.ToString()); }
void Build() {
std::vector<Slice> key_slices;
for (size_t i = 0; i < keys_.size(); i++) {
key_slices.push_back(Slice(keys_[i]));
}
filter_.clear();
policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
&filter_);
keys_.clear();
if (kVerbose >= 2) DumpFilter();
}
size_t FilterSize() const { return filter_.size(); }
void DumpFilter() {
std::fprintf(stderr, "F(");
for (size_t i = 0; i + 1 < filter_.size(); i++) {
const unsigned int c = static_cast<unsigned int>(filter_[i]);
for (int j = 0; j < 8; j++) {
std::fprintf(stderr, "%c", (c & (1 << j)) ? '1' : '.');
}
}
std::fprintf(stderr, ")\n");
}
bool Matches(const Slice& s) {
if (!keys_.empty()) {
Build();
}
return policy_->KeyMayMatch(s, filter_);
}
double FalsePositiveRate() {
char buffer[sizeof(int)];
int result = 0;
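    // Probe keys that were never inserted (offset by 1e9) and report the
    // fraction the filter wrongly reports as present.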
for (int i = 0; i < 10000; i++) {
if (Matches(Key(i + 1000000000, buffer))) {
result++;
}
}
return result / 10000.0;
}
private:
const FilterPolicy* policy_;
std::string filter_;
std::vector<std::string> keys_;
};
TEST_F(BloomTest, EmptyFilter) {
ASSERT_TRUE(!Matches("hello"));
ASSERT_TRUE(!Matches("world"));
}
TEST_F(BloomTest, Small) {
Add("hello");
Add("world");
ASSERT_TRUE(Matches("hello"));
ASSERT_TRUE(Matches("world"));
ASSERT_TRUE(!Matches("x"));
ASSERT_TRUE(!Matches("foo"));
}
static int NextLength(int length) {
if (length < 10) {
length += 1;
} else if (length < 100) {
length += 10;
} else if (length < 1000) {
length += 100;
} else {
length += 1000;
}
return length;
}
TEST_F(BloomTest, VaryingLengths) {
char buffer[sizeof(int)];
int mediocre_filters = 0;
int good_filters = 0;
for (int length = 1; length <= 10000; length = NextLength(length)) {
Reset();
for (int i = 0; i < length; i++) {
Add(Key(i, buffer));
}
Build();
ASSERT_LE(FilterSize(), static_cast<size_t>((length * 10 / 8) + 40))
<< length;
for (int i = 0; i < length; i++) {
ASSERT_TRUE(Matches(Key(i, buffer)))
<< "Length " << length << "; key " << i;
}
double rate = FalsePositiveRate();
if (kVerbose >= 1) {
std::fprintf(stderr,
"False positives: %5.2f%% @ length = %6d ; bytes = %6d\n",
rate * 100.0, length, static_cast<int>(FilterSize()));
}
ASSERT_LE(rate, 0.02);
if (rate > 0.0125)
mediocre_filters++;
else
good_filters++;
}
if (kVerbose >= 1) {
std::fprintf(stderr, "Filters: %d good, %d mediocre\n", good_filters,
mediocre_filters);
}
ASSERT_LE(mediocre_filters, good_filters / 5);
}
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/bloom.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/bloom_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
7fc752cb-3193-459f-b0df-3863c1908434 | cpp | tensorflow/tensorflow | threadpool | third_party/xla/third_party/tsl/tsl/platform/threadpool.cc | tensorflow/core/lib/core/threadpool_test.cc | #include "tsl/platform/threadpool.h"
#define EIGEN_USE_THREADS
#include "absl/types/optional.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/context.h"
#include "tsl/platform/denormal.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/numa.h"
#include "tsl/platform/setround.h"
#include "tsl/platform/tracing.h"
#ifdef DNNL_AARCH64_USE_ACL
#include "tsl/platform/cpu_info.h"
#endif
#ifdef TENSORFLOW_THREADSCALING_EXPERIMENTAL
ABSL_FLAG(float, tensorflow_num_threads_scale_factor, 1.0,
"Allows to scale all Tensorflow ThreadPools. Total number of threads "
"in a given ThreadPool equals to num_threads * "
"tensorflow_num_threads_scale_factor. Default scale factor of 1 is a "
"no-op.");
#endif
namespace tsl {
namespace thread {
struct EigenEnvironment {
typedef Thread EnvThread;
struct TaskImpl {
std::function<void()> f;
Context context;
uint64 trace_id;
};
struct Task {
std::unique_ptr<TaskImpl> f;
};
Env* const env_;
const ThreadOptions thread_options_;
const string name_;
EigenEnvironment(Env* env, const ThreadOptions& thread_options,
const string& name)
: env_(env), thread_options_(thread_options), name_(name) {}
EnvThread* CreateThread(std::function<void()> f) {
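    // Each pool thread flushes denormals, sets round-to-nearest, and pins
    // itself to the requested NUMA node before running the closure.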
return env_->StartThread(thread_options_, name_, [=]() {
port::ScopedFlushDenormal flush;
tsl::port::ScopedSetRound round(FE_TONEAREST);
if (thread_options_.numa_node != port::kNUMANoAffinity) {
port::NUMASetThreadNodeAffinity(thread_options_.numa_node);
}
f();
});
}
Task CreateTask(std::function<void()> f) {
uint64 id = 0;
if (tracing::EventCollector::IsEnabled()) {
id = tracing::GetUniqueArg();
tracing::RecordEvent(tracing::EventCategory::kScheduleClosure, id);
}
return Task{
std::unique_ptr<TaskImpl>(new TaskImpl{
std::move(f),
Context(ContextKind::kThread),
id,
}),
};
}
void ExecuteTask(const Task& t) {
WithContext wc(t.f->context);
tracing::ScopedRegion region(tracing::EventCategory::kRunClosure,
t.f->trace_id);
t.f->f();
}
};
ThreadPool::ThreadPool(Env* env, const string& name, int num_threads)
: ThreadPool(env, ThreadOptions(), name, num_threads, true, nullptr) {}
ThreadPool::ThreadPool(Env* env, const ThreadOptions& thread_options,
const string& name, int num_threads)
: ThreadPool(env, thread_options, name, num_threads, true, nullptr) {}
ThreadPool::ThreadPool(Env* env, const ThreadOptions& thread_options,
const string& name, int num_threads,
bool low_latency_hint, Eigen::Allocator* allocator) {
CHECK_GE(num_threads, 1);
#ifdef DNNL_AARCH64_USE_ACL
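// With Arm Compute Library builds, leave one core unused when the pool would
// otherwise span every CPU; the extra thread tends to oversubscribe the host.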
if (num_threads == tsl::port::NumTotalCPUs() && num_threads >= 16) {
num_threads = num_threads - 1;
}
#endif
#ifdef TENSORFLOW_THREADSCALING_EXPERIMENTAL
CHECK_GT(absl::GetFlag(FLAGS_tensorflow_num_threads_scale_factor), 0);
num_threads *= absl::GetFlag(FLAGS_tensorflow_num_threads_scale_factor);
if (num_threads < 1) num_threads = 1;
#endif
eigen_threadpool_.reset(new Eigen::ThreadPoolTempl<EigenEnvironment>(
num_threads, low_latency_hint,
EigenEnvironment(env, thread_options, "tf_" + name)));
underlying_threadpool_ = eigen_threadpool_.get();
threadpool_device_.reset(new Eigen::ThreadPoolDevice(underlying_threadpool_,
num_threads, allocator));
}
ThreadPool::ThreadPool(thread::ThreadPoolInterface* user_threadpool) {
underlying_threadpool_ = user_threadpool;
threadpool_device_.reset(new Eigen::ThreadPoolDevice(
underlying_threadpool_, underlying_threadpool_->NumThreads(), nullptr));
}
ThreadPool::~ThreadPool() {}
void ThreadPool::Schedule(std::function<void()> fn) {
CHECK(fn != nullptr);
underlying_threadpool_->Schedule(std::move(fn));
}
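// Effectively ceil(total / block_size), collapsing to a single shard whenever
// sharding cannot help (tiny totals, non-positive block size, one thread).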
int ThreadPool::NumShardsUsedByFixedBlockSizeScheduling(
const int64_t total, const int64_t block_size) {
if (block_size <= 0 || total <= 1 || total <= block_size ||
NumThreads() == 1) {
return 1;
}
return (total + block_size - 1) / block_size;
}
int ThreadPool::NumShardsUsedByTransformRangeConcurrently(
const int64_t block_size, const int64_t total) {
return NumShardsUsedByFixedBlockSizeScheduling(total, block_size);
}
void ThreadPool::ParallelFor(int64_t total,
const SchedulingParams& scheduling_params,
const std::function<void(int64_t, int64_t)>& fn) {
switch (scheduling_params.strategy()) {
case SchedulingStrategy::kAdaptive: {
if (scheduling_params.cost_per_unit().has_value()) {
ParallelFor(total, *scheduling_params.cost_per_unit(), fn);
}
break;
}
case SchedulingStrategy::kFixedBlockSize: {
if (scheduling_params.block_size().has_value()) {
ParallelForFixedBlockSizeScheduling(
total, *scheduling_params.block_size(), fn);
}
break;
}
}
}
void ThreadPool::TransformRangeConcurrently(
const int64_t block_size, const int64_t total,
const std::function<void(int64_t, int64_t)>& fn) {
  ParallelFor(total,
              SchedulingParams(SchedulingStrategy::kFixedBlockSize,
                               absl::nullopt, block_size),
              fn);
}
void ThreadPool::ParallelForFixedBlockSizeScheduling(
const int64_t total, const int64_t block_size,
const std::function<void(int64_t, int64_t)>& fn) {
const int num_shards_used =
NumShardsUsedByFixedBlockSizeScheduling(total, block_size);
if (num_shards_used == 1) {
fn(0, total);
return;
}
BlockingCounter counter(num_shards_used);
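  // Recursively split [first, last) on block_size boundaries: schedule the
  // upper half, keep the lower half, and run fn once a single block remains.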
std::function<void(int64_t, int64_t)> handle_range =
[=, &handle_range, &counter, &fn](int64_t first, int64_t last) {
while (last - first > block_size) {
const int64_t mid = first + ((last - first) / 2 + block_size - 1) /
block_size * block_size;
Schedule([=, &handle_range]() { handle_range(mid, last); });
last = mid;
}
fn(first, last);
counter.DecrementCount();
};
if (num_shards_used <= NumThreads()) {
handle_range(0, total);
} else {
Schedule([=, &handle_range]() { handle_range(0, total); });
}
counter.Wait();
}
void ThreadPool::ParallelFor(int64_t total, int64_t cost_per_unit,
const std::function<void(int64_t, int64_t)>& fn) {
CHECK_GE(total, 0);
CHECK_EQ(total, (int64_t)(Eigen::Index)total);
threadpool_device_->parallelFor(
total, Eigen::TensorOpCost(0, 0, cost_per_unit),
[&fn](Eigen::Index first, Eigen::Index last) { fn(first, last); });
}
void ThreadPool::ParallelForWithWorkerId(
int64_t total, int64_t cost_per_unit,
const std::function<void(int64_t, int64_t, int)>& fn) {
CHECK_GE(total, 0);
CHECK_EQ(total, (int64_t)(Eigen::Index)total);
threadpool_device_->parallelFor(total,
Eigen::TensorOpCost(0, 0, cost_per_unit),
[this, &fn](int64_t start, int64_t limit) {
int id = CurrentThreadId() + 1;
fn(start, limit, id);
});
}
void ThreadPool::ParallelForWithWorkerId(
int64_t total, const SchedulingParams& scheduling_params,
const std::function<void(int64_t, int64_t, int)>& fn) {
ParallelFor(total, scheduling_params,
[this, &fn](int64_t start, int64_t limit) {
int id = CurrentThreadId() + 1;
fn(start, limit, id);
});
}
int ThreadPool::NumThreads() const {
return underlying_threadpool_->NumThreads();
}
int ThreadPool::CurrentThreadId() const {
return underlying_threadpool_->CurrentThreadId();
}
void ThreadPool::ScheduleWithHint(std::function<void()> fn, int start,
int limit) {
underlying_threadpool_->ScheduleWithHint(std::move(fn), start, limit);
}
void ThreadPool::SetStealPartitions(
const std::vector<std::pair<unsigned, unsigned>>& partitions) {
DCHECK(eigen_threadpool_ != nullptr);
eigen_threadpool_->SetStealPartitions(partitions);
}
Eigen::ThreadPoolInterface* ThreadPool::AsEigenThreadPool() const {
DCHECK(underlying_threadpool_ != nullptr);
return underlying_threadpool_;
}
}
} | #include "tensorflow/core/lib/core/threadpool.h"
#include <atomic>
#include <optional>
#include "absl/synchronization/barrier.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/types/optional.h"
#include "tensorflow/core/platform/context.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace thread {
static const int kNumThreads = 30;
TEST(ThreadPool, Empty) {
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
ThreadPool pool(Env::Default(), "test", num_threads);
}
}
TEST(ThreadPool, DoWork) {
Context outer_context(ContextKind::kThread);
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
{
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
pool.Schedule([&outer_context, &work, i]() {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
ASSERT_FALSE(work[i].exchange(true));
});
}
}
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
void RunWithFixedBlockSize(int64_t block_size, int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
threads->ParallelFor(
total,
      ThreadPool::SchedulingParams(
          ThreadPool::SchedulingStrategy::kFixedBlockSize,
          std::nullopt, block_size),
[=, &mu, &num_shards, &num_done_work, &work](int64_t start, int64_t end) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
mutex_lock l(mu);
++num_shards;
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
ASSERT_TRUE(work[i]);
}
const int64_t num_workers = (total + block_size - 1) / block_size;
if (num_workers < threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + num_workers);
}
}
TEST(ThreadPoolTest, ParallelForFixedBlockSizeScheduling) {
ThreadPool threads(Env::Default(), "test", 16);
for (auto block_size : {1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto diff : {0, 1, 11, 102, 1003, 10005, 1000007}) {
const int64_t total = block_size + diff;
RunWithFixedBlockSize(block_size, total, &threads);
}
}
}
void RunWithFixedBlockSizeTransformRangeConcurrently(int64_t block_size,
int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
threads->TransformRangeConcurrently(
block_size, total,
[=, &mu, &num_shards, &num_done_work, &work](int64_t start, int64_t end) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
mutex_lock l(mu);
++num_shards;
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
ASSERT_TRUE(work[i]);
}
const int64_t num_workers = (total + block_size - 1) / block_size;
if (num_workers < threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + num_workers);
}
}
TEST(ThreadPoolTest, TransformRangeConcurrently) {
ThreadPool threads(Env::Default(), "test", 16);
for (auto block_size : {1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto diff : {0, 1, 11, 102, 1003, 10005, 1000007}) {
const int64_t total = block_size + diff;
RunWithFixedBlockSizeTransformRangeConcurrently(block_size, total,
&threads);
}
}
}
TEST(ThreadPoolTest, NumShardsUsedByFixedBlockSizeScheduling) {
ThreadPool threads(Env::Default(), "test", 16);
  EXPECT_EQ(1, threads.NumShardsUsedByFixedBlockSizeScheduling(3, 3));
  EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(4, 3));
  EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(5, 3));
  EXPECT_EQ(2, threads.NumShardsUsedByFixedBlockSizeScheduling(6, 3));
  EXPECT_EQ(3, threads.NumShardsUsedByFixedBlockSizeScheduling(7, 3));
  EXPECT_EQ(7, threads.NumShardsUsedByFixedBlockSizeScheduling(7, 1));
  EXPECT_EQ(1, threads.NumShardsUsedByFixedBlockSizeScheduling(7, 0));
}
TEST(ThreadPoolTest, NumShardsUsedByTransformRangeConcurrently) {
ThreadPool threads(Env::Default(), "test", 16);
  EXPECT_EQ(1, threads.NumShardsUsedByTransformRangeConcurrently(3, 3));
  EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(3, 4));
  EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(3, 5));
  EXPECT_EQ(2, threads.NumShardsUsedByTransformRangeConcurrently(3, 6));
  EXPECT_EQ(3, threads.NumShardsUsedByTransformRangeConcurrently(3, 7));
  EXPECT_EQ(7, threads.NumShardsUsedByTransformRangeConcurrently(1, 7));
  EXPECT_EQ(1, threads.NumShardsUsedByTransformRangeConcurrently(0, 7));
}
void RunFixedBlockSizeShardingWithWorkerId(int64_t block_size, int64_t total,
ThreadPool* threads) {
mutex mu;
int64_t num_done_work = 0;
std::vector<std::atomic<bool>> work(total);
for (int i = 0; i < total; i++) {
work[i] = false;
}
const int64_t num_threads = threads->NumThreads();
std::vector<std::atomic<bool>> threads_running(num_threads + 1);
for (int i = 0; i < num_threads + 1; i++) {
threads_running[i] = false;
}
threads->ParallelForWithWorkerId(
total,
      ThreadPool::SchedulingParams(
          ThreadPool::SchedulingStrategy::kFixedBlockSize,
          std::nullopt, block_size),
[=, &mu, &num_done_work, &work, &threads_running](int64_t start,
int64_t end, int id) {
VLOG(1) << "Shard [" << start << "," << end << ")";
EXPECT_GE(start, 0);
EXPECT_LE(end, total);
EXPECT_GE(id, 0);
EXPECT_LE(id, num_threads);
EXPECT_FALSE(threads_running[id].exchange(true));
mutex_lock l(mu);
for (; start < end; ++start) {
EXPECT_FALSE(work[start].exchange(true));
++num_done_work;
}
EXPECT_TRUE(threads_running[id].exchange(false));
});
EXPECT_EQ(num_done_work, total);
for (int i = 0; i < total; i++) {
EXPECT_TRUE(work[i]);
}
}
TEST(ThreadPoolTest, ParallelForFixedBlockSizeSchedulingWithWorkerId) {
for (int32_t num_threads : {1, 2, 3, 9, 16, 31}) {
ThreadPool threads(Env::Default(), "test", num_threads);
for (int64_t block_size : {1, 7, 10, 64, 100, 256, 1000}) {
for (int64_t diff : {0, 1, 11, 102, 1003}) {
const int64_t total = block_size + diff;
RunFixedBlockSizeShardingWithWorkerId(block_size, total, &threads);
}
}
}
}
TEST(ThreadPool, ParallelFor) {
Context outer_context(ContextKind::kThread);
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
pool.ParallelFor(kWorkItems, kHugeCost,
[&outer_context, &work](int64_t begin, int64_t end) {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
TEST(ThreadPool, ParallelForWithAdaptiveSchedulingStrategy) {
Context outer_context(ContextKind::kThread);
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
pool.ParallelFor(
kWorkItems,
        ThreadPool::SchedulingParams(
            ThreadPool::SchedulingStrategy::kAdaptive,
            kHugeCost, std::nullopt),
[&outer_context, &work](int64_t begin, int64_t end) {
Context inner_context(ContextKind::kThread);
ASSERT_EQ(outer_context, inner_context);
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
}
}
TEST(ThreadPool, ParallelForWithWorkerId) {
int64_t kHugeCost = 1 << 30;
for (int num_threads = 1; num_threads < kNumThreads; num_threads++) {
fprintf(stderr, "Testing with %d threads\n", num_threads);
const int kWorkItems = 15;
std::atomic<bool> work[kWorkItems];
ThreadPool pool(Env::Default(), "test", num_threads);
for (int i = 0; i < kWorkItems; i++) {
work[i] = false;
}
std::atomic<bool> threads_running[kNumThreads + 1];
for (int i = 0; i < num_threads + 1; i++) {
threads_running[i] = false;
}
pool.ParallelForWithWorkerId(
kWorkItems, kHugeCost,
[&threads_running, &work](int64_t begin, int64_t end, int64_t id) {
ASSERT_LE(0, id);
ASSERT_LE(id, kNumThreads);
ASSERT_FALSE(threads_running[id].exchange(true));
for (int64_t i = begin; i < end; ++i) {
ASSERT_FALSE(work[i].exchange(true));
}
ASSERT_TRUE(threads_running[id].exchange(false));
threads_running[id] = false;
});
for (int i = 0; i < kWorkItems; i++) {
ASSERT_TRUE(work[i]);
}
for (int i = 0; i < num_threads + 1; i++) {
ASSERT_FALSE(threads_running[i]);
}
}
}
TEST(ThreadPool, Parallelism) {
ThreadPool pool(Env::Default(), "test", kNumThreads);
for (int iter = 0; iter < 2000; iter++) {
absl::Barrier barrier(kNumThreads);
absl::BlockingCounter counter(kNumThreads);
for (int t = 0; t < kNumThreads; ++t) {
pool.Schedule([&]() {
barrier.Block();
counter.DecrementCount();
});
}
counter.Wait();
}
}
static void BM_Sequential(::testing::benchmark::State& state) {
for (auto s : state) {
state.PauseTiming();
ThreadPool pool(Env::Default(), "test", kNumThreads);
int count = state.range(0);
mutex done_lock;
bool done_flag = false;
std::function<void()> work = [&pool, &count, &done_lock, &done_flag,
&work]() {
if (count--) {
pool.Schedule(work);
} else {
mutex_lock l(done_lock);
done_flag = true;
}
};
state.ResumeTiming();
work();
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
}
BENCHMARK(BM_Sequential)->Arg(200)->Arg(300);
static void BM_Parallel(::testing::benchmark::State& state) {
ThreadPool pool(Env::Default(), "test", kNumThreads);
std::atomic_int_fast32_t count(state.max_iterations);
mutex done_lock;
bool done_flag = false;
for (auto s : state) {
pool.Schedule([&count, &done_lock, &done_flag]() {
if (count.fetch_sub(1) == 1) {
mutex_lock l(done_lock);
done_flag = true;
}
});
}
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
BENCHMARK(BM_Parallel);
static void BM_ParallelFor(::testing::benchmark::State& state) {
int total = state.range(0);
int cost_per_unit = state.range(1);
ThreadPool pool(Env::Default(), "test", kNumThreads);
std::atomic_int_fast32_t count(state.max_iterations);
mutex done_lock;
bool done_flag = false;
for (auto s : state) {
pool.ParallelFor(
total, cost_per_unit,
[&count, &done_lock, &done_flag](int64_t begin, int64_t end) {
for (int64_t i = begin; i < end; ++i) {
if (count.fetch_sub(1) == 1) {
mutex_lock l(done_lock);
done_flag = true;
}
}
});
mutex_lock l(done_lock);
done_lock.Await(Condition(&done_flag));
}
}
BENCHMARK(BM_ParallelFor)
->ArgPair(1 << 10, 1)
->ArgPair(1 << 20, 1)
->ArgPair(1 << 10, 1 << 10)
->ArgPair(1 << 20, 1 << 10)
->ArgPair(1 << 10, 1 << 20)
->ArgPair(1 << 20, 1 << 20)
->ArgPair(1 << 10, 1 << 30)
->ArgPair(1 << 20, 1 << 30);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/threadpool_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be58cc48-cadb-4691-a6e1-36be63a06a34 | cpp | tensorflow/tensorflow | test_matchers | tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h | tensorflow/compiler/mlir/tf2xla/internal/test_matchers_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_TEST_MATCHERS_H_
#define TENSORFLOW_COMPILER_MLIR_TF2XLA_INTERNAL_TEST_MATCHERS_H_
#include <gmock/gmock.h>
#include "absl/status/statusor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tsl/platform/statusor.h"
template <typename T>
bool WasGraphAnalysisFailure(const absl::StatusOr<T>& status) {
return (status.status() ==
tensorflow::CompileToHloGraphAnalysisFailedError());
}
MATCHER(IsOkOrFiltered,
"Status was OK or equal to the Graph Analysis failure") {
bool is_ok = arg.ok();
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
return testing::ExplainMatchResult(
testing::IsTrue(), is_ok || graph_analysis_failure, result_listener);
}
MATCHER_P2(IncrementedOrFiltered, metric, value,
"Metric was incremented by value or Status equal to the Graph "
"Analysis failure") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
return testing::ExplainMatchResult(testing::Eq(metric), value,
result_listener);
}
MATCHER_P(ComputationProtoContains, regex,
"If not a Graph Analysis failure then matches the computation result "
"with the regex") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto proto = arg.value().computation->proto().DebugString();
return testing::ExplainMatchResult(testing::ContainsRegex(regex), proto,
result_listener);
}
MATCHER_P(XlaComputationProtoContains, regex,
"If not a Graph Analysis failure then matches the computation result "
"with the regex") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto proto = arg.value().proto().DebugString();
return testing::ExplainMatchResult(testing::ContainsRegex(regex), proto,
result_listener);
}
MATCHER_P(
HasMlirModuleWith, expected,
"If not a Graph Analysis failure then matches the mlir module result") {
auto graph_analysis_failure = WasGraphAnalysisFailure(arg);
if (graph_analysis_failure) {
return testing::ExplainMatchResult(testing::IsTrue(),
graph_analysis_failure, result_listener);
}
auto actual = arg.value();
return testing::ExplainMatchResult(testing::ContainsRegex(expected), actual,
result_listener);
}
#endif | #include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include <cstdint>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/service/hlo.pb.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tsl/platform/statusor.h"
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Not;
constexpr char kMetric[] = "/tensorflow/metric";
auto* counter =
tensorflow::monitoring::Counter<1>::New(kMetric, "description", "status");
constexpr char kOkStatus[] = "ok";
const int kArbitraryIntResult = 37;
template <typename T>
tsl::StatusOr<T> success(T t) {
return t;
}
absl::StatusOr<int> success() { return kArbitraryIntResult; }
template <typename T>
tsl::StatusOr<T> filtered(T t) {
return tsl::StatusOr<T>(tensorflow::CompileToHloGraphAnalysisFailedError());
}
absl::StatusOr<int> filtered() { return filtered(kArbitraryIntResult); }
absl::StatusOr<int> failed() {
return absl::StatusOr<int>(absl::InternalError("fail"));
}
TEST(TestUtil, MatchesOk) { ASSERT_THAT(success(), IsOkOrFiltered()); }
TEST(TestUtil, DoesntMatchesFailure) {
ASSERT_THAT(failed(), Not(IsOkOrFiltered()));
}
TEST(TestUtil, MatchesFiltered) { ASSERT_THAT(filtered(), IsOkOrFiltered()); }
TEST(TestUtil, IncrementsOk) {
CellReader<int64_t> reader(kMetric);
counter->GetCell(kOkStatus)->IncrementBy(1);
ASSERT_THAT(success(), IncrementedOrFiltered(reader.Delta(kOkStatus), 1));
}
TEST(TestUtil, FilteredDoesntIncrementsOk) {
CellReader<int64_t> reader(kMetric);
ASSERT_THAT(filtered(), IncrementedOrFiltered(reader.Delta(kOkStatus), 1));
}
TEST(TestUtil, FailureDoesntMatchIncrement) {
CellReader<int64_t> reader(kMetric);
ASSERT_THAT(failed(), Not(IncrementedOrFiltered(reader.Delta(kOkStatus), 1)));
}
tensorflow::XlaCompilationResult CreateXlaComputationResult(
const char* hlo_name) {
auto result = tensorflow::XlaCompilationResult();
xla::HloModuleProto hlo;
hlo.set_name(hlo_name);
result.computation = std::make_shared<xla::XlaComputation>(hlo);
return result;
}
TEST(TestUtil, ComputationContainsOk) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(success(result), ComputationProtoContains(arbitrary_hlo));
}
TEST(TestUtil, ComputationDoesNotContain) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
constexpr char bad_hlo[] = "bad_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(success(result), Not(ComputationProtoContains(bad_hlo)));
}
TEST(TestUtil, ComputationDoesNotContainFiltered) {
constexpr char arbitrary_hlo[] = "arbitrary_hlo";
constexpr char bad_hlo[] = "bad_hlo";
auto result = CreateXlaComputationResult(arbitrary_hlo);
ASSERT_THAT(filtered(result), ComputationProtoContains(bad_hlo));
}
TEST(TestUtil, MlirModuleHas) {
  constexpr char arbitrary_mlir[] = "arbitrary_mlir";
  ASSERT_THAT(success(arbitrary_mlir), HasMlirModuleWith(arbitrary_mlir));
}
TEST(TestUtil, MlirModuleDoesNotHave) {
  constexpr char arbitrary_mlir[] = "arbitrary_mlir";
  constexpr char bad_mlir[] = "bad_mlir";
  ASSERT_THAT(success(arbitrary_mlir), Not(HasMlirModuleWith(bad_mlir)));
}
TEST(TestUtil, MlirModuleDoesNotHaveFiltered) {
  constexpr char arbitrary_mlir[] = "arbitrary_mlir";
  constexpr char bad_mlir[] = "bad_mlir";
  ASSERT_THAT(filtered(arbitrary_mlir), HasMlirModuleWith(bad_mlir));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/test_matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
24c0b822-8f7f-46ea-8ca8-f947c4705560 | cpp | tensorflow/tensorflow | manual_constructor | tensorflow/core/lib/gtl/manual_constructor.h | tensorflow/core/lib/gtl/manual_constructor_test.cc | #ifndef TENSORFLOW_CORE_LIB_GTL_MANUAL_CONSTRUCTOR_H_
#define TENSORFLOW_CORE_LIB_GTL_MANUAL_CONSTRUCTOR_H_
#include <stddef.h>
#include <new>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
namespace tensorflow {
namespace gtl {
namespace internal {
#ifndef SWIG
template <int alignment, int size>
struct AlignType {};
template <int size>
struct AlignType<0, size> {
typedef char result[size];
};
#if defined(_MSC_VER)
#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __declspec(align(X))
#define TF_LIB_GTL_ALIGN_OF(T) __alignof(T)
#else
#define TF_LIB_GTL_ALIGN_ATTRIBUTE(X) __attribute__((aligned(X)))
#define TF_LIB_GTL_ALIGN_OF(T) __alignof__(T)
#endif
#if defined(TF_LIB_GTL_ALIGN_ATTRIBUTE)
#define TF_LIB_GTL_ALIGNTYPE_TEMPLATE(X) \
template <int size> \
struct AlignType<X, size> { \
typedef TF_LIB_GTL_ALIGN_ATTRIBUTE(X) char result[size]; \
}
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(16);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(32);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(64);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(128);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(256);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(512);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(1024);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(2048);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(4096);
TF_LIB_GTL_ALIGNTYPE_TEMPLATE(8192);
#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size) \
typename tensorflow::gtl::internal::AlignType<TF_LIB_GTL_ALIGN_OF(T), \
sizeof(T) * Size>::result
#undef TF_LIB_GTL_ALIGNTYPE_TEMPLATE
#undef TF_LIB_GTL_ALIGN_ATTRIBUTE
#else
#error "You must define TF_LIB_GTL_ALIGNED_CHAR_ARRAY for your compiler."
#endif
#else
template <typename Size>
struct AlignType {
typedef char result[Size];
};
#define TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size) \
tensorflow::gtl::internal::AlignType<Size * sizeof(T)>::result
#define TF_LIB_GTL_ALIGN_OF(Type) 16
#endif
}
}
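// Net effect of the machinery above: TF_LIB_GTL_ALIGNED_CHAR_ARRAY(T, Size)
// names a char array of sizeof(T) * Size bytes whose alignment is forced to
// TF_LIB_GTL_ALIGN_OF(T) through the per-alignment AlignType specializations
// (__attribute__((aligned(X))) or __declspec(align(X))). It is the raw,
// correctly aligned storage that ManualConstructor below placement-news into.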
template <typename Type>
class ManualConstructor {
public:
static void* operator new[](size_t size) {
return port::AlignedMalloc(size, TF_LIB_GTL_ALIGN_OF(Type));
}
static void operator delete[](void* mem) { port::AlignedFree(mem); }
inline Type* get() { return reinterpret_cast<Type*>(space_); }
inline const Type* get() const {
return reinterpret_cast<const Type*>(space_);
}
inline Type* operator->() { return get(); }
inline const Type* operator->() const { return get(); }
inline Type& operator*() { return *get(); }
inline const Type& operator*() const { return *get(); }
inline void Init() { new (space_) Type; }
#ifdef LANG_CXX11
template <typename... Ts>
inline void Init(Ts&&... args) {
new (space_) Type(std::forward<Ts>(args)...);
}
#else
template <typename T1>
inline void Init(const T1& p1) {
new (space_) Type(p1);
}
template <typename T1, typename T2>
inline void Init(const T1& p1, const T2& p2) {
new (space_) Type(p1, p2);
}
template <typename T1, typename T2, typename T3>
inline void Init(const T1& p1, const T2& p2, const T3& p3) {
new (space_) Type(p1, p2, p3);
}
template <typename T1, typename T2, typename T3, typename T4>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
new (space_) Type(p1, p2, p3, p4);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5) {
new (space_) Type(p1, p2, p3, p4, p5);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6) {
new (space_) Type(p1, p2, p3, p4, p5, p6);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7) {
new (space_) Type(p1, p2, p3, p4, p5, p6, p7);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7, typename T8>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7, typename T8, typename T9>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9) {
new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7, typename T8, typename T9, typename T10>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9, const T10& p10) {
new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
}
template <typename T1, typename T2, typename T3, typename T4, typename T5,
typename T6, typename T7, typename T8, typename T9, typename T10,
typename T11>
inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
const T5& p5, const T6& p6, const T7& p7, const T8& p8,
const T9& p9, const T10& p10, const T11& p11) {
new (space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
}
#endif
inline void Destroy() { get()->~Type(); }
private:
TF_LIB_GTL_ALIGNED_CHAR_ARRAY(Type, 1) space_;
};
#undef TF_LIB_GTL_ALIGNED_CHAR_ARRAY
#undef TF_LIB_GTL_ALIGN_OF
}
#endif | #include "tensorflow/core/lib/gtl/manual_constructor.h"
#include <stdint.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static int constructor_count_ = 0;
template <int kSize>
struct TestN {
TestN() { ++constructor_count_; }
~TestN() { --constructor_count_; }
char a[kSize];
};
typedef TestN<1> Test1;
typedef TestN<2> Test2;
typedef TestN<3> Test3;
typedef TestN<4> Test4;
typedef TestN<5> Test5;
typedef TestN<9> Test9;
typedef TestN<15> Test15;
}
namespace {
TEST(ManualConstructorTest, Sizeof) {
CHECK_EQ(sizeof(ManualConstructor<Test1>), sizeof(Test1));
CHECK_EQ(sizeof(ManualConstructor<Test2>), sizeof(Test2));
CHECK_EQ(sizeof(ManualConstructor<Test3>), sizeof(Test3));
CHECK_EQ(sizeof(ManualConstructor<Test4>), sizeof(Test4));
CHECK_EQ(sizeof(ManualConstructor<Test5>), sizeof(Test5));
CHECK_EQ(sizeof(ManualConstructor<Test9>), sizeof(Test9));
CHECK_EQ(sizeof(ManualConstructor<Test15>), sizeof(Test15));
CHECK_EQ(constructor_count_, 0);
ManualConstructor<Test1> mt[4];
CHECK_EQ(sizeof(mt), 4);
CHECK_EQ(constructor_count_, 0);
mt[0].Init();
CHECK_EQ(constructor_count_, 1);
mt[0].Destroy();
}
TEST(ManualConstructorTest, Alignment) {
struct {
char a;
ManualConstructor<void*> b;
} test1;
struct {
char a;
void* b;
} control1;
EXPECT_EQ(reinterpret_cast<char*>(test1.b.get()) - &test1.a,
reinterpret_cast<char*>(&control1.b) - &control1.a);
EXPECT_EQ(reinterpret_cast<intptr_t>(test1.b.get()) % sizeof(control1.b), 0);
struct {
char a;
ManualConstructor<long double> b;
} test2;
struct {
char a;
long double b;
} control2;
EXPECT_EQ(reinterpret_cast<char*>(test2.b.get()) - &test2.a,
reinterpret_cast<char*>(&control2.b) - &control2.a);
EXPECT_EQ(reinterpret_cast<intptr_t>(test2.b.get()) % alignof(long double),
0);
}
TEST(ManualConstructorTest, DefaultInitialize) {
struct X {
X() : x(123) {}
int x;
};
union {
ManualConstructor<X> x;
ManualConstructor<int> y;
} u;
*u.y = -1;
u.x.Init();
EXPECT_EQ(123, u.x->x);
}
TEST(ManualConstructorTest, ZeroInitializePOD) {
union {
ManualConstructor<int> x;
ManualConstructor<int> y;
} u;
*u.y = -1;
u.x.Init();
EXPECT_EQ(-1, *u.y);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/manual_constructor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/gtl/manual_constructor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
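A brief usage sketch for the class in the row above (hand-written for this edit, not taken from the TensorFlow tree): the wrapper reserves correctly aligned storage up front, while construction and destruction of the wrapped object are explicit.

#include <string>
#include "tensorflow/core/lib/gtl/manual_constructor.h"

void ManualConstructorExample() {
  tensorflow::ManualConstructor<std::string> s;  // storage only, no string yet
  s.Init("hello");        // placement-new constructs the std::string
  s->append(", world");   // operator-> forwards to the live object
  s.Destroy();            // explicit destructor call; storage stays reusable
}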
e1e05b95-3168-49b8-bae6-0e35ebebf454 | cpp | tensorflow/tensorflow | ptx_compiler | third_party/xla/xla/stream_executor/cuda/ptx_compiler.h | third_party/xla/xla/stream_executor/cuda/ptx_compiler_test.cc | #ifndef XLA_STREAM_EXECUTOR_CUDA_PTX_COMPILER_H_
#define XLA_STREAM_EXECUTOR_CUDA_PTX_COMPILER_H_
#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
absl::StatusOr<std::vector<uint8_t>> CompileGpuAsmUsingLibNvPtxCompiler(
int cc_major, int cc_minor, const char* ptx_contents, GpuAsmOpts options,
bool cancel_if_reg_spill);
absl::StatusOr<SemanticVersion> GetLibNvPtxCompilerVersion();
}
#endif | #include "xla/stream_executor/cuda/ptx_compiler.h"
#include <sys/types.h>
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/ptx_compiler_support.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace {
constexpr const char kSpillingPtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi(
.param .u64 _Z6kernelPi_param_0
)
.maxnreg 16
{
.reg .b32 %r<33>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
ld.global.u32 %r1, [%rd2+4];
ld.global.u32 %r2, [%rd2+8];
ld.global.u32 %r3, [%rd2+12];
ld.global.u32 %r4, [%rd2+16];
ld.global.u32 %r5, [%rd2+20];
ld.global.u32 %r6, [%rd2+24];
ld.global.u32 %r7, [%rd2+28];
ld.global.u32 %r8, [%rd2+32];
ld.global.u32 %r9, [%rd2+36];
ld.global.u32 %r10, [%rd2+40];
ld.global.u32 %r11, [%rd2+44];
ld.global.u32 %r12, [%rd2+48];
ld.global.u32 %r13, [%rd2+52];
ld.global.u32 %r14, [%rd2+56];
ld.global.u32 %r15, [%rd2+60];
add.s32 %r16, %r15, 15;
st.global.u32 [%rd2+60], %r16;
add.s32 %r17, %r14, 15;
st.global.u32 [%rd2+56], %r17;
add.s32 %r18, %r13, 15;
st.global.u32 [%rd2+52], %r18;
add.s32 %r19, %r12, 15;
st.global.u32 [%rd2+48], %r19;
add.s32 %r20, %r11, 15;
st.global.u32 [%rd2+44], %r20;
add.s32 %r21, %r10, 15;
st.global.u32 [%rd2+40], %r21;
add.s32 %r22, %r9, 15;
st.global.u32 [%rd2+36], %r22;
add.s32 %r23, %r8, 15;
st.global.u32 [%rd2+32], %r23;
add.s32 %r24, %r7, 15;
st.global.u32 [%rd2+28], %r24;
add.s32 %r25, %r6, 15;
st.global.u32 [%rd2+24], %r25;
add.s32 %r26, %r5, 15;
st.global.u32 [%rd2+20], %r26;
add.s32 %r27, %r4, 15;
st.global.u32 [%rd2+16], %r27;
add.s32 %r28, %r3, 15;
st.global.u32 [%rd2+12], %r28;
add.s32 %r29, %r2, 15;
st.global.u32 [%rd2+8], %r29;
add.s32 %r30, %r1, 15;
st.global.u32 [%rd2+4], %r30;
ld.global.u32 %r31, [%rd2];
add.s32 %r32, %r31, 15;
st.global.u32 [%rd2], %r32;
ret;
}
)";
constexpr const char kSimplePtx[] = R"(
.version 8.0
.target sm_52
.address_size 64
.visible .entry _Z6kernelPi (
.param .u64 _Z6kernelPi_param_0
)
{
.reg .b32 %r<16>;
.reg .b64 %rd<3>;
ld.param.u64 %rd1, [_Z6kernelPi_param_0];
cvta.to.global.u64 %rd2, %rd1;
mov.u32 %r1, 42;
st.global.u32 [%rd2], %r15;
ret;
})";
constexpr stream_executor::CudaComputeCapability kDefaultComputeCapability{5,
2};
absl::StatusOr<std::vector<uint8_t>> CompileHelper(
stream_executor::CudaComputeCapability cc, const char* const ptx_input,
bool disable_gpuasm_optimizations = false, bool cancel_if_reg_spill = false,
std::vector<std::string> extra_flags = {}) {
stream_executor::GpuAsmOpts options(disable_gpuasm_optimizations,
"", extra_flags);
return stream_executor::CompileGpuAsmUsingLibNvPtxCompiler(
cc.major, cc.minor, ptx_input, options, cancel_if_reg_spill);
}
class PtxCompilerTest : public ::testing::Test {
void SetUp() override {
if (!stream_executor::IsLibNvPtxCompilerSupported()) {
GTEST_SKIP();
}
}
};
TEST_F(PtxCompilerTest, IdentifiesUnsupportedArchitecture) {
EXPECT_THAT(
CompileHelper(stream_executor::CudaComputeCapability{100, 0}, kSimplePtx),
tsl::testing::StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_F(PtxCompilerTest, CanCompileSingleCompilationUnit) {
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSimplePtx),
tsl::testing::IsOk());
}
TEST_F(PtxCompilerTest, CancelsOnRegSpill) {
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSpillingPtx,
true,
true),
tsl::testing::StatusIs(absl::StatusCode::kCancelled));
EXPECT_THAT(CompileHelper(kDefaultComputeCapability, kSpillingPtx,
true,
false),
tsl::testing::IsOk());
}
TEST_F(PtxCompilerTest, AcceptsExtraArguments) {
auto reference_cubin = CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {});
auto cubin_with_line_info =
CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {"--generate-line-info"});
EXPECT_THAT(reference_cubin, tsl::testing::IsOk());
EXPECT_THAT(cubin_with_line_info, tsl::testing::IsOk());
EXPECT_GT(cubin_with_line_info->size(), reference_cubin->size());
EXPECT_THAT(
CompileHelper(kDefaultComputeCapability, kSimplePtx,
false,
false, {"--flag-does-not-exist"}),
tsl::testing::StatusIs(absl::StatusCode::kInternal));
}
TEST_F(PtxCompilerTest, ReturnsReasonableVersion) {
constexpr stream_executor::SemanticVersion kMinSupportedVersion = {12, 0, 0};
EXPECT_THAT(stream_executor::GetLibNvPtxCompilerVersion(),
tsl::testing::IsOkAndHolds(testing::Ge(kMinSupportedVersion)));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/ptx_compiler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/ptx_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
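A hedged sketch of calling the compiler entry point declared in the row above, mirroring the test's CompileHelper wrapper (the GpuAsmOpts constructor arguments follow the usage shown there; the PTX string is whatever the caller supplies):

#include <cstdint>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/stream_executor/cuda/ptx_compiler.h"
#include "xla/stream_executor/gpu/gpu_asm_opts.h"

absl::StatusOr<std::vector<uint8_t>> CompileForSm52(const char* ptx) {
  // Same option construction as CompileHelper: keep ptxas optimizations
  // enabled, no preferred CUDA dir, no extra flags.
  stream_executor::GpuAsmOpts options(/*disable_gpuasm_optimizations=*/false,
                                      "", {});
  return stream_executor::CompileGpuAsmUsingLibNvPtxCompiler(
      /*cc_major=*/5, /*cc_minor=*/2, ptx, options,
      /*cancel_if_reg_spill=*/false);
}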
379c40f5-6a9c-41ec-aaa3-7c8015cd1416 | cpp | tensorflow/tensorflow | quantized_add_op | tensorflow/core/kernels/quantized_add_op.cc | tensorflow/core/kernels/quantized_add_op_test.cc | #define EIGEN_USE_THREADS
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#define QUANTIZED_ADD_USE_NEON
#include <arm_neon.h>
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/meta_support.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/util/bcast.h"
namespace tensorflow {
namespace {
// Adds one quantized scalar to every element of `full_input`, writing results
// requantized onto the [output_min, output_max] scale.
template <class T, class Toutput>
void ScalarAddition(OpKernelContext* context, const T* full_input,
float full_input_min, float full_input_max,
int64_t num_elements, T scalar_input,
float scalar_input_min, float scalar_input_max,
float output_min, float output_max, Toutput* output) {
const Toutput scalar_in_output_range = RequantizeInNewRange<T, Toutput>(
scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);
for (int i = 0; i < num_elements; ++i) {
const Toutput full_input_in_output_range = RequantizeInNewRange<T, Toutput>(
full_input[i], full_input_min, full_input_max, output_min, output_max);
output[i] = full_input_in_output_range + scalar_in_output_range;
}
}
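// Shared idea for this function and the quint8 -> qint32 specializations that
// follow: map both operands onto the common [output_min, output_max] scale,
// then add plain integers. The specializations precompute the affine map
// once -- quantized 0 maps to input_0_int64 and quantized 1 to input_1_int64
// on the output scale, so value v lands at input_0_int64 + v * input_mult --
// and clamp to the qint32 limits instead of requantizing element by element
// through floats.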
#ifdef QUANTIZED_ADD_USE_NEON
template <>
void ScalarAddition(OpKernelContext* context, const quint8* full_input,
float full_input_min, float full_input_max,
int64 num_elements, quint8 scalar_input,
float scalar_input_min, float scalar_input_max,
float output_min, float output_max, qint32* output) {
const int32 scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(
scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);
const float input_0_float =
QuantizedToFloat<quint8>(0, full_input_min, full_input_max);
const float input_1_float =
QuantizedToFloat<quint8>(1, full_input_min, full_input_max);
const int64 input_0_int64 =
FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);
const int64 input_1_int64 =
FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max);
const int32 input_mult_int32 = input_1_int64 - input_0_int64;
const int64 lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64 highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
const int64x2_t input_0_64x2 = vmovq_n_s64(input_0_int64);
const int32x2_t input_mult_32x2 = vmov_n_s32(input_mult_int32);
const int32x4_t scalar_in_output_range_32x4 =
vmovq_n_s32(scalar_in_output_range);
int64 i = 0;
for (; i < (num_elements - 7); i += 8) {
const uint8* full_input_ptr = &(full_input->value) + i;
const std::array<int32x4_t, 2> output_value =
Requantize8x8To32Neon(full_input_ptr, input_0_64x2, input_mult_32x2);
const int32x4_t result_low_32x4 =
vaddq_s32(output_value[0], scalar_in_output_range_32x4);
const int32x4_t result_high_32x4 =
vaddq_s32(output_value[1], scalar_in_output_range_32x4);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, result_low_32x4);
vst1q_s32(output_ptr + 4, result_high_32x4);
}
for (; i < num_elements; ++i) {
const int64 full_input_value = static_cast<int64_t>(full_input[i]);
int64 full_input_in_output_range_64 =
input_0_int64 + (full_input_value * input_mult_int32);
full_input_in_output_range_64 =
std::max(full_input_in_output_range_64, lowest_quantized);
full_input_in_output_range_64 =
std::min(full_input_in_output_range_64, highest_quantized);
const int32 full_input_in_output_range =
static_cast<int32>(full_input_in_output_range_64);
output[i] = full_input_in_output_range + scalar_in_output_range;
}
}
#else
template <>
void ScalarAddition(OpKernelContext* context, const quint8* full_input,
float full_input_min, float full_input_max,
int64_t num_elements, quint8 scalar_input,
float scalar_input_min, float scalar_input_max,
float output_min, float output_max, qint32* output) {
const int32_t scalar_in_output_range = RequantizeInNewRange<quint8, qint32>(
scalar_input, scalar_input_min, scalar_input_max, output_min, output_max);
const float input_0_float =
QuantizedToFloat<quint8>(0, full_input_min, full_input_max);
const float input_1_float =
QuantizedToFloat<quint8>(1, full_input_min, full_input_max);
const int64_t input_0_int64 =
FloatToQuantizedUnclamped<qint32>(input_0_float, output_min, output_max);
const int64_t input_1_int64 =
FloatToQuantizedUnclamped<qint32>(input_1_float, output_min, output_max);
const int32_t input_mult_int32 = input_1_int64 - input_0_int64;
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64_t highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
for (int i = 0; i < num_elements; ++i) {
const int64_t full_input_value = static_cast<int64_t>(full_input[i]);
int64_t full_input_in_output_range_64 =
input_0_int64 + (full_input_value * input_mult_int32);
full_input_in_output_range_64 =
std::max(full_input_in_output_range_64, lowest_quantized);
full_input_in_output_range_64 =
std::min(full_input_in_output_range_64, highest_quantized);
const int32_t full_input_in_output_range =
static_cast<int32>(full_input_in_output_range_64);
output[i] = full_input_in_output_range + scalar_in_output_range;
}
}
#endif
// Element-wise sum of two equal-length quantized vectors; both operands are
// requantized onto the shared output scale before adding.
template <class T, class Toutput>
void VectorAddition(OpKernelContext* context, const T* x_data, float min_x,
float max_x, const T* y_data, float min_y, float max_y,
int64_t num_elements, float output_min, float output_max,
Toutput* output) {
for (int i = 0; i < num_elements; ++i) {
const Toutput x_in_output_range = RequantizeInNewRange<T, Toutput>(
x_data[i], min_x, max_x, output_min, output_max);
const Toutput y_in_output_range = RequantizeInNewRange<T, Toutput>(
y_data[i], min_y, max_y, output_min, output_max);
output[i] = x_in_output_range + y_in_output_range;
}
}
#ifdef QUANTIZED_ADD_USE_NEON
template <>
void VectorAddition(OpKernelContext* context, const quint8* x_data, float min_x,
float max_x, const quint8* y_data, float min_y, float max_y,
int64 num_elements, float output_min, float output_max,
qint32* output) {
const float x_0_float = QuantizedToFloat<quint8>(0, min_x, max_x);
const float x_1_float = QuantizedToFloat<quint8>(1, min_x, max_x);
const int64 x_0_int64 =
FloatToQuantizedUnclamped<qint32>(x_0_float, output_min, output_max);
const int64 x_1_int64 =
FloatToQuantizedUnclamped<qint32>(x_1_float, output_min, output_max);
const int32 x_mult_int32 = x_1_int64 - x_0_int64;
const float y_0_float = QuantizedToFloat<quint8>(0, min_y, max_y);
const float y_1_float = QuantizedToFloat<quint8>(1, min_y, max_y);
const int64 y_0_int64 =
FloatToQuantizedUnclamped<qint32>(y_0_float, output_min, output_max);
const int64 y_1_int64 =
FloatToQuantizedUnclamped<qint32>(y_1_float, output_min, output_max);
const int32 y_mult_int32 = y_1_int64 - y_0_int64;
const int64 lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64 highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
const int64x2_t x_0_64x2 = vmovq_n_s64(x_0_int64);
const int32x2_t x_mult_32x2 = vmov_n_s32(x_mult_int32);
const int64x2_t y_0_64x2 = vmovq_n_s64(y_0_int64);
const int32x2_t y_mult_32x2 = vmov_n_s32(y_mult_int32);
int64 i = 0;
for (; i < (num_elements - 7); i += 8) {
const uint8* x_ptr = &(x_data->value) + i;
const std::array<int32x4_t, 2> x_output_value =
Requantize8x8To32Neon(x_ptr, x_0_64x2, x_mult_32x2);
const uint8* y_ptr = &(y_data->value) + i;
const std::array<int32x4_t, 2> y_output_value =
Requantize8x8To32Neon(y_ptr, y_0_64x2, y_mult_32x2);
const int32x4_t result_low_32x4 =
vaddq_s32(x_output_value[0], y_output_value[0]);
const int32x4_t result_high_32x4 =
vaddq_s32(x_output_value[1], y_output_value[1]);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, result_low_32x4);
vst1q_s32(output_ptr + 4, result_high_32x4);
}
for (; i < num_elements; ++i) {
const int64 x_value = static_cast<int64_t>(x_data[i]);
int64 x_in_output_range_64 = x_0_int64 + (x_value * x_mult_int32);
x_in_output_range_64 = std::max(x_in_output_range_64, lowest_quantized);
x_in_output_range_64 = std::min(x_in_output_range_64, highest_quantized);
const int32 x_in_output_range = static_cast<int32>(x_in_output_range_64);
const int64 y_value = static_cast<int64_t>(y_data[i]);
int64 y_in_output_range_64 = y_0_int64 + (y_value * y_mult_int32);
y_in_output_range_64 = std::max(y_in_output_range_64, lowest_quantized);
y_in_output_range_64 = std::min(y_in_output_range_64, highest_quantized);
const int32 y_in_output_range = static_cast<int32>(y_in_output_range_64);
output[i] = x_in_output_range + y_in_output_range;
}
}
#else
template <>
void VectorAddition(OpKernelContext* context, const quint8* x_data, float min_x,
float max_x, const quint8* y_data, float min_y, float max_y,
int64_t num_elements, float output_min, float output_max,
qint32* output) {
const float x_0_float = QuantizedToFloat<quint8>(0, min_x, max_x);
const float x_1_float = QuantizedToFloat<quint8>(1, min_x, max_x);
const int64_t x_0_int64 =
FloatToQuantizedUnclamped<qint32>(x_0_float, output_min, output_max);
const int64_t x_1_int64 =
FloatToQuantizedUnclamped<qint32>(x_1_float, output_min, output_max);
const int32_t x_mult_int32 = x_1_int64 - x_0_int64;
const float y_0_float = QuantizedToFloat<quint8>(0, min_y, max_y);
const float y_1_float = QuantizedToFloat<quint8>(1, min_y, max_y);
const int64_t y_0_int64 =
FloatToQuantizedUnclamped<qint32>(y_0_float, output_min, output_max);
const int64_t y_1_int64 =
FloatToQuantizedUnclamped<qint32>(y_1_float, output_min, output_max);
const int32_t y_mult_int32 = y_1_int64 - y_0_int64;
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64_t highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
for (int i = 0; i < num_elements; ++i) {
const int64_t x_value = static_cast<int64_t>(x_data[i]);
int64_t x_in_output_range_64 = x_0_int64 + (x_value * x_mult_int32);
x_in_output_range_64 = std::max(x_in_output_range_64, lowest_quantized);
x_in_output_range_64 = std::min(x_in_output_range_64, highest_quantized);
const int32_t x_in_output_range = static_cast<int32>(x_in_output_range_64);
const int64_t y_value = static_cast<int64_t>(y_data[i]);
int64_t y_in_output_range_64 = y_0_int64 + (y_value * y_mult_int32);
y_in_output_range_64 = std::max(y_in_output_range_64, lowest_quantized);
y_in_output_range_64 = std::min(y_in_output_range_64, highest_quantized);
const int32_t y_in_output_range = static_cast<int32>(y_in_output_range_64);
output[i] = x_in_output_range + y_in_output_range;
}
}
#endif
// Adds a broadcast vector to a tensor whose flat size is a multiple of the
// vector's length (the rank-2 broadcast case routed here by Compute()).
template <class T, class Toutput>
void VectorTensorAddition(const T* vector_data, float min_vector,
float max_vector, int64_t vector_num_elements,
const T* tensor_data, float min_tensor,
float max_tensor, int64_t tensor_num_elements,
float output_min, float output_max, Toutput* output) {
for (int i = 0; i < tensor_num_elements; ++i) {
const int64_t vector_i = i % vector_num_elements;
const Toutput vector_in_output_range = RequantizeInNewRange<T, Toutput>(
vector_data[vector_i], min_vector, max_vector, output_min, output_max);
const Toutput tensor_in_output_range = RequantizeInNewRange<T, Toutput>(
tensor_data[i], min_tensor, max_tensor, output_min, output_max);
output[i] = vector_in_output_range + tensor_in_output_range;
}
}
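// Broadcast mechanics: `i % vector_num_elements` replays the vector across
// the flattened tensor, which is why the tensor's element count must be a
// whole multiple of the vector's.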
#ifdef QUANTIZED_ADD_USE_NEON
template <>
void VectorTensorAddition(const quint8* vector_data, float min_vector,
float max_vector, int64 vector_num_elements,
const quint8* tensor_data, float min_tensor,
float max_tensor, int64 tensor_num_elements,
float output_min, float output_max, qint32* output) {
const float vector_0_float =
QuantizedToFloat<quint8>(0, min_vector, max_vector);
const float vector_1_float =
QuantizedToFloat<quint8>(1, min_vector, max_vector);
const int64 vector_0_int64 =
FloatToQuantizedUnclamped<qint32>(vector_0_float, output_min, output_max);
const int64 vector_1_int64 =
FloatToQuantizedUnclamped<qint32>(vector_1_float, output_min, output_max);
const int32 vector_mult_int32 = vector_1_int64 - vector_0_int64;
const float tensor_0_float =
QuantizedToFloat<quint8>(0, min_tensor, max_tensor);
const float tensor_1_float =
QuantizedToFloat<quint8>(1, min_tensor, max_tensor);
const int64 tensor_0_int64 =
FloatToQuantizedUnclamped<qint32>(tensor_0_float, output_min, output_max);
const int64 tensor_1_int64 =
FloatToQuantizedUnclamped<qint32>(tensor_1_float, output_min, output_max);
const int32 tensor_mult_int32 = tensor_1_int64 - tensor_0_int64;
const int64 lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64 highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
const int64x2_t vector_0_64x2 = vmovq_n_s64(vector_0_int64);
const int32x2_t vector_mult_32x2 = vmov_n_s32(vector_mult_int32);
const int64x2_t tensor_0_64x2 = vmovq_n_s64(tensor_0_int64);
const int32x2_t tensor_mult_32x2 = vmov_n_s32(tensor_mult_int32);
for (int64 base_i = 0; base_i < tensor_num_elements;
base_i += vector_num_elements) {
int64 i = base_i;
int64 vector_i = 0;
for (; vector_i < (vector_num_elements - 7); vector_i += 8, i += 8) {
const uint8* vector_ptr = &(vector_data->value) + vector_i;
const std::array<int32x4_t, 2> vector_output_value =
Requantize8x8To32Neon(vector_ptr, vector_0_64x2, vector_mult_32x2);
const uint8* tensor_ptr = &(tensor_data->value) + i;
const std::array<int32x4_t, 2> tensor_output_value =
Requantize8x8To32Neon(tensor_ptr, tensor_0_64x2, tensor_mult_32x2);
const int32x4_t result_low_32x4 =
vaddq_s32(vector_output_value[0], tensor_output_value[0]);
const int32x4_t result_high_32x4 =
vaddq_s32(vector_output_value[1], tensor_output_value[1]);
int32* output_ptr = &(output->value) + i;
vst1q_s32(output_ptr + 0, result_low_32x4);
vst1q_s32(output_ptr + 4, result_high_32x4);
}
for (; vector_i < vector_num_elements; ++vector_i, ++i) {
const int64 vector_value = static_cast<int64_t>(vector_data[vector_i]);
int64 vector_in_output_range_64 =
vector_0_int64 + (vector_value * vector_mult_int32);
vector_in_output_range_64 =
std::max(vector_in_output_range_64, lowest_quantized);
vector_in_output_range_64 =
std::min(vector_in_output_range_64, highest_quantized);
const int32 vector_in_output_range =
static_cast<int32>(vector_in_output_range_64);
const int64 tensor_value = static_cast<int64_t>(tensor_data[i]);
int64 tensor_in_output_range_64 =
tensor_0_int64 + (tensor_value * tensor_mult_int32);
tensor_in_output_range_64 =
std::max(tensor_in_output_range_64, lowest_quantized);
tensor_in_output_range_64 =
std::min(tensor_in_output_range_64, highest_quantized);
const int32 tensor_in_output_range =
static_cast<int32>(tensor_in_output_range_64);
output[i] = vector_in_output_range + tensor_in_output_range;
}
}
}
#else
template <>
void VectorTensorAddition(const quint8* vector_data, float min_vector,
float max_vector, int64_t vector_num_elements,
const quint8* tensor_data, float min_tensor,
float max_tensor, int64_t tensor_num_elements,
float output_min, float output_max, qint32* output) {
const float vector_0_float =
QuantizedToFloat<quint8>(0, min_vector, max_vector);
const float vector_1_float =
QuantizedToFloat<quint8>(1, min_vector, max_vector);
const int64_t vector_0_int64 =
FloatToQuantizedUnclamped<qint32>(vector_0_float, output_min, output_max);
const int64_t vector_1_int64 =
FloatToQuantizedUnclamped<qint32>(vector_1_float, output_min, output_max);
const int32_t vector_mult_int32 = vector_1_int64 - vector_0_int64;
const float tensor_0_float =
QuantizedToFloat<quint8>(0, min_tensor, max_tensor);
const float tensor_1_float =
QuantizedToFloat<quint8>(1, min_tensor, max_tensor);
const int64_t tensor_0_int64 =
FloatToQuantizedUnclamped<qint32>(tensor_0_float, output_min, output_max);
const int64_t tensor_1_int64 =
FloatToQuantizedUnclamped<qint32>(tensor_1_float, output_min, output_max);
const int32_t tensor_mult_int32 = tensor_1_int64 - tensor_0_int64;
const int64_t lowest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::lowest());
const int64_t highest_quantized =
static_cast<int64_t>(Eigen::NumTraits<qint32>::highest());
for (int i = 0; i < tensor_num_elements; ++i) {
const int64_t vector_i = i % vector_num_elements;
const int64_t vector_value = static_cast<int64_t>(vector_data[vector_i]);
int64_t vector_in_output_range_64 =
vector_0_int64 + (vector_value * vector_mult_int32);
vector_in_output_range_64 =
std::max(vector_in_output_range_64, lowest_quantized);
vector_in_output_range_64 =
std::min(vector_in_output_range_64, highest_quantized);
const int32_t vector_in_output_range =
static_cast<int32>(vector_in_output_range_64);
const int64_t tensor_value = static_cast<int64_t>(tensor_data[i]);
int64_t tensor_in_output_range_64 =
tensor_0_int64 + (tensor_value * tensor_mult_int32);
tensor_in_output_range_64 =
std::max(tensor_in_output_range_64, lowest_quantized);
tensor_in_output_range_64 =
std::min(tensor_in_output_range_64, highest_quantized);
const int32_t tensor_in_output_range =
static_cast<int32>(tensor_in_output_range_64);
output[i] = vector_in_output_range + tensor_in_output_range;
}
}
#endif
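// The NEON variants above share one loop shape: Requantize8x8To32Neon lifts
// eight quint8 values per iteration onto the qint32 output scale, vaddq_s32
// forms the sums, and a scalar tail loop applies the same clamped affine map
// to the remaining (num_elements % 8) elements.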
}
template <class T, class Toutput>
class QuantizedAddOp : public OpKernel {
public:
explicit QuantizedAddOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* context) override {
const Tensor& x = context->input(0);
const Tensor& y = context->input(1);
const Tensor& min_x_tensor = context->input(2);
const Tensor& max_x_tensor = context->input(3);
const Tensor& min_y_tensor = context->input(4);
const Tensor& max_y_tensor = context->input(5);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_x_tensor.shape()),
errors::InvalidArgument("`min_x` must be rank 0 but is rank ",
min_x_tensor.dims()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_x_tensor.shape()),
errors::InvalidArgument("`max_x` must be rank 0 but is rank ",
max_x_tensor.dims()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(min_y_tensor.shape()),
errors::InvalidArgument("`min_y` must be rank 0 but is rank ",
min_y_tensor.dims()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(max_y_tensor.shape()),
errors::InvalidArgument("`max_y` must be rank 0 but is rank ",
max_y_tensor.dims()));
const float min_x = min_x_tensor.scalar<float>()();
const float max_x = max_x_tensor.scalar<float>()();
const float min_y = min_y_tensor.scalar<float>()();
const float max_y = max_y_tensor.scalar<float>()();
BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape()));
if (!bcast.IsValid()) {
context->SetStatus(errors::InvalidArgument(
"Incompatible shapes: ", x.shape().DebugString(), " vs. ",
y.shape().DebugString()));
return;
}
Tensor* z;
OP_REQUIRES_OK(context, context->allocate_output(
0, BCast::ToShape(bcast.output_shape()), &z));
OP_REQUIRES(context, (max_x > min_x),
errors::InvalidArgument("max_x must be larger than min_x."));
OP_REQUIRES(context, (max_y > min_y),
errors::InvalidArgument("max_y must be larger than min_y."));
const T* x_data = x.flat<T>().data();
const T* y_data = y.flat<T>().data();
Toutput* z_data = z->flat<Toutput>().data();
const float smallest_min = std::min(min_x, min_y);
const float largest_max = std::max(max_x, max_y);
const float biggest_range =
std::max(std::abs(smallest_min), std::abs(largest_max));
const float output_range = (biggest_range * (1 << 14));
const float min_z_value = -output_range;
const float max_z_value = output_range;
const int ndims = bcast.x_reshape().size();
if (ndims <= 1) {
if (x.NumElements() == 1) {
ScalarAddition<T, Toutput>(context, y_data, min_y, max_y,
y.NumElements(), x_data[0], min_x, max_x,
min_z_value, max_z_value, z_data);
} else if (y.NumElements() == 1) {
ScalarAddition<T, Toutput>(context, x_data, min_x, max_x,
x.NumElements(), y_data[0], min_y, max_y,
min_z_value, max_z_value, z_data);
} else {
VectorAddition<T, Toutput>(context, x_data, min_x, max_x, y_data, min_y,
max_y, x.NumElements(), min_z_value,
max_z_value, z_data);
}
} else if (ndims == 2) {
const T* vector_data;
int64_t vector_num_elements;
float vector_min;
float vector_max;
const T* tensor_data;
int64_t tensor_num_elements;
float tensor_min;
float tensor_max;
if (x.NumElements() < y.NumElements()) {
vector_data = x_data;
vector_num_elements = x.NumElements();
vector_min = min_x;
vector_max = max_x;
tensor_data = y_data;
tensor_num_elements = y.NumElements();
tensor_min = min_y;
tensor_max = max_y;
} else {
vector_data = y_data;
vector_num_elements = y.NumElements();
vector_min = min_y;
vector_max = max_y;
tensor_data = x_data;
tensor_num_elements = x.NumElements();
tensor_min = min_x;
tensor_max = max_x;
}
OP_REQUIRES(context, vector_num_elements > 0,
errors::InvalidArgument("Must have some elements to add"));
VectorTensorAddition<T, Toutput>(
vector_data, vector_min, vector_max, vector_num_elements, tensor_data,
tensor_min, tensor_max, tensor_num_elements, min_z_value, max_z_value,
z_data);
} else {
LOG(INFO) << "ndims=" << ndims;
LOG(INFO) << "bcast.x_reshape()="
<< TensorShape(bcast.x_reshape()).DebugString();
LOG(INFO) << "bcast.y_reshape()="
<< TensorShape(bcast.y_reshape()).DebugString();
LOG(INFO) << "bcast.x_bcast()="
<< TensorShape(bcast.x_bcast()).DebugString();
LOG(INFO) << "bcast.y_bcast()="
<< TensorShape(bcast.y_bcast()).DebugString();
context->SetStatus(errors::Unimplemented(
"Broadcast between ", context->input(0).shape().DebugString(),
" and ", context->input(1).shape().DebugString(),
" is not supported yet."));
return;
}
Tensor* z_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min));
z_min->flat<float>()(0) = min_z_value;
Tensor* z_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max));
z_max->flat<float>()(0) = max_z_value;
}
};
REGISTER_KERNEL_BUILDER(Name("QuantizedAdd")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("T1")
.TypeConstraint<quint8>("T2")
.TypeConstraint<qint32>("Toutput"),
QuantizedAddOp<quint8, qint32>);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace ops {
namespace {
void TestAdd(const std::vector<int64_t>& x_shape,
const std::vector<float>& x_values, float x_min_value,
float x_max_value, const std::vector<int64_t>& y_shape,
const std::vector<float>& y_values, float y_min_value,
float y_max_value, const std::vector<int64_t>& expected_shape,
const std::vector<float>& expected_values, double tolerance) {
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Tensor x_quantized_tensor(DT_QUINT8, x_float_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(x_float_tensor, x_min_value,
x_max_value, &x_quantized_tensor);
Output x =
Const(root.WithOpName("x"), Input::Initializer(x_quantized_tensor));
Output x_min = Const(root.WithOpName("x_min"), x_min_value);
Output x_max = Const(root.WithOpName("x_max"), x_max_value);
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Tensor y_quantized_tensor(DT_QUINT8, y_float_tensor.shape());
FloatTensorToQuantizedInPlace<quint8>(y_float_tensor, y_min_value,
y_max_value, &y_quantized_tensor);
Output y =
Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor));
Output y_min = Const(root.WithOpName("y_min"), y_min_value);
Output y_max = Const(root.WithOpName("y_max"), y_max_value);
ops::QuantizedAdd add = ops::QuantizedAdd(root.WithOpName("add"), x, y, x_min,
x_max, y_min, y_max);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
{add.z, add.min_z, add.max_z}, &outputs));
const Tensor& z_quantized = outputs[0];
const float z_min = outputs[1].flat<float>()(0);
const float z_max = outputs[2].flat<float>()(0);
Tensor z_float = QuantizedTensorToFloat<qint32>(z_quantized, z_min, z_max);
Tensor expected_z_float(DT_FLOAT, TensorShape(expected_shape));
test::FillValues<float>(&expected_z_float, expected_values);
test::ExpectTensorNear<float>(expected_z_float, z_float, tolerance);
}
void TestAddShape(const std::vector<int64_t>& x_shape,
const std::vector<int64_t>& y_shape) {
const size_t x_num_elements = TensorShape(x_shape).num_elements();
std::vector<float> x_values(x_num_elements);
for (int i = 0; i < x_num_elements; ++i) {
x_values[i] = i % 256;
}
const float x_min_value = 0.0f;
const float x_max_value = 256.0f;
const size_t y_num_elements = TensorShape(y_shape).num_elements();
std::vector<float> y_values(y_num_elements);
for (int i = 0; i < y_num_elements; ++i) {
y_values[i] = ((i + 23) % 123) - 50;
}
const float y_min_value = -150.0f;
const float y_max_value = 150.0f;
Scope root = Scope::NewRootScope();
Tensor x_float_tensor(DT_FLOAT, TensorShape(x_shape));
test::FillValues<float>(&x_float_tensor, x_values);
Output x = Const(root.WithOpName("x"), Input::Initializer(x_float_tensor));
Tensor y_float_tensor(DT_FLOAT, TensorShape(y_shape));
test::FillValues<float>(&y_float_tensor, y_values);
Output y = Const(root.WithOpName("y"), Input::Initializer(y_float_tensor));
Add add = Add(root.WithOpName("add"), x, y);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
TF_EXPECT_OK(session.Run(ClientSession::FeedType(), {add.z}, &outputs));
const Tensor& expected_values_tensor = outputs[0];
const float* expected_values_data =
expected_values_tensor.flat<float>().data();
std::vector<float> expected_values(
expected_values_data,
expected_values_data + expected_values_tensor.NumElements());
std::vector<int64_t> expected_shape;
for (const int64_t dim : expected_values_tensor.shape().dim_sizes()) {
expected_shape.push_back(dim);
}
TestAdd(x_shape, x_values, x_min_value, x_max_value, y_shape, y_values,
y_min_value, y_max_value, expected_shape, expected_values, 256.0);
}
void TimeAdd(const std::vector<int64_t>& x_shape,
const std::vector<int64_t>& y_shape, int64_t iterations) {
TestAddShape(x_shape, y_shape);
Scope root = Scope::NewRootScope();
Tensor x_quantized_tensor(DT_QUINT8, TensorShape(x_shape));
Output placeholder = Placeholder(root.WithOpName("placeholder"), DT_QUINT8);
Output x_min = Const(root.WithOpName("x_min"), 0.0f);
Output x_max = Const(root.WithOpName("x_max"), 1.0f);
Tensor y_quantized_tensor(DT_QUINT8, TensorShape(y_shape));
Output y =
Const(root.WithOpName("y"), Input::Initializer(y_quantized_tensor));
Output y_min = Const(root.WithOpName("y_min"), 0.0f);
Output y_max = Const(root.WithOpName("y_max"), 1.0f);
ops::QuantizedAdd add = ops::QuantizedAdd(root.WithOpName("add"), placeholder,
y, x_min, x_max, y_min, y_max);
TF_EXPECT_OK(root.status());
ClientSession session(root);
std::vector<Tensor> outputs;
int64_t total_duration = 0;
for (int i = 0; i < iterations; ++i) {
const int64_t start_time = Env::Default()->NowMicros();
TF_EXPECT_OK(session.Run({{placeholder, x_quantized_tensor}},
{add.z, add.min_z, add.max_z}, &outputs));
const int64_t end_time = Env::Default()->NowMicros();
total_duration += end_time - start_time;
}
const int64_t one_run_duration = total_duration / iterations;
const int64_t num_ops = outputs[0].NumElements();
const double million_ops_per_second =
(iterations * num_ops) / static_cast<double>(total_duration);
LOG(INFO) << "TimeAdd: " << TensorShape(x_shape).DebugString() << " * "
<< TensorShape(y_shape).DebugString()
<< ": iterations=" << iterations
<< ", MOps/s=" << million_ops_per_second
<< ", one_run_duration=" << one_run_duration
<< ", total_duration=" << total_duration;
}
void TestManualScalar() {
TestAdd(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {1}, {10.0f}, -100.0f, 100.0f, {10},
{11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
1.0f);
TestAdd(
{1}, {10.0f}, -100.0f, 100.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {10},
{11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
1.0f);
}
void TestScalar() {
TestAddShape({100}, {1});
TestAddShape({1}, {100});
}
void TestManualVector() {
TestAdd({10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f},
0.0f, 10.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {10},
{2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f},
1.0f);
}
void TestVector() { TestAddShape({100}, {100}); }
void TestManualVectorPlusTensor() {
TestAdd(
{10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {2, 10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {2, 10},
{2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f,
12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f},
1.0f);
TestAdd({2, 10}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
8.0f, 9.0f, 10.0f, 11.0f, 12.0f, 13.0f, 14.0f,
15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {10},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}, 0.0f,
10.0f, {2, 10}, {2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f,
16.0f, 18.0f, 20.0f, 12.0f, 14.0f, 16.0f, 18.0f,
20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f},
1.0f);
TestAdd(
{5, 2}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f},
0.0f, 10.0f, {2, 5, 2},
{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f,
11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f, 18.0f, 19.0f, 20.0f},
0.0f, 20.0f, {2, 5, 2},
{2.0f, 4.0f, 6.0f, 8.0f, 10.0f, 12.0f, 14.0f, 16.0f, 18.0f, 20.0f,
12.0f, 14.0f, 16.0f, 18.0f, 20.0f, 22.0f, 24.0f, 26.0f, 28.0f, 30.0f},
1.0f);
}
void TestVectorPlusTensor() {
TestAddShape({100}, {2, 100});
TestAddShape({2, 100}, {100});
TestAddShape({5, 2}, {2, 5, 2});
}
void BenchmarkTensorScalar() {
TimeAdd({200}, {1}, 1000);
TimeAdd({10000}, {1}, 100);
TimeAdd({1000000}, {1}, 10);
TimeAdd({10000000}, {1}, 1);
}
void BenchmarkVector() {
TimeAdd({200}, {200}, 1000);
TimeAdd({10000}, {10000}, 100);
TimeAdd({1000000}, {1000000}, 10);
TimeAdd({10000000}, {10000000}, 1);
}
void BenchmarkVectorPlusTensor() {
TimeAdd({10, 20}, {20}, 100);
TimeAdd({10, 1000}, {1000}, 10);
TimeAdd({1000, 1000}, {1000}, 1);
TimeAdd({10000, 1000}, {1000}, 1);
TimeAdd({100, 100}, {100}, 10);
TimeAdd({10000, 100}, {100}, 1);
TimeAdd({100000, 100}, {100}, 1);
}
}
}
}
#define RUN_TEST(t) \
TEST(QuantizedAddOpTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestManualScalar);
RUN_TEST(TestManualVector);
RUN_TEST(TestManualVectorPlusTensor);
RUN_TEST(TestScalar);
RUN_TEST(TestVector);
RUN_TEST(TestVectorPlusTensor);
#if defined(__ANDROID__)
RUN_TEST(BenchmarkTensorScalar);
RUN_TEST(BenchmarkVector);
RUN_TEST(BenchmarkVectorPlusTensor);
#endif
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_add_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_add_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
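To make the kernel's output-range selection concrete, here is the arithmetic from QuantizedAddOp::Compute applied to the TestManualScalar inputs above (x in [0, 10], y in [-100, 100]); the 2^14 factor leaves qint32 headroom so sums of requantized quint8 values cannot overflow:

// Values traced from TestManualScalar; mirrors the code in Compute().
const float smallest_min = std::min(0.0f, -100.0f);  // -100
const float largest_max = std::max(10.0f, 100.0f);   //  100
const float biggest_range =
    std::max(std::abs(smallest_min), std::abs(largest_max));  // 100
const float output_range = biggest_range * (1 << 14);         // 1'638'400
// => min_z_value = -1'638'400, max_z_value = +1'638'400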
c4d93f8c-26b9-4357-95af-c244f2433a63 | cpp | tensorflow/tensorflow | simple_planner | tensorflow/lite/simple_planner.cc | tensorflow/lite/simple_planner_test.cc | #include "tensorflow/lite/simple_planner.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
}
SimplePlanner::SimplePlanner(TfLiteContext* context,
std::unique_ptr<GraphInfo> graph_info)
: context_(context), graph_info_(std::move(graph_info)) {}
SimplePlanner::~SimplePlanner() { FreeAllAllocations(); }
void SimplePlanner::FreeAllAllocations() {
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
allocs_[i].free();
}
}
TfLiteStatus SimplePlanner::ResetAllocations() {
FreeAllAllocations();
allocs_.clear();
allocs_.resize(graph_info_->num_tensors());
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
if (allocs_[i].node > node && allocs_[i].size > 0) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::PlanAllocations() {
TF_LITE_ENSURE_STATUS(ResetAllocations());
alloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
std::vector<int> refcounts(graph_info_->num_tensors(), 0);
auto allocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] != kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
alloc_node_[tensor] = node;
return kTfLiteOk;
};
auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
if (alloc_node_[tensor] == kNodeNotAssigned) {
return kTfLiteOk;
}
TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
dealloc_node_[tensor] = node;
return kTfLiteOk;
};
for (int tensor_index : graph_info_->outputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
for (int tensor_index : graph_info_->variables()) {
refcounts[tensor_index]++;
TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
for (int tensor_index : graph_info_->inputs()) {
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
}
}
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]++;
}
}
}
for (size_t i = 0; i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_outputs = node.outputs;
for (int j = 0; j < node_outputs->size; ++j) {
int tensor_index = node_outputs->data[j];
TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
}
TfLiteIntArray* node_inputs = node.inputs;
for (int j = 0; j < node_inputs->size; ++j) {
int tensor_index = node_inputs->data[j];
if (tensor_index != kTfLiteOptionalTensor) {
refcounts[tensor_index]--;
if (refcounts[tensor_index] == 0) {
TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
}
}
}
}
return kTfLiteOk;
}
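// Net plan produced above: every graph output, variable and input carries one
// extra reference so it stays live for the whole invocation; any other tensor
// is allocated at the node that first writes it (alloc_node_) and released at
// the node that consumes its last remaining reference (dealloc_node_).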
TfLiteStatus SimplePlanner::ExecuteAllocations(int first_node, int last_node) {
alloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
dealloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
allocs_.resize(graph_info_->num_tensors());
const size_t num_execution_nodes = graph_info_->num_execution_nodes();
for (size_t i = first_node;
i <= static_cast<size_t>(last_node) && i < num_execution_nodes; ++i) {
const TfLiteNode& node = graph_info_->node(i);
TfLiteIntArray* node_temporaries = node.temporaries;
for (int j = 0; j < node_temporaries->size; ++j) {
int tensor_index = node_temporaries->data[j];
alloc_node_[tensor_index] = i;
dealloc_node_[tensor_index] = i;
}
}
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
bool allocated = false;
if (alloc_node_[i] >= first_node && alloc_node_[i] <= last_node) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[i].size != 0) {
allocs_[i].free();
}
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
} else if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
allocs_[i].size == 0) {
allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
}
}
if (allocated) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ReleaseNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
allocs_[i].free();
tensor.data.raw = nullptr;
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::AcquireNonPersistentMemory() {
const int num_tensors = static_cast<int>(graph_info_->num_tensors());
TfLiteTensor* tensors = graph_info_->tensors();
for (int i = 0; i < num_tensors; ++i) {
TfLiteTensor& tensor = tensors[i];
if (tensor.allocation_type == kTfLiteArenaRw) {
TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
}
}
return kTfLiteOk;
}
TfLiteStatus SimplePlanner::ResolveTensorAllocation(int tensor_index) {
TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
if (tensor.allocation_type == kTfLiteArenaRw) {
if (allocs_[tensor_index].size != 0) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
}
if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
tensor.data.raw = allocs_[tensor_index].ptr;
}
return kTfLiteOk;
}
} | #include "tensorflow/lite/simple_planner.h"
#include <algorithm>
#include <cstdarg>
#include <initializer_list>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
namespace tflite {
namespace {
class TestOp {
public:
TestOp(std::initializer_list<int> inputs, std::initializer_list<int> outputs,
std::initializer_list<int> temporaries)
: inputs_(inputs), outputs_(outputs), temporaries_(temporaries) {}
const std::vector<int>& inputs() const { return inputs_; }
const std::vector<int>& outputs() const { return outputs_; }
const std::vector<int>& temporaries() const { return temporaries_; }
const TfLiteRegistration& registration() const { return registration_; }
private:
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> temporaries_;
TfLiteRegistration registration_{};
};
// Builds a TfLite-style graph from initializer lists. The tensor count is the
// largest index mentioned anywhere plus one, and tensor i gets (i + 1) * 3
// bytes of kTfLiteArenaRw storage so distinct sizes are easy to spot.
class TestGraph {
public:
TestGraph(std::initializer_list<int> inputs,
std::initializer_list<TestOp> nodes,
std::initializer_list<int> outputs)
: inputs_(inputs), outputs_(outputs) {
int max_tensor_index = 0;
for (int t : inputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (int t : outputs) {
max_tensor_index = std::max(max_tensor_index, t);
}
for (const auto& node : nodes) {
auto int_array = [](const std::vector<int>& x) {
TfLiteIntArray* lite = TfLiteIntArrayCreate(x.size());
for (size_t i = 0; i < x.size(); i++) lite->data[i] = x[i];
return lite;
};
registrations_.push_back(node.registration());
nodes_.push_back(TfLiteNode());
nodes_.back().inputs = int_array(node.inputs());
for (int t : node.inputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().outputs = int_array(node.outputs());
for (int t : node.outputs()) {
max_tensor_index = std::max(max_tensor_index, t);
}
nodes_.back().temporaries = int_array(node.temporaries());
for (int t : node.temporaries()) {
max_tensor_index = std::max(max_tensor_index, t);
}
}
for (int i = 0; i <= max_tensor_index; ++i) {
tensors_.push_back(TfLiteTensor());
tensors_.back().allocation_type = kTfLiteArenaRw;
tensors_.back().bytes = (i + 1) * 3;
}
}
~TestGraph() {
for (auto node : nodes_) {
TfLiteIntArrayFree(node.inputs);
TfLiteIntArrayFree(node.outputs);
TfLiteIntArrayFree(node.temporaries);
}
}
const std::vector<TfLiteNode>& nodes() { return nodes_; }
std::vector<TfLiteTensor>* tensors() { return &tensors_; }
const std::vector<int>& inputs() { return inputs_; }
const std::vector<int>& outputs() { return outputs_; }
const std::vector<int>& variables() { return variables_; }
const std::vector<TfLiteRegistration>& registrations() {
return registrations_;
}
void SetVariables(const std::vector<int>& variables) {
variables_ = variables;
}
void Swap(TestGraph* other) {
std::swap(nodes_, other->nodes_);
std::swap(tensors_, other->tensors_);
std::swap(inputs_, other->inputs_);
std::swap(outputs_, other->outputs_);
std::swap(variables_, other->variables_);
}
private:
std::vector<TfLiteNode> nodes_;
std::vector<TfLiteTensor> tensors_;
std::vector<TfLiteRegistration> registrations_;
std::vector<int> inputs_;
std::vector<int> outputs_;
std::vector<int> variables_;
};
class TestGraphInfo : public GraphInfo {
public:
explicit TestGraphInfo(TestGraph* graph) : graph_(graph) {}
size_t num_tensors() const override { return graph_->tensors()->size(); }
const TfLiteRegistration& registration(size_t index) const override {
return graph_->registrations()[index];
}
TfLiteTensor* tensor(size_t index) override {
return &graph_->tensors()->at(index);
}
TfLiteTensor* tensors() override { return graph_->tensors()->data(); }
size_t num_execution_nodes() const override { return graph_->nodes().size(); }
size_t num_total_nodes() const override { return graph_->nodes().size(); }
const TfLiteNode& node(size_t index) const override {
return graph_->nodes()[index];
}
size_t node_index(size_t index) const override { return index; }
const std::vector<int>& inputs() const override { return graph_->inputs(); }
const std::vector<int>& outputs() const override { return graph_->outputs(); }
const std::vector<int>& variables() const override {
return graph_->variables();
}
private:
TestGraph* graph_;
};
void ReportError(TfLiteContext* context, const char* format, ...) {
const size_t kBufferSize = 1024;
char temp_buffer[kBufferSize];
va_list args;
va_start(args, format);
vsnprintf(temp_buffer, kBufferSize, format, args);
va_end(args);
LOG(INFO) << temp_buffer;
}
// Fixture that owns a SimplePlanner over a TestGraphInfo view of the graph
// under test and exposes the planner's lifecycle calls as CHECK-ing helpers.
class SimplePlannerTest : public ::testing::Test {
protected:
void SetGraph(TestGraph* graph, bool preserve_all_tensors = false) {
graph_ = graph;
context_.ReportError = ReportError;
planner_ = std::make_unique<SimplePlanner>(
&context_, std::unique_ptr<GraphInfo>(new TestGraphInfo(graph)));
CHECK(planner_->ResetAllocations() == kTfLiteOk);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void SwapGraph(TestGraph* graph) {
graph_->Swap(graph);
CHECK(planner_->PlanAllocations() == kTfLiteOk);
}
void Execute(int start, int end) {
CHECK(planner_->ExecuteAllocations(start, end) == kTfLiteOk);
}
void ReleaseNonPersistentMemory() {
CHECK(planner_->ReleaseNonPersistentMemory() == kTfLiteOk);
}
void AcquireNonPersistentMemory() {
CHECK(planner_->AcquireNonPersistentMemory() == kTfLiteOk);
}
void ResetAllocationsAfter(int node) {
CHECK(planner_->ResetAllocationsAfter(node) == kTfLiteOk);
}
bool HasNonPersistentMemory() {
return planner_ && planner_->HasNonPersistentMemory();
}
bool IsAllocated(int tensor_index) {
return (*graph_->tensors())[tensor_index].data.raw != nullptr;
}
TfLiteContext context_;
TestGraph* graph_;
std::unique_ptr<SimplePlanner> planner_;
};
TEST_F(SimplePlannerTest, EmptyGraph) {
TestGraph graph({}, {}, {});
SetGraph(&graph);
Execute(0, 10);
}
TEST_F(SimplePlannerTest, GraphWithNoOps) {
TestGraph graph({0, 10}, {}, {5, 11});
SetGraph(&graph);
Execute(0, 10);
EXPECT_FALSE(IsAllocated(5));
EXPECT_FALSE(IsAllocated(11));
}
TEST_F(SimplePlannerTest, ZeroSizedTensors) {
TestGraph graph({1}, {{{1}, {2}, {}}}, {2});
(*graph.tensors())[1].bytes = 0;
SetGraph(&graph);
ASSERT_EQ(planner_->ExecuteAllocations(0, 10), kTfLiteOk);
EXPECT_FALSE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
}
TEST_F(SimplePlannerTest, SimpleGraph) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphInputsPreserved) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithTemporary) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_FALSE(IsAllocated(5));
}
TEST_F(SimplePlannerTest, SimpleGraphWithPersistentResetAllocationsAfter) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4}, {5}},
{{4}, {3}, {}}
},
{3});
(*graph.tensors())[5].allocation_type = kTfLiteArenaRwPersistent;
SetGraph(&graph);
Execute(0, 10);
void* tensor5_ptr = (*graph.tensors())[5].data.raw;
ResetAllocationsAfter(0);
EXPECT_TRUE(IsAllocated(0));
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_FALSE(IsAllocated(3));
EXPECT_FALSE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
Execute(0, 10);
EXPECT_TRUE(tensor5_ptr == (*graph.tensors())[5].data.raw);
}
TEST_F(SimplePlannerTest, SimpleGraphOptionalOutput) {
TestGraph graph({0, 1},
{
{{0, 1}, {2}, {}},
{{2, 0}, {4, 5}, {}},
{{4, 5}, {3}, {}}
},
{-1, 3});
SetGraph(&graph);
Execute(0, 10);
EXPECT_TRUE(IsAllocated(1));
EXPECT_TRUE(IsAllocated(2));
EXPECT_TRUE(IsAllocated(3));
EXPECT_TRUE(IsAllocated(4));
EXPECT_TRUE(IsAllocated(5));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_planner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/simple_planner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9b2eab20-5371-4a38-8c1d-c212f158648d | cpp | tensorflow/tensorflow | alias_passthrough_params | third_party/xla/xla/service/gpu/transforms/alias_passthrough_params.cc | third_party/xla/xla/service/gpu/transforms/alias_passthrough_params_test.cc | #include "xla/service/gpu/transforms/alias_passthrough_params.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
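// Aliases entry parameters with the root-tuple outputs they pass through to
// unchanged, unless an alias already exists for that output or parameter.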
absl::StatusOr<bool> AliasPassthroughParams::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const HloInstruction* root = module->entry_computation()->root_instruction();
if (module->entry_computation()->num_parameters() == 0 ||
root->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_set<int64_t> used_params;
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (root->operand(i)->opcode() == HloOpcode::kParameter &&
used_params.count(root->operand(i)->parameter_number()) == 0) {
VLOG(2) << "Parameter " << root->operand(i)->parameter_number()
<< " with shape " << root->operand(i)->shape().ToString()
<< " in module " << module->name()
<< " is passed-through to root tuple element " << i << ": "
<< root->shape().ToString();
if (module->input_output_alias_config().OutputHasAlias({i}) ||
module->input_output_alias_config().ParameterHasAlias(
root->operand(i)->parameter_number(), {})) {
VLOG(2) << "Skip setting the above pass-through alias as an alias may"
<< " have been set up for alising resource update.";
continue;
}
TF_RETURN_IF_ERROR(module->input_output_alias_config().SetUpAlias(
{i},
root->operand(i)->parameter_number(),
{}));
used_params.insert(root->operand(i)->parameter_number());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/alias_passthrough_params.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
class AliasPassthroughParamsTest : public HloTestBase {};
TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number);
}
TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p0)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
}
TEST_F(AliasPassthroughParamsTest, PresetAliases) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
auto& preset_alias = module->input_output_alias_config();
TF_EXPECT_OK(preset_alias.SetUpAlias({1},
0,
{}));
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_result = module->input_output_alias_config();
EXPECT_EQ(1, alias_result.GetAliasedParameter({2})->parameter_number);
EXPECT_FALSE(alias_result.OutputHasAlias({0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/alias_passthrough_params.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/alias_passthrough_params_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8bb040b7-ee6d-4626-b127-24058237c255 | cpp | tensorflow/tensorflow | string_util | tensorflow/compiler/mlir/tensorflow/utils/string_util.cc | tensorflow/lite/string_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/string_util.h"
#include <ostream>
#include <string>
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Operation.h"
namespace tensorflow {
std::string OpAsString(mlir::Operation& op) {
std::string out;
llvm::raw_string_ostream op_stream(out);
op.print(op_stream, mlir::OpPrintingFlags()
.elideLargeElementsAttrs()
.assumeVerified()
.skipRegions()
.printGenericOpForm());
return out;
}
std::string AttrAsString(mlir::Attribute& attr) {
std::string out;
llvm::raw_string_ostream attr_stream(out);
attr.print(attr_stream);
return out;
}
std::ostream& operator<<(std::ostream& o, const LoggableOperation& op) {
return o << OpAsString(op.v);
}
std::ostream& operator<<(std::ostream& o, const LoggableAttribute& attr) {
return o << AttrAsString(attr.v);
}
std::ostream& operator<<(std::ostream& o, const LoggableStringRef& ref) {
return o << ref.v.str();
}
} | #include "tensorflow/lite/string_util.h"
#include <stdint.h>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/string_type.h"
namespace tflite {
TEST(StringUtil, TestStringUtil) {
Interpreter interpreter;
interpreter.AddTensors(3);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
TfLiteTensor* t1 = interpreter.tensor(1);
t1->type = kTfLiteString;
t1->allocation_type = kTfLiteDynamic;
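  // Raw serialization of a string tensor holding the single string "XYZ":
  // a 4-byte string count, two 4-byte offsets, then the character data.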
union {
char raw_bytes[15];
struct {
int32_t num_strs;
int32_t offsets[2];
char str_data[3];
} tensor_data;
} data;
data.tensor_data = {1, {12, 15}, {'X', 'Y', 'Z'}};
TfLiteQuantization quant;
quant.type = kTfLiteNoQuantization;
quant.params = nullptr;
interpreter.SetTensorParametersReadOnly(
2, kTfLiteString, "", {1}, quant, data.raw_bytes, sizeof(data.raw_bytes));
TfLiteTensor* t2 = interpreter.tensor(2);
ASSERT_EQ(interpreter.AllocateTensors(), kTfLiteOk);
char s0[] = "ABC";
string s1 = "DEFG";
char s2[] = "";
DynamicBuffer buf0;
ASSERT_EQ(buf0.AddString(s0, 3), kTfLiteOk);
DynamicBuffer buf1;
ASSERT_EQ(buf1.AddString(s1.data(), s1.length()), kTfLiteOk);
ASSERT_EQ(buf0.AddString(s2, 0), kTfLiteOk);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 2;
new_shape->data[1] = 1;
buf0.WriteToTensor(t0, new_shape);
buf1.WriteToTensorAsVector(t1);
EXPECT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
EXPECT_EQ(t1->dims->size, 1);
EXPECT_EQ(t1->dims->data[0], 1);
ASSERT_EQ(GetStringCount(t0), 2);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "ABC");
str_ref = GetString(t0, 1);
ASSERT_EQ(string(str_ref.str, str_ref.len), "");
ASSERT_EQ(t0->bytes, 19);
ASSERT_EQ(GetStringCount(t1), 1);
str_ref = GetString(t1, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "DEFG");
ASSERT_EQ(t1->bytes, 16);
ASSERT_EQ(GetStringCount(t2), 1);
str_ref = GetString(t2, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), "XYZ");
ASSERT_EQ(t2->bytes, 15);
}
TEST(StringUtil, AddStringOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string(max_size + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, AddStringToFullBufferOverflow32Length) {
const size_t max_size = 100;
DynamicBuffer buf{max_size};
std::string big_string((max_size / 2) + 1, 'A');
ASSERT_EQ(buf.AddString({big_string.data(), big_string.length()}), kTfLiteOk);
EXPECT_EQ(buf.AddString({big_string.data(), big_string.length()}),
kTfLiteError);
}
TEST(StringUtil, TruncatesCharDataToLen) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
char fake_big[] = "ABCADASDA";
ASSERT_EQ(buf.AddString({fake_big, 3}), kTfLiteOk);
buf.WriteToTensorAsVector(t0);
StringRef added_string = GetString(t0, 0);
EXPECT_EQ(added_string.len, 3);
EXPECT_EQ(string(added_string.str, 3), "ABC");
}
TEST(StringUtil, TestAddJoinedStringCharSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, ' ');
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " ABC DEFG XYZ");
ASSERT_EQ(t0->bytes, 26);
}
TEST(StringUtil, TestAddJoinedStringStringRefSeparator) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
char s[] = " - ";
char s0[] = "";
char s1[] = "ABC";
char s2[] = "DEFG";
char s3[] = "";
char s4[] = "XYZ";
DynamicBuffer buf;
buf.AddJoinedString({{s0, 0}, {s1, 3}, {s2, 4}, {s3, 0}, {s4, 3}}, {s, 3});
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 1);
StringRef str_ref;
str_ref = GetString(t0, 0);
ASSERT_EQ(string(str_ref.str, str_ref.len), " - ABC - DEFG - - XYZ");
ASSERT_EQ(t0->bytes, 34);
}
TEST(StringUtil, TestEmptyList) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
DynamicBuffer buf;
buf.WriteToTensorAsVector(t0);
ASSERT_EQ(GetStringCount(t0), 0);
ASSERT_EQ(t0->bytes, 8);
}
TEST(StringUtil, TestShapes) {
Interpreter interpreter;
interpreter.AddTensors(1);
TfLiteTensor* t0 = interpreter.tensor(0);
t0->type = kTfLiteString;
t0->allocation_type = kTfLiteDynamic;
t0->dims = TfLiteIntArrayCreate(2);
t0->dims->data[0] = 2;
t0->dims->data[1] = 1;
DynamicBuffer buf;
buf.AddString("ABC", 3);
buf.AddString("X", 1);
buf.WriteToTensor(t0, nullptr);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 2);
EXPECT_EQ(t0->dims->data[1], 1);
auto new_shape = TfLiteIntArrayCreate(2);
new_shape->data[0] = 1;
new_shape->data[1] = 2;
buf.WriteToTensor(t0, new_shape);
ASSERT_EQ(t0->dims->size, 2);
EXPECT_EQ(t0->dims->data[0], 1);
EXPECT_EQ(t0->dims->data[1], 2);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/string_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/string_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5eb69629-495f-4efe-8e13-19cb96a2dada | cpp | google/cel-cpp | data_interface | common/internal/data_interface.h | common/internal/data_interface_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_INTERNAL_DATA_INTERFACE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_INTERNAL_DATA_INTERFACE_H_
#include <type_traits>
#include "absl/base/attributes.h"
#include "common/native_type.h"
namespace cel {
class TypeInterface;
class ValueInterface;
namespace common_internal {
class DataInterface;
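// Non-copyable, non-movable base shared by TypeInterface and ValueInterface;
// concrete subclasses identify themselves through GetNativeTypeId().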
class DataInterface {
public:
DataInterface(const DataInterface&) = delete;
DataInterface(DataInterface&&) = delete;
virtual ~DataInterface() = default;
DataInterface& operator=(const DataInterface&) = delete;
DataInterface& operator=(DataInterface&&) = delete;
protected:
DataInterface() = default;
private:
friend class cel::TypeInterface;
friend class cel::ValueInterface;
friend struct NativeTypeTraits<DataInterface>;
virtual NativeTypeId GetNativeTypeId() const = 0;
};
}
template <>
struct NativeTypeTraits<common_internal::DataInterface> final {
static NativeTypeId Id(const common_internal::DataInterface& data_interface) {
return data_interface.GetNativeTypeId();
}
};
template <typename T>
struct NativeTypeTraits<
T, std::enable_if_t<std::conjunction_v<
std::is_base_of<common_internal::DataInterface, T>,
std::negation<std::is_same<T, common_internal::DataInterface>>>>>
final {
static NativeTypeId Id(const common_internal::DataInterface& data_interface) {
return NativeTypeTraits<common_internal::DataInterface>::Id(data_interface);
}
};
}
#endif | #include "common/internal/data_interface.h"
#include <memory>
#include "common/native_type.h"
#include "internal/testing.h"
namespace cel::common_internal {
namespace {
namespace data_interface_test {
class TestInterface final : public DataInterface {
private:
NativeTypeId GetNativeTypeId() const override {
return NativeTypeId::For<TestInterface>();
}
};
}
TEST(DataInterface, GetNativeTypeId) {
auto data = std::make_unique<data_interface_test::TestInterface>();
EXPECT_EQ(NativeTypeId::Of(*data),
NativeTypeId::For<data_interface_test::TestInterface>());
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/internal/data_interface.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/internal/data_interface_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
0587d70c-ea77-47c1-b1b0-a74f752d5522 | cpp | tensorflow/tensorflow | memory_bound_loop_optimizer | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer.cc | third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer_test.cc | #include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/heap_simulator/allocation_block.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace memory_space_assignment {
namespace {
std::optional<int64_t> GetInstructionIndex(
const HloInstruction* instruction,
const absl::flat_hash_map<const HloInstruction*, int64_t>&
instructions_to_index) {
auto it = instructions_to_index.find(instruction);
return it == instructions_to_index.end() ? std::nullopt
: std::optional<int64_t>(it->second);
}
}
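// The heap models four unrolled copies of the loop: an "even" buffer occupies
// iterations 0 and 2 and its "odd" counterpart iterations 1 and 3, so
// double-buffered allocations repeat cleanly in steady state.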
void LoopOptimizerBestFitHeap::CreateBufferInterval(
const AllocationBlock& allocation_block,
const AllocationBlock* colocated_with) {
buffer_intervals_[&allocation_block] =
BufferInterval({&allocation_block,
allocation_block.size,
allocation_block.inclusive_start_time,
allocation_block.end_time,
{},
colocated_with == nullptr});
if (colocated_with) {
buffer_intervals_[colocated_with].colocations.push_back(&allocation_block);
}
}
std::optional<HeapSimulator::Chunk>
LoopOptimizerBestFitHeap::MaybeFindChunkCandidate(
const AllocationBlock& allocation_block, int64_t preferred_offset) {
Chunk chunk_candidate = FindChunkCandidate(
buffer_intervals_[&allocation_block], preferred_offset);
if (chunk_candidate.chunk_end() <= size_limit_per_heap_) {
return chunk_candidate;
}
return std::nullopt;
}
std::optional<HeapSimulator::Chunk>
LoopOptimizerBestFitHeap::FindAndCommitChunkCandidate(
const AllocationBlock& allocation_block, int64_t preferred_offset) {
std::optional<Chunk> chunk =
MaybeFindChunkCandidate(allocation_block, preferred_offset);
if (chunk.has_value()) {
CommitChunk(buffer_intervals_[&allocation_block], chunk.value());
}
return chunk;
}
void LoopOptimizerBestFitHeap::RemoveChunk(int64_t start_time, int64_t end_time,
Chunk chunk) {
CHECK(interval_tree_.Remove(start_time, end_time, chunk));
}
void LoopOptimizerBestFitHeap::RemoveEvenChunks(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
std::optional<HeapSimulator::Chunk>& chunk) {
RemoveChunk(begin_idx_in_loop, end_idx_in_loop, chunk.value());
RemoveChunk(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, chunk.value());
}
void LoopOptimizerBestFitHeap::RemoveOddChunks(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
std::optional<HeapSimulator::Chunk>& chunk) {
RemoveChunk(begin_idx_in_loop + loop_size_, end_idx_in_loop + loop_size_,
chunk.value());
RemoveChunk(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, chunk.value());
}
void LoopOptimizerBestFitHeap::RemoveEvenOddChunkPair(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop,
EvenOddChunkPair& chunks) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_chunk, odd_chunk] = chunks;
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
RemoveOddChunks(begin_idx_in_loop, end_idx_in_loop, odd_chunk);
}
const AllocationBlock& LoopOptimizerBestFitHeap::GetAllocationBlock(
int64_t start_time, int64_t end_time, int64_t size) {
allocation_blocks_.push_back(
{start_time, end_time, size, static_cast<int64_t>(-1),
static_cast<int64_t>(-1),
static_cast<int64_t>(allocation_blocks_.size())});
return allocation_blocks_.back();
}
const AllocationBlock& LoopOptimizerBestFitHeap::CreateEvenAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block =
GetAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
return first_allocation_block;
}
const AllocationBlock& LoopOptimizerBestFitHeap::CreateOddAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block = GetAllocationBlock(
begin_idx_in_loop + loop_size_, end_idx_in_loop + loop_size_, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
return first_allocation_block;
}
void LoopOptimizerBestFitHeap::CheckAllocationIntervalValid(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop) const {
CHECK_LE(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(-1 * loop_size_, begin_idx_in_loop);
CHECK_LT(begin_idx_in_loop, loop_size_);
CHECK_LE(0, end_idx_in_loop);
CHECK_LT(end_idx_in_loop, 2 * loop_size_);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, 2 * loop_size_);
}
void LoopOptimizerBestFitHeap::ShiftAllocationIntervalIfRequired(
int64_t& begin_idx_in_loop, int64_t& end_idx_in_loop) const {
if (begin_idx_in_loop < 0) {
begin_idx_in_loop += loop_size_;
end_idx_in_loop += loop_size_;
}
}
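// Probes for an even/odd chunk pair without leaving anything committed: the
// even chunk is committed only transiently so the odd probe can see it.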
EvenOddChunkPair LoopOptimizerBestFitHeap::FindEvenAndOddAllocationBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
std::pair<int64_t, int64_t> preferred_offsets) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_offset, odd_offset] = preferred_offsets;
const AllocationBlock& even_allocation =
CreateEvenAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
const AllocationBlock& odd_allocation =
CreateOddAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> even_chunk =
FindAndCommitChunkCandidate(even_allocation, even_offset);
if (!even_chunk.has_value()) {
return {std::nullopt, std::nullopt};
}
std::optional<HeapSimulator::Chunk> odd_chunk =
MaybeFindChunkCandidate(odd_allocation, odd_offset);
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
if (odd_chunk.has_value()) {
return {even_chunk, odd_chunk};
}
return {std::nullopt, std::nullopt};
}
EvenOddChunkPair LoopOptimizerBestFitHeap::AllocateEvenAndOddBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
std::pair<int64_t, int64_t> preferred_offsets) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
auto [even_offset, odd_offset] = preferred_offsets;
const AllocationBlock& even_allocation =
CreateEvenAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
const AllocationBlock& odd_allocation =
CreateOddAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> even_chunk =
FindAndCommitChunkCandidate(even_allocation, even_offset);
if (!even_chunk.has_value()) {
return {std::nullopt, std::nullopt};
}
std::optional<HeapSimulator::Chunk> odd_chunk =
FindAndCommitChunkCandidate(odd_allocation, odd_offset);
if (odd_chunk.has_value()) {
return {even_chunk, odd_chunk};
}
RemoveEvenChunks(begin_idx_in_loop, end_idx_in_loop, even_chunk);
return {std::nullopt, std::nullopt};
}
const AllocationBlock&
LoopOptimizerBestFitHeap::CreateSameEvenAndOddAllocationBlock(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size) {
const AllocationBlock& first_allocation_block =
GetAllocationBlock(begin_idx_in_loop, end_idx_in_loop, size);
CreateBufferInterval(first_allocation_block);
const AllocationBlock& second_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 1 * loop_size_,
end_idx_in_loop + 1 * loop_size_, size);
CreateBufferInterval(second_allocation_block, &first_allocation_block);
const AllocationBlock& third_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 2 * loop_size_,
end_idx_in_loop + 2 * loop_size_, size);
CreateBufferInterval(third_allocation_block, &first_allocation_block);
const AllocationBlock& fourth_allocation_block =
GetAllocationBlock(begin_idx_in_loop + 3 * loop_size_,
end_idx_in_loop + 3 * loop_size_, size);
CreateBufferInterval(fourth_allocation_block, &first_allocation_block);
return first_allocation_block;
}
EvenOddChunkPair LoopOptimizerBestFitHeap::FindSameEvenAndOddAllocationBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
int64_t preferred_offset) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, loop_size_);
const AllocationBlock& allocation = CreateSameEvenAndOddAllocationBlock(
begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> chunk =
MaybeFindChunkCandidate(allocation, preferred_offset);
return {chunk, chunk};
}
EvenOddChunkPair LoopOptimizerBestFitHeap::AllocateSameEvenAndOddBetween(
int64_t begin_idx_in_loop, int64_t end_idx_in_loop, int64_t size,
int64_t preferred_offset) {
CheckAllocationIntervalValid(begin_idx_in_loop, end_idx_in_loop);
ShiftAllocationIntervalIfRequired(begin_idx_in_loop, end_idx_in_loop);
CHECK_LE(end_idx_in_loop - begin_idx_in_loop + 1, loop_size_);
const AllocationBlock& allocation = CreateSameEvenAndOddAllocationBlock(
begin_idx_in_loop, end_idx_in_loop, size);
std::optional<HeapSimulator::Chunk> chunk =
FindAndCommitChunkCandidate(allocation, preferred_offset);
return {chunk, chunk};
}
std::string LoopOptimizerBestFitHeap::MemoryUsageToAsciiArt(
int64_t begin_iteration, int64_t end_iteration) const {
CHECK_LE(0, begin_iteration);
CHECK_LE(begin_iteration, end_iteration);
return interval_tree_.NodesOverlappingInTimeToAsciiArt(
loop_size_ * begin_iteration, loop_size_ * (end_iteration + 1) - 1,
loop_size_);
}
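// Free memory per step within one steady-state iteration (the third of the
// four modeled iterations): size limit minus committed usage.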
std::vector<int64_t> LoopOptimizerBestFitHeap::RemainingMemoryByTime() const {
std::vector<int64_t> memory_used_by_time =
interval_tree_.MemoryUsedInInterval(loop_size_ * 2, loop_size_ * 3 - 1);
std::vector<int64_t> remaining_memory_by_time(loop_size_);
for (int i = 0; i < loop_size_; ++i) {
remaining_memory_by_time[i] = size_limit_per_heap_ - memory_used_by_time[i];
}
return remaining_memory_by_time;
}
int64_t LoopOptimizerBestFitHeap::LastMemoryOffsetOccupied() const {
return interval_tree_.HeapSizeInInterval(loop_size_ * 2, loop_size_ * 4 - 1);
}
absl::StatusOr<std::unique_ptr<MemoryBoundLoopOptimizer>>
MemoryBoundLoopOptimizer::Create(
int loop_start, int loop_end, uint64_t alternate_memory_size,
const MemoryBoundLoopOptimizerOptions& options,
const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
const CostAnalysis& cost_analysis,
const BufferValue::SizeFunction& size_function,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn) {
std::unique_ptr<MemoryBoundLoopOptimizer> optimizer =
absl::WrapUnique(new MemoryBoundLoopOptimizer(
loop_start, loop_end, alternate_memory_size, options, hlo_live_range,
alias_analysis, cost_analysis, size_function,
reserved_scoped_memory_fn));
TF_RETURN_IF_ERROR(optimizer->Initialize());
return std::move(optimizer);
}
MemoryBoundLoopOptimizer::MemoryBoundLoopOptimizer(
int loop_start, int loop_end, uint64_t alternate_memory_size,
const MemoryBoundLoopOptimizerOptions& options,
const HloLiveRange& hlo_live_range, const HloAliasAnalysis& alias_analysis,
const CostAnalysis& cost_analysis,
const BufferValue::SizeFunction& size_function,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn)
: loop_start_(loop_start),
loop_end_(loop_end),
loop_size_(loop_end - loop_start),
alternate_memory_size_(alternate_memory_size),
options_(options),
hlo_live_range_(hlo_live_range),
alias_analysis_(alias_analysis),
cost_analysis_(cost_analysis),
size_function_(size_function),
reserved_scoped_memory_fn_(reserved_scoped_memory_fn) {}
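// Maps instructions of the previous, current, and next iterations to loop
// indices, seeds per-step memory budgets net of reserved scoped memory, and
// collects every aliased buffer touched by the loop body.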
absl::Status MemoryBoundLoopOptimizer::Initialize() {
const auto& instruction_sequence =
hlo_live_range_.flattened_instruction_sequence().instructions();
VLOG(3) << "MemoryBoundLoopOptimizer::Initialize, loop start: " << loop_start_
<< ", loop end: " << loop_end_ << ", loop size: " << loop_size_;
const HloComputation* loop_computation = nullptr;
int prev_iteration_start = loop_start_ - loop_size_;
int next_iteration_start = loop_start_ + loop_size_;
for (int i = 0; i < loop_size_; ++i) {
const HloInstruction* loop_inst = instruction_sequence[loop_start_ + i];
instructions_in_loop_[loop_inst] = i;
const HloInstruction* prev_iteration_inst =
instruction_sequence[prev_iteration_start + i];
instructions_in_prev_iteration_[prev_iteration_inst] = i;
const HloInstruction* next_iteration_inst =
instruction_sequence[next_iteration_start + i];
instructions_in_next_iteration_[next_iteration_inst] = i;
VLOG(3) << " inst in loop [" << (i) << "]: " << loop_inst->name();
if (!loop_computation) {
loop_computation = loop_inst->parent();
} else {
TF_RET_CHECK(loop_computation == loop_inst->parent());
}
remaining_memory_.push_back(
alternate_memory_size_ -
reserved_scoped_memory_fn_(loop_inst,
{},
{}));
}
std::set<const HloBuffer*> buffers_to_process;
for (const auto& [instruction, idx] : instructions_in_loop_) {
auto maybe_add_buffer = [&](const HloInstruction* instruction) {
return [this, &buffers_to_process, instruction](const Shape& subshape,
const ShapeIndex& index) {
if (!subshape.IsArray()) {
return;
}
const HloBuffer& buffer =
alias_analysis_.GetUniqueBufferAt(instruction, index);
if (buffers_to_process.find(&buffer) == buffers_to_process.end()) {
buffers_to_process.insert(&buffer);
}
};
};
ShapeUtil::ForEachSubshape(instruction->shape(),
maybe_add_buffer(instruction));
for (const HloInstruction* operand : instruction->operands()) {
ShapeUtil::ForEachSubshape(operand->shape(), maybe_add_buffer(operand));
}
}
for (const HloBuffer* buffer : buffers_to_process) {
MaybeCreateLoopValue(*buffer, loop_computation);
}
return absl::OkStatus();
}
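// Classifies a buffer's lifetime relative to the loop (temporary, pinned,
// prefetch, loop-carried dependence, or unsupported) and estimates the
// default-memory traffic saved by placing it in alternate memory.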
void MemoryBoundLoopOptimizer::MaybeCreateLoopValue(
const HloBuffer& buffer, const HloComputation* loop_computation) {
loop_values_.push_back({});
LoopValue& loop_value = loop_values_.back();
float pos_bytes = 0;
float use_bytes = 0;
bool has_footer_consumer = false;
for (const HloValue* value : buffer.values()) {
for (const HloPosition& position : value->positions()) {
if (position.instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
std::optional<int64_t> loop_index =
GetInstructionIndex(position.instruction, instructions_in_loop_);
std::optional<int64_t> prev_iteration_index;
if (loop_index) {
loop_value.loop_positions.push_back({*loop_index, position});
VLOG(3) << "Pos match: " << position.instruction->name() << " at "
<< *loop_index;
} else if ((prev_iteration_index = GetInstructionIndex(
position.instruction, instructions_in_prev_iteration_))) {
loop_value.prev_iteration_positions.push_back(
{*prev_iteration_index, position});
VLOG(3) << "Pos match (prev iteration): "
<< position.instruction->name() << " at "
<< *prev_iteration_index;
} else if (loop_value.prev_iteration_positions.empty() &&
loop_value.loop_positions.empty() &&
position.instruction->parent() == loop_computation &&
!loop_value.header_position) {
loop_value.header_position = position;
}
if (loop_index || prev_iteration_index) {
float bytes_accessed = cost_analysis_.base_costs().OutputBytesAccessed(
*position.instruction, position.index);
pos_bytes += bytes_accessed;
VLOG(3) << " accessed: " << bytes_accessed;
}
}
for (const HloUse& use : value->GetUses()) {
if (use.instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
std::optional<int64_t> loop_index =
GetInstructionIndex(use.instruction, instructions_in_loop_);
std::optional<int64_t> next_iteration_index;
if (loop_index) {
loop_value.loop_uses.push_back({*loop_index, use});
VLOG(3) << "Use match: " << use.instruction->name() << " at "
<< *loop_index;
} else if ((next_iteration_index = GetInstructionIndex(
use.instruction, instructions_in_next_iteration_))) {
loop_value.next_iteration_uses.push_back({*next_iteration_index, use});
VLOG(3) << "Use match (next iteration): " << use.instruction->name()
<< " at " << *next_iteration_index;
} else if (!loop_value.loop_positions.empty() ||
!loop_value.loop_uses.empty()) {
has_footer_consumer = true;
}
if (loop_index || next_iteration_index) {
float bytes_accessed = cost_analysis_.base_costs().OperandBytesAccessed(
*use.instruction, use.operand_number, use.operand_index);
use_bytes += bytes_accessed;
VLOG(3) << " accessed: " << bytes_accessed;
}
}
}
if ((!loop_value.loop_positions.empty() || !loop_value.loop_uses.empty()) &&
loop_value.prev_iteration_positions.empty()) {
loop_value.size = size_function_(**buffer.values().begin());
VLOG(3) << "Size: " << loop_value.size;
loop_value.allocation_type = LoopValue::AllocationType::kUnsupported;
auto position_compare = [](const std::pair<int64_t, HloPosition>& a,
const std::pair<int64_t, HloPosition>& b) {
return a.first < b.first;
};
auto use_compare = [](const std::pair<int64_t, HloUse>& a,
const std::pair<int64_t, HloUse>& b) {
return a.first < b.first;
};
absl::c_sort(loop_value.loop_positions, position_compare);
absl::c_sort(loop_value.prev_iteration_positions, position_compare);
absl::c_sort(loop_value.loop_uses, use_compare);
absl::c_sort(loop_value.next_iteration_uses, use_compare);
if (!loop_value.loop_positions.empty()) {
if (loop_value.next_iteration_uses.empty() &&
!loop_value.loop_uses.empty()) {
loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
} else if (!loop_value.next_iteration_uses.empty()) {
if (loop_value.next_iteration_uses.back().first >=
loop_value.loop_positions.front().first) {
loop_value.allocation_type =
LoopValue::AllocationType::kLoopCarriedDependence;
} else {
loop_value.allocation_type = LoopValue::AllocationType::kTemporary;
}
}
} else if (loop_value.header_position && !loop_value.loop_uses.empty()) {
if (loop_value.loop_uses.size() ==
loop_value.next_iteration_uses.size() &&
loop_value.loop_uses.front().first ==
loop_value.next_iteration_uses.front().first) {
loop_value.allocation_type = LoopValue::AllocationType::kPinned;
} else if (loop_value.next_iteration_uses.empty() ||
loop_value.next_iteration_uses.back().first <
loop_value.loop_uses.front().first) {
loop_value.allocation_type = LoopValue::AllocationType::kPrefetch;
}
}
VLOG(3) << "Allocation type "
<< LoopValue::AllocationTypeToString(loop_value.allocation_type);
VLOG(3) << "Pos bytes: " << pos_bytes << " use bytes: " << use_bytes;
float savings = pos_bytes + use_bytes;
if (loop_value.header_position) {
savings -= loop_value.size;
}
if (!loop_value.loop_positions.empty() && has_footer_consumer) {
savings -= loop_value.size;
}
loop_value.savings = savings;
loop_value.savings_per_byte = savings / loop_value.size;
VLOG(3) << "Savings: " << loop_value.savings;
VLOG(3) << "Savings per byte: " << loop_value.savings_per_byte;
for (const HloValue* value : buffer.values()) {
VLOG(3) << value->ToString();
}
loop_value.hlo_values = buffer.values();
} else {
loop_values_.pop_back();
}
}
void MemoryBoundLoopOptimizer::Optimize() {
SortLoopValues();
AllocateLoopValues();
PostProcess();
}
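// Estimates steady-state loop latency by replaying three iterations,
// charging any prefetch bytes that cannot be hidden in bandwidth idle time
// as critical-path overhead; the final iteration's total is returned.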
float MemoryBoundLoopOptimizer::CalculateExecutionTime() const {
std::vector<std::pair<const CopyAllocation*, float>> prefetches;
for (const LoopValue& value : loop_values_) {
if (!value.allocations.empty() &&
value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(
{static_cast<const CopyAllocation*>(value.allocations.back().get()),
cost_analysis_.GetAsyncCopyElapsed(
value.hlo_values.front()->shape())});
}
}
auto get_effective_done_time =
[&](int64_t copy_start_schedule_after,
int64_t copy_done_schedule_before) -> int64_t {
if (copy_start_schedule_after == loop_size_ - 1 &&
copy_done_schedule_before == 0) {
return 2 * loop_size_;
}
if (copy_start_schedule_after + 1 >= copy_done_schedule_before) {
return copy_done_schedule_before + loop_size_;
}
return copy_done_schedule_before;
};
absl::c_sort(
prefetches, [&](const std::pair<const CopyAllocation*, float>& a,
const std::pair<const CopyAllocation*, float>& b) {
return std::forward_as_tuple(
a.first->copy_start_schedule_after(),
get_effective_done_time(
a.first->copy_start_schedule_after(),
a.first->copy_done_schedule_before())) <
std::forward_as_tuple(b.first->copy_start_schedule_after(),
get_effective_done_time(
b.first->copy_start_schedule_after(),
b.first->copy_done_schedule_before()));
});
std::vector<std::optional<int>> required_prefetch_completions(loop_size_);
for (int i = 0; i < prefetches.size(); ++i) {
const auto& [prefetch, elapsed] = prefetches[i];
int required_prefetch_completion = i;
if (prefetch->copy_start_schedule_after() == loop_size_ - 1 &&
prefetch->copy_done_schedule_before() == 0) {
required_prefetch_completion -= 2 * prefetches.size();
} else if (prefetch->copy_start_schedule_after() + 1 >=
prefetch->copy_done_schedule_before()) {
required_prefetch_completion -= prefetches.size();
}
VLOG(3) << "Prefetch #" << i << " (elapsed " << elapsed
<< "): " << prefetch->ToString();
if (required_prefetch_completions[prefetch->copy_done_schedule_before()]) {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
std::max(
*required_prefetch_completions[prefetch
->copy_done_schedule_before()],
required_prefetch_completion);
} else {
required_prefetch_completions[prefetch->copy_done_schedule_before()] =
required_prefetch_completion;
}
VLOG(4)
<< "Required completion at " << prefetch->copy_done_schedule_before()
<< " = "
<< *required_prefetch_completions[prefetch
->copy_done_schedule_before()];
}
float result;
std::vector<float> bandwidth_idle_times;
std::vector<float> instructions_elapsed;
bandwidth_idle_times.reserve(loop_size_);
instructions_elapsed.reserve(loop_size_);
for (int i = 0; i < loop_size_; ++i) {
bandwidth_idle_times.push_back(GetBandwidthIdleTime(i));
instructions_elapsed.push_back(GetInstructionElapsed(i));
}
const int kNumIterations = 3;
std::vector<float> prefetch_remaining_elapsed_times(prefetches.size() *
kNumIterations);
int prefetch_start_index = 0;
int prefetch_done_index = 0;
int prefetch_completed_index = 0;
for (int iteration = 0; iteration < kNumIterations; ++iteration) {
float total_elapsed = 0;
float total_bandwidth_idle_time = 0;
float total_critical_prefetch = 0;
for (int i = 0; i < loop_size_; ++i) {
std::optional<int> required_prefetch_completion =
required_prefetch_completions[i];
if (required_prefetch_completion) {
int required_prefetch_done_index =
iteration * static_cast<int>(prefetches.size()) +
*required_prefetch_completion;
VLOG(4) << "Prefetch #"
<< ((*required_prefetch_completion + prefetches.size()) %
prefetches.size())
<< " (" << required_prefetch_done_index
<< ") is required to be completed at " << i;
for (; prefetch_done_index <= required_prefetch_done_index;
++prefetch_done_index) {
CHECK_LE(prefetch_done_index, prefetch_start_index);
if (prefetch_done_index == prefetch_completed_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_done_index];
VLOG(4) << "Prefetch #" << (prefetch_done_index % prefetches.size())
<< " (" << prefetch_done_index
<< ") did not complete, remaining elapsed = "
<< prefetch_remaining;
total_critical_prefetch += prefetch_remaining;
prefetch_remaining = 0;
++prefetch_completed_index;
}
}
}
float elapsed = instructions_elapsed[i];
total_elapsed += elapsed;
float bandwidth_idle_time = bandwidth_idle_times[i];
for (; prefetch_completed_index < prefetch_start_index;
++prefetch_completed_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_completed_index];
if (bandwidth_idle_time < prefetch_remaining) {
prefetch_remaining -= bandwidth_idle_time;
bandwidth_idle_time = 0;
VLOG(4) << "Prefetch #"
<< (prefetch_completed_index % prefetches.size()) << " ("
<< prefetch_completed_index << ") still ongoing at " << i
<< ", remaining elapsed = " << prefetch_remaining;
break;
}
bandwidth_idle_time -= prefetch_remaining;
prefetch_remaining = 0;
VLOG(4) << "Prefetch #"
<< (prefetch_completed_index % prefetches.size()) << " ("
<< prefetch_completed_index << ") completed at " << i
<< ", bandwidth idle time = " << bandwidth_idle_time;
}
if (bandwidth_idle_time > 0) {
VLOG(4) << "Bandwidth idle time at " << i << " = "
<< bandwidth_idle_time;
total_bandwidth_idle_time += bandwidth_idle_time;
}
for (; prefetch_start_index < (iteration + 1) * prefetches.size() &&
prefetches[prefetch_start_index % prefetches.size()]
.first->copy_start_schedule_after() == i;
++prefetch_start_index) {
float& prefetch_remaining =
prefetch_remaining_elapsed_times[prefetch_start_index];
prefetch_remaining =
prefetches[prefetch_start_index % prefetches.size()].second;
VLOG(4) << "Prefetch #" << (prefetch_start_index % prefetches.size())
<< " (" << prefetch_start_index << ") started at " << i
<< ", remaining elapsed = " << prefetch_remaining;
}
}
VLOG(3) << "Iteration " << iteration;
VLOG(3) << "Total elapsed: " << total_elapsed
<< ", total critical prefetch: " << total_critical_prefetch
<< ", total bandwidth idle time: " << total_bandwidth_idle_time;
result = total_elapsed + total_critical_prefetch;
}
return result;
}
std::string
MemoryBoundLoopOptimizer::LoopValue::AllocationTypeToString(
LoopValue::AllocationType allocation_type) {
switch (allocation_type) {
case AllocationType::kTemporary:
return "temporary";
case AllocationType::kLoopCarriedDependence:
return "loop-carried dependence";
case AllocationType::kPinned:
return "pinned";
case AllocationType::kPrefetch:
return "prefetch";
default:
CHECK(allocation_type == AllocationType::kUnsupported);
return "unsupported";
}
}
std::string MemoryBoundLoopOptimizer::LoopValue::ToString() const {
std::string values_str;
absl::StrAppend(&values_str, "Values:");
for (const HloValue* hlo_value : hlo_values) {
absl::StrAppend(&values_str, "\n - ", hlo_value->ToShortString());
}
std::string allocations_str;
if (!allocations.empty()) {
absl::StrAppend(&allocations_str, "Allocations:");
}
for (const auto& allocation : allocations) {
absl::StrAppend(&allocations_str, "\n - ", allocation->ToString());
}
return absl::StrCat(
"Size: ", size, " savings: ", savings,
" savings per byte: ", savings_per_byte,
" allocation type: ", AllocationTypeToString(allocation_type), "\n",
values_str, "\n", allocations_str);
}
bool MemoryBoundLoopOptimizer::LoopValue::IsAllocationTypeSupported() const {
return allocation_type == AllocationType::kTemporary ||
allocation_type == AllocationType::kPinned ||
allocation_type == AllocationType::kPrefetch;
}
void MemoryBoundLoopOptimizer::SortLoopValues() {
absl::c_stable_sort(loop_values_, [](const LoopValue& a, const LoopValue& b) {
return a.savings_per_byte > b.savings_per_byte;
});
}
void MemoryBoundLoopOptimizer::AllocateLoopValues() {
std::vector<LoopValue*> prefetch_values;
VLOG(3) << "Pre optimization execution time: " << CalculateExecutionTime();
for (LoopValue& value : loop_values_) {
switch (value.allocation_type) {
case LoopValue::AllocationType::kTemporary:
AllocateTemporary(value);
break;
case LoopValue::AllocationType::kPinned:
if (value.savings > 0) {
AllocatePinned(value);
}
break;
case LoopValue::AllocationType::kPrefetch:
prefetch_values.push_back(&value);
break;
case LoopValue::AllocationType::kLoopCarriedDependence:
case LoopValue::AllocationType::kUnsupported:
VLOG(1) << "Unsupported allocation: " << value.ToString();
}
}
VLOG(3) << "Execution time after allocating temporaries: "
<< CalculateExecutionTime();
AllocatePrefetches(absl::MakeSpan(prefetch_values));
VLOG(3) << "Execution time after allocating prefetches: "
<< CalculateExecutionTime();
}
void MemoryBoundLoopOptimizer::PostProcess() {
for (LoopValue& value : loop_values_) {
absl::flat_hash_set<HloUse> allocated_uses;
for (const auto& allocation : value.allocations) {
for (const HloUse& use : allocation->uses()) {
allocated_uses.insert(use);
}
}
std::vector<HloUse> unallocated_uses;
absl::flat_hash_set<int> use_indices;
for (const auto& [idx, use] : value.loop_uses) {
use_indices.insert(idx);
if (!allocated_uses.contains(use)) {
unallocated_uses.push_back(use);
}
}
for (const auto& [next_iteration_idx, use] : value.next_iteration_uses) {
if (use_indices.contains(next_iteration_idx)) {
continue;
}
HloInstruction* loop_instruction =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + next_iteration_idx);
HloUse loop_use{loop_instruction, use.operand_number, use.operand_index};
if (!allocated_uses.contains(loop_use)) {
unallocated_uses.push_back(loop_use);
}
}
if (!unallocated_uses.empty()) {
value.allocations.push_back(std::make_unique<PinnedAllocation>(
value.hlo_values.front()->defining_position(), MemorySpace::kDefault,
std::nullopt, 0, loop_size_, false));
for (const HloUse& use : unallocated_uses) {
value.allocations.back()->AddUse(use);
}
}
}
}
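// Reserves `size` bytes at every step of the (possibly wrapping) interval
// [begin_idx, end_idx]; fails with no side effects if any step lacks room.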
bool MemoryBoundLoopOptimizer::AllocateBetween(int64_t begin_idx,
int64_t end_idx, int64_t size) {
int64_t end_idx_sentinel = end_idx;
if (end_idx < begin_idx) {
end_idx_sentinel += loop_size_;
}
for (int64_t i = begin_idx; i <= end_idx_sentinel; ++i) {
if (remaining_memory_[i % loop_size_] < size) {
return false;
}
}
for (int64_t i = begin_idx; i <= end_idx_sentinel; ++i) {
remaining_memory_[i % loop_size_] -= size;
}
return true;
}
bool MemoryBoundLoopOptimizer::AllocateTemporary(LoopValue& value) {
VLOG(3) << "AllocateTemporary: " << value.ToString();
if (value.hlo_values.size() > 1) {
VLOG(3) << "LoopValue has more than one hlo value associated.";
return false;
}
int64_t definition_idx = value.loop_positions.front().first;
int64_t max_use_idx;
if (!value.next_iteration_uses.empty()) {
max_use_idx = value.next_iteration_uses.back().first;
CHECK_LT(max_use_idx, definition_idx);
} else {
max_use_idx = value.loop_uses.back().first;
}
bool success = AllocateBetween(definition_idx, max_use_idx, value.size);
if (success) {
VLOG(3) << "Pos: " << value.loop_positions[0].second;
value.allocations.push_back(std::make_unique<PinnedAllocation>(
value.loop_positions[0].second, MemorySpace::kAlternate, std::nullopt,
definition_idx, max_use_idx,
false));
AddAllLoopPositionsAndUses(value, true);
}
return success;
}
bool MemoryBoundLoopOptimizer::AllocatePinned(LoopValue& value) {
bool success = AllocateBetween(0, loop_size_ - 1, value.size);
if (success) {
CHECK(value.header_position);
value.allocations.push_back(std::make_unique<PinnedAllocation>(
*value.header_position, MemorySpace::kAlternate, std::nullopt, 0,
loop_size_,
false));
AddAllLoopPositionsAndUses(value, false);
}
return success;
}
bool MemoryBoundLoopOptimizer::AllocatePrefetches(
absl::Span<LoopValue*> values) {
VLOG(3) << "Allocating prefetches num values: " << values.size();
AllocatePrefetchesContext context;
context.values = values;
context.value_indices.resize(values.size());
absl::c_iota(context.value_indices, 0);
absl::c_stable_sort(context.value_indices, [&](int a, int b) {
return std::forward_as_tuple(
values[a]->loop_uses.begin()->first,
values[a]->loop_uses.begin()->second.operand_number) >
std::forward_as_tuple(
values[b]->loop_uses.begin()->first,
values[b]->loop_uses.begin()->second.operand_number);
});
absl::flat_hash_map<const HloInstruction*,
std::vector<std::pair<int64_t, ShapeIndex>>>
additional_uses_in_alternate_mem;
absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>
additional_positions_in_alternate_mem;
for (const LoopValue* value : values) {
VLOG(3) << " prefetch value: " << value->ToString();
for (const auto& [idx, use] : value->loop_uses) {
additional_uses_in_alternate_mem[use.instruction].push_back(
{use.operand_number, use.operand_index});
}
for (const auto& [idx, position] : value->loop_positions) {
additional_positions_in_alternate_mem[position.instruction].push_back(
position.index);
}
}
for (int i = 0; i < loop_size_; ++i) {
context.bandwidth_idle_times.push_back(
GetBandwidthIdleTime(i, additional_uses_in_alternate_mem,
additional_positions_in_alternate_mem));
VLOG(3) << "Remaining bandwidth at " << i << " = "
<< *context.bandwidth_idle_times.rbegin();
}
context.additional_memory_used.resize(loop_size_, 0);
for (int value_index : context.value_indices) {
AllocatePrefetch(value_index, context);
}
for (int i = 0; i < loop_size_; ++i) {
remaining_memory_[i] -= context.additional_memory_used[i];
VLOG(3) << "Additional memory [" << i
<< "]: " << context.additional_memory_used[i];
VLOG(3) << "Remaining memory [" << i << "]: " << remaining_memory_[i];
VLOG(3) << "Remaining bandwidth [" << i
<< "] : " << context.bandwidth_idle_times[i];
}
return true;
}
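// Walks backwards from the first use, accumulating bandwidth idle time until
// the copy resource (or the desired copy ratio) is covered; prefetches that
// were already scheduled to start on the way may be forced to start earlier,
// with their overlap memory accounted for.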
bool MemoryBoundLoopOptimizer::AllocatePrefetch(
int value_index, AllocatePrefetchesContext& context) {
LoopValue* value = context.values.at(value_index);
VLOG(3) << "Allocating value: " << value->ToString();
int first_use_idx = value->loop_uses.front().first;
int last_use_idx = value->loop_uses.back().first;
int last_use_idx_sentinel = last_use_idx;
if (!value->next_iteration_uses.empty()) {
last_use_idx = value->next_iteration_uses.back().first;
last_use_idx_sentinel = last_use_idx + loop_size_;
CHECK_LT(last_use_idx, first_use_idx);
}
bool out_of_memory = false;
for (int i = first_use_idx; i <= last_use_idx_sentinel; ++i) {
int loop_idx = i % loop_size_;
if (context.additional_memory_used[loop_idx] + value->size >
remaining_memory_[loop_idx]) {
VLOG(3) << "Ran out of memory allocating for uses.";
out_of_memory = true;
}
}
if (out_of_memory) {
return false;
}
float copy_resource =
cost_analysis_.GetAsyncCopyElapsed(value->hlo_values.front()->shape());
VLOG(3) << "First use: " << value->loop_uses.begin()->second
<< " use idx: " << first_use_idx
<< " copy resource: " << copy_resource;
std::optional<int> copy_start_time;
float accumulated_copy_resource = 0;
std::vector<int> early_forced_prefetch_value_indices;
int early_forced_prefetch_value_search_index = 0;
float early_forced_prefetch_additional_memory = 0;
for (int i = first_use_idx - 1; i >= last_use_idx_sentinel - loop_size_;
--i) {
int loop_idx = (i + loop_size_) % loop_size_;
if (i < 0) {
for (; context.value_indices[early_forced_prefetch_value_search_index] !=
value_index;
++early_forced_prefetch_value_search_index) {
VLOG(3) << "Searching for early forced: "
<< early_forced_prefetch_value_search_index;
LoopValue* early_forced_value = context.values.at(
context.value_indices[early_forced_prefetch_value_search_index]);
if (early_forced_value->allocations.empty()) {
continue;
}
const CopyAllocation* early_forced_prefetch =
static_cast<const CopyAllocation*>(
early_forced_value->allocations.back().get());
VLOG(3) << "Prefetch: " << early_forced_prefetch->ToString();
if (early_forced_prefetch->copy_done_schedule_before() <=
early_forced_prefetch->copy_start_schedule_after() + 1 ||
(early_forced_prefetch->copy_start_schedule_after() ==
loop_size_ - 1 &&
early_forced_prefetch->copy_done_schedule_before() == 0)) {
break;
}
if (early_forced_prefetch->copy_start_schedule_after() != loop_idx) {
break;
}
early_forced_prefetch_value_indices.push_back(
early_forced_prefetch_value_search_index);
early_forced_prefetch_additional_memory += early_forced_value->size;
VLOG(3) << "Found early-forced prefetch value: "
<< early_forced_value->ToString();
VLOG(3) << "Early forced prefetch additional memory: "
<< early_forced_prefetch_additional_memory;
}
}
int64_t overlap_memory_overhead = 0;
if (loop_idx == last_use_idx) {
overlap_memory_overhead = value->size;
VLOG(3) << "Loop idx == last use idx (" << loop_idx
<< "), overlap memory overhead = " << overlap_memory_overhead;
}
if (context.additional_memory_used[loop_idx] + value->size +
overlap_memory_overhead + early_forced_prefetch_additional_memory >
remaining_memory_[loop_idx]) {
VLOG(3) << "Ran out of memory. Accumulated copy resource "
<< accumulated_copy_resource << " out of " << copy_resource
<< " at " << loop_idx;
break;
}
float bandwidth_idle_time = context.bandwidth_idle_times[loop_idx];
VLOG(3) << "Idx " << loop_idx
<< " bandwidth_idle_time: " << bandwidth_idle_time
<< " copy resource remaining: "
<< (copy_resource - accumulated_copy_resource) << " diff: "
<< (bandwidth_idle_time -
(copy_resource - accumulated_copy_resource));
if (bandwidth_idle_time >= copy_resource - accumulated_copy_resource) {
accumulated_copy_resource = copy_resource;
copy_start_time = loop_idx;
VLOG(3) << "Found the complete copy ratio and updated accumulated copy "
"resource: "
<< accumulated_copy_resource;
break;
} else if (!copy_start_time &&
accumulated_copy_resource + bandwidth_idle_time >=
copy_resource * options_.desired_copy_ratio()) {
accumulated_copy_resource += bandwidth_idle_time;
copy_start_time = loop_idx;
VLOG(3) << "Found the desired copy ratio and updated accumulated copy "
"resource: "
<< accumulated_copy_resource;
} else if (options_.allow_unsatisfied_fully_pipelined_prefetch() &&
loop_idx == last_use_idx) {
accumulated_copy_resource += bandwidth_idle_time;
copy_start_time = loop_idx;
VLOG(3) << "Could not reach the desired copy ratio but scheduling "
"fully pipelined prefetch anyway: "
<< accumulated_copy_resource;
break;
} else {
accumulated_copy_resource += bandwidth_idle_time;
VLOG(3) << "Updated accumulated copy resource: "
<< accumulated_copy_resource;
}
}
if (!copy_start_time) {
return false;
}
VLOG(3) << "Success: copy_start_time: " << *copy_start_time
<< " leftover copy resource: "
<< (copy_resource - accumulated_copy_resource);
auto update_additional_memory_used = [&](int loop_idx, int64_t addition) {
VLOG(4) << "Updating additional memory used at " << loop_idx << ". "
<< context.additional_memory_used[loop_idx] << " + " << addition
<< " => " << (context.additional_memory_used[loop_idx] + addition)
<< " (remaining: " << remaining_memory_[loop_idx] << ")";
context.additional_memory_used[loop_idx] += addition;
CHECK_LE(context.additional_memory_used[loop_idx],
remaining_memory_[loop_idx]);
};
for (int i = first_use_idx; i <= last_use_idx_sentinel; ++i) {
int loop_idx = i % loop_size_;
update_additional_memory_used(loop_idx, value->size);
}
accumulated_copy_resource = 0.0;
for (int i = first_use_idx - 1; i >= last_use_idx_sentinel - loop_size_;
--i) {
int loop_idx = (i + loop_size_) % loop_size_;
float& bandwidth_idle_time = context.bandwidth_idle_times[loop_idx];
int64_t overlap_memory_overhead = 0;
update_additional_memory_used(loop_idx,
value->size + overlap_memory_overhead);
if (bandwidth_idle_time < copy_resource - accumulated_copy_resource) {
accumulated_copy_resource += bandwidth_idle_time;
bandwidth_idle_time = 0;
if (loop_idx == *copy_start_time) {
VLOG(3) << "Remaining copy resource: "
<< (copy_resource - accumulated_copy_resource);
break;
}
} else {
bandwidth_idle_time -= copy_resource - accumulated_copy_resource;
CHECK_EQ(loop_idx, *copy_start_time);
break;
}
}
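  // Commit the decision: a pinned default-memory allocation spanning the
  // whole loop, then a CopyAllocation whose copy-start lands one instruction
  // before *copy_start_time (modulo loop size, since the copy may wrap across
  // the loop boundary).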
CHECK(value->header_position);
value->allocations.push_back(std::make_unique<PinnedAllocation>(
*value->header_position, MemorySpace::kDefault, std::nullopt, 0,
loop_size_, false));
value->allocations.push_back(std::make_unique<CopyAllocation>(
*value->allocations.back(), MemorySpace::kAlternate, std::nullopt,
((*copy_start_time - 1) + loop_size_) % loop_size_, first_use_idx,
last_use_idx_sentinel));
AddAllLoopPositionsAndUses(*value, true);
for (int early_forced_prefetch_value_index :
early_forced_prefetch_value_indices) {
LoopValue* early_forced_value = context.values.at(
context.value_indices[early_forced_prefetch_value_index]);
CHECK(!early_forced_value->allocations.empty());
CopyAllocation* early_forced_prefetch = static_cast<CopyAllocation*>(
early_forced_value->allocations.back().get());
for (int index = early_forced_prefetch->copy_start_schedule_after();
index >= *copy_start_time; --index) {
update_additional_memory_used(index, early_forced_value->size);
VLOG(3) << "Additional memory used: " << index << " "
<< context.additional_memory_used[index];
}
early_forced_prefetch->set_copy_start_schedule_after(
((*copy_start_time - 1) + loop_size_) % loop_size_);
VLOG(3) << "Updated prefetch: " << early_forced_prefetch->ToString();
}
return true;
}
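// Registers every loop position and use of `value` that lands in alternate
// memory and attaches the uses to the value's most recent allocation. With
// `allocate_next_iteration_uses` set, uses seen in the next unrolled
// iteration are remapped onto the matching instruction of this iteration's
// schedule.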
void MemoryBoundLoopOptimizer::AddAllLoopPositionsAndUses(
LoopValue& value, bool allocate_next_iteration_uses) {
CHECK_GE(value.allocations.size(), 1);
Allocation& allocation = *value.allocations.back();
for (const auto& [idx, position] : value.loop_positions) {
positions_in_alternate_mem_[position.instruction].push_back(position.index);
}
for (const auto& [idx, use] : value.loop_uses) {
uses_in_alternate_mem_[use.instruction].push_back(
{use.operand_number, use.operand_index});
allocation.AddUse(use);
}
if (allocate_next_iteration_uses) {
for (const auto& [next_iteration_idx, use] : value.next_iteration_uses) {
HloInstruction* loop_instruction =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + next_iteration_idx);
uses_in_alternate_mem_[loop_instruction].push_back(
{use.operand_number, use.operand_index});
allocation.AddUse(
{loop_instruction, use.operand_number, use.operand_index});
}
}
}
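// Returns the default-memory bandwidth idle time of the instruction at loop
// index `idx`, given the operands and outputs already placed in alternate
// memory.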
float MemoryBoundLoopOptimizer::GetBandwidthIdleTime(int idx) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> empty_operands;
std::vector<ShapeIndex> empty_outputs;
const std::vector<std::pair<int64_t, ShapeIndex>>* operands_in_alternate_mem =
&empty_operands;
const std::vector<ShapeIndex>* outputs_in_alternate_mem = &empty_outputs;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = &uses_it->second;
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = &positions_it->second;
}
return cost_analysis_.GetDefaultMemoryBandwidthIdleTime(
*inst, *operands_in_alternate_mem, *outputs_in_alternate_mem);
}
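// Overload that also folds in additional, not-yet-committed alternate-memory
// operands and outputs, letting callers price a candidate placement without
// mutating optimizer state.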
float MemoryBoundLoopOptimizer::GetBandwidthIdleTime(
int idx,
const absl::flat_hash_map<const HloInstruction*,
std::vector<std::pair<int64_t, ShapeIndex>>>&
additional_uses_in_alternate_mem,
const absl::flat_hash_map<const HloInstruction*, std::vector<ShapeIndex>>&
additional_positions_in_alternate_mem) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> operands_in_alternate_mem;
std::vector<ShapeIndex> outputs_in_alternate_mem;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = uses_it->second;
}
auto additional_uses_it = additional_uses_in_alternate_mem.find(inst);
if (additional_uses_it != additional_uses_in_alternate_mem.end()) {
absl::c_copy(additional_uses_it->second,
std::back_inserter(operands_in_alternate_mem));
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = positions_it->second;
}
auto additional_positions_it =
additional_positions_in_alternate_mem.find(inst);
if (additional_positions_it != additional_positions_in_alternate_mem.end()) {
absl::c_copy(additional_positions_it->second,
std::back_inserter(outputs_in_alternate_mem));
}
return cost_analysis_.GetDefaultMemoryBandwidthIdleTime(
*inst, operands_in_alternate_mem, outputs_in_alternate_mem);
}
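// Returns the modeled elapsed time of the instruction at loop index `idx`
// with its currently assigned alternate-memory operands and outputs.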
float MemoryBoundLoopOptimizer::GetInstructionElapsed(int idx) const {
const HloInstruction* inst =
hlo_live_range_.flattened_instruction_sequence().instructions().at(
loop_start_ + idx);
std::vector<std::pair<int64_t, ShapeIndex>> empty_operands;
std::vector<ShapeIndex> empty_outputs;
const std::vector<std::pair<int64_t, ShapeIndex>>* operands_in_alternate_mem =
&empty_operands;
const std::vector<ShapeIndex>* outputs_in_alternate_mem = &empty_outputs;
auto uses_it = uses_in_alternate_mem_.find(inst);
if (uses_it != uses_in_alternate_mem_.end()) {
operands_in_alternate_mem = &uses_it->second;
}
auto positions_it = positions_in_alternate_mem_.find(inst);
if (positions_it != positions_in_alternate_mem_.end()) {
outputs_in_alternate_mem = &positions_it->second;
}
return cost_analysis_.GetInstructionElapsedInAlternateMemory(
*inst, *operands_in_alternate_mem, *outputs_in_alternate_mem);
}
}
} | #include "xla/service/memory_space_assignment/memory_bound_loop_optimizer.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "re2/re2.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/allocation.h"
#include "xla/service/memory_space_assignment/buffer_interval_comparator.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/service/memory_space_assignment/options.h"
#include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace memory_space_assignment {
namespace {
using ::testing::ContainerEq;
using ::testing::HasSubstr;
constexpr int64_t kPointerSize = 8;
int64_t ShapeSize(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}
int64_t SizeFunction(const BufferValue& value) {
return ShapeSize(value.shape());
}
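// Reserves no scoped memory; a stand-in for tests that don't model
// per-instruction reservations.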
int64_t ReservedScopedMemoryFn(
const HloInstruction* instruction,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&
operands_in_alternate_memory,
const absl::flat_hash_set<ShapeIndex>& outputs_in_alternate_memory) {
return 0;
}
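// Exercises LoopOptimizerBestFitHeap with (what we take to be) a 64-byte size
// limit per heap, a loop size of 6, and 8-byte alignment. The helpers below
// collapse an EvenOddChunkPair into a simple did-both-chunks-fit boolean.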
class LoopOptimizerBestFitHeapTest : public ::testing::Test {
public:
LoopOptimizerBestFitHeapTest()
      // (constructor arguments annotated with their presumed parameter names)
      : heap_(/*size_limit_per_heap=*/64, /*loop_size=*/6,
              /*alignment_in_bytes=*/8) {}
bool IsAllocateSameEvenAndOddBetweenSuccessful(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.AllocateSameEvenAndOddBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool CanFindSameEvenAndOddAllocationBetween(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.FindSameEvenAndOddAllocationBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool IsAllocateEvenAndOddBetweenSuccessful(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.AllocateEvenAndOddBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
bool CanFindEvenAndOddAllocationBetween(int64_t begin_idx_in_loop,
int64_t end_idx_in_loop,
int64_t size) {
EvenOddChunkPair chunks = heap_.FindEvenAndOddAllocationBetween(
begin_idx_in_loop, end_idx_in_loop, size);
return chunks.first.has_value() && chunks.second.has_value();
}
std::string GetMemoryUsageAsciiArt() { return heap_.MemoryUsageToAsciiArt(); }
protected:
LoopOptimizerBestFitHeap heap_;
};
TEST_F(LoopOptimizerBestFitHeapTest, TestAllocateSameEvenAndOddBetween) {
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(3, 8, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(-3, 2, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 2, 16));
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(3, 5, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 48);
EXPECT_TRUE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 5, 16));
EXPECT_FALSE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 5, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{0, 0, 0, 0, 0, 0}));
std::string memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(memory_usage, HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 32"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 16"));
EXPECT_THAT(memory_usage, HasSubstr("234567 890123"));
}
TEST_F(LoopOptimizerBestFitHeapTest, TestAllocateEvenAndOddBetween) {
EXPECT_TRUE(IsAllocateEvenAndOddBetweenSuccessful(3, 11, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 32);
EXPECT_TRUE(IsAllocateEvenAndOddBetweenSuccessful(-3, 8, 16));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{16, 16, 16, 0, 0, 0}));
std::string memory_usage = heap_.MemoryUsageToAsciiArt();
EXPECT_THAT(
memory_usage,
HasSubstr(
"Memory map for time: [0,35], memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage,
HasSubstr("...... ...### ###### ###### ###### ###... 64"));
EXPECT_THAT(memory_usage,
HasSubstr("...### ###### ###### ###### ###... ...... 48"));
EXPECT_THAT(memory_usage,
HasSubstr("...... ...### ###### ...### ###### ...... 32"));
EXPECT_THAT(memory_usage,
HasSubstr("...### ###### ...### ###### ...... ...... 16"));
EXPECT_THAT(memory_usage,
HasSubstr("012345 678901 234567 890123 456789 012345"));
}
TEST_F(LoopOptimizerBestFitHeapTest, TestRemoveChunk) {
EvenOddChunkPair chunks = heap_.AllocateEvenAndOddBetween(3, 11, 16);
EXPECT_TRUE(chunks.first.has_value() && chunks.second.has_value());
EvenOddChunkPair second_chunks = heap_.AllocateEvenAndOddBetween(-3, 8, 16);
EXPECT_TRUE(second_chunks.first.has_value() &&
second_chunks.second.has_value());
EXPECT_THAT(heap_.RemainingMemoryByTime(),
ContainerEq(std::vector<int64_t>{16, 16, 16, 0, 0, 0}));
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 64);
std::string memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(memory_usage, HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(memory_usage, HasSubstr("###### ...### 32"));
EXPECT_THAT(memory_usage, HasSubstr("...### ###### 16"));
EXPECT_THAT(memory_usage, HasSubstr("234567 890123"));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 2, 16));
EXPECT_FALSE(IsAllocateSameEvenAndOddBetweenSuccessful(0, 2, 16));
EXPECT_FALSE(CanFindEvenAndOddAllocationBetween(0, 11, 16));
heap_.RemoveEvenOddChunkPair(3, 11, chunks);
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 11, 16));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(-3, 8, 16));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(0, 5, 32));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(-1, 4, 32));
EXPECT_TRUE(CanFindEvenAndOddAllocationBetween(2, 7, 32));
EXPECT_FALSE(CanFindEvenAndOddAllocationBetween(0, 6, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(0, 5, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(-1, 4, 32));
EXPECT_TRUE(CanFindSameEvenAndOddAllocationBetween(2, 7, 32));
std::string updated_memory_usage = heap_.MemoryUsageToAsciiArt(2, 3);
EXPECT_THAT(updated_memory_usage,
HasSubstr("Memory map for time: [12,23], "
"memory_block_size: 16, group_size: 6"));
EXPECT_THAT(updated_memory_usage, HasSubstr("###### ###### 64"));
EXPECT_THAT(updated_memory_usage, HasSubstr("###### ###### 48"));
EXPECT_THAT(updated_memory_usage, HasSubstr("...... ...... 32"));
EXPECT_THAT(updated_memory_usage, HasSubstr("...... ...... 16"));
EXPECT_THAT(updated_memory_usage, HasSubstr("234567 890123"));
heap_.RemoveEvenOddChunkPair(-3, 8, second_chunks);
EXPECT_EQ(heap_.LastMemoryOffsetOccupied(), 0);
}
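// End-to-end fixture: builds cost analysis, alias analysis, and live ranges
// for a module, then runs MemoryBoundLoopOptimizer (and optionally full MSA)
// over a three-iteration unrolled loop produced from a small template
// language ($opN, $paramN, $prev_opN).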
class MemoryBoundLoopOptimizerTest : public HloTestBase {
public:
MemoryBoundLoopOptimizerTest() = default;
protected:
const int64_t kAlternateMemorySpace = 1;
const int64_t kDefaultMemorySpace = 0;
absl::Status Initialize(const HloModule* module,
uint64_t alternate_memory_size = 256) {
HloCostAnalysis::Options options;
MemoryBoundLoopOptimizerOptions optimizer_options;
optimizer_options.set_enabled(true);
optimizer_options.set_desired_copy_ratio(0.7);
optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
optimizer_options.set_min_num_iterations(3.0);
options_.memory_bound_loop_optimizer_options = optimizer_options;
cost_analysis_options_.alternate_mem_bandwidth_bytes_per_second = 128;
cost_analysis_options_.async_copy_bandwidth_bytes_per_second = 32;
cost_analysis_options_.pipeline_overhead_window_size_mib = 1;
options.shape_size = ShapeSize;
options.set_flops_per_second(16);
options.set_bytes_per_second(32);
options.set_transcendentals_per_second(16);
hlo_cost_analysis_ = std::make_unique<HloCostAnalysis>(options);
TF_RETURN_IF_ERROR(
module->entry_computation()->Accept(hlo_cost_analysis_.get()));
hlo_cost_analysis_costs_ =
std::make_unique<HloCostAnalysisCosts>(*hlo_cost_analysis_);
TF_ASSIGN_OR_RETURN(cost_analysis_,
CostAnalysis::Create(*hlo_cost_analysis_costs_,
cost_analysis_options_, *module));
TF_ASSIGN_OR_RETURN(alias_analysis_, HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(live_range_,
HloLiveRange::Run(module->schedule(), *alias_analysis_,
module->entry_computation()));
return absl::OkStatus();
}
absl::StatusOr<MemoryBoundLoopOptimizer*> CreateOptimizer(
int loop_start, int loop_end, const HloModule* module,
uint64_t alternate_memory_size = 256,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
ReservedScopedMemoryFn) {
TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
MemoryBoundLoopOptimizerOptions optimizer_options;
optimizer_options.set_enabled(true);
optimizer_options.set_desired_copy_ratio(0.7);
optimizer_options.set_allow_unsatisfied_fully_pipelined_prefetch(false);
TF_ASSIGN_OR_RETURN(
optimizer_,
MemoryBoundLoopOptimizer::Create(
loop_start, loop_end, alternate_memory_size, optimizer_options,
*live_range_, *alias_analysis_, *cost_analysis_, SizeFunction,
reserved_scoped_memory_fn));
return optimizer_.get();
}
absl::StatusOr<std::unique_ptr<HloModule>> ParseAndCreateOptimizer(
absl::string_view hlo_loop_str, uint64_t alternate_memory_size,
int& loop_start_idx, MemoryBoundLoopOptimizer** optimizer,
const ReservedScopedMemoryFunction& reserved_scoped_memory_fn =
ReservedScopedMemoryFn) {
int loop_end_idx;
TF_ASSIGN_OR_RETURN(
std::string module_str,
ParseAndCreateModuleString(hlo_loop_str, loop_start_idx, loop_end_idx));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSIGN_OR_RETURN(
*optimizer,
CreateOptimizer(loop_start_idx, loop_end_idx, module.get(),
alternate_memory_size, reserved_scoped_memory_fn));
return std::move(module);
}
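  // Expands a loop-body template into a scheduled HLO module holding three
  // copies of the body (prefixed prev_/<empty>/next_) after a parameter
  // preamble. As a rough sketch (not literal output), a template line such as
  //   $op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
  // is printed once per iteration as prev_op0, op0, and next_op0, with
  // $prev_op/$op/$param rewritten through the replacement tables built below.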
absl::StatusOr<std::string> ParseAndCreateModuleString(
absl::string_view hlo_loop_str, int& loop_start_idx, int& loop_end_idx) {
RE2 op_re("\\$op([0-9]+) += +(\\S+).*");
std::vector<absl::string_view> ops;
std::vector<absl::string_view> op_types;
int begin_pos = 0;
absl::string_view submatch[3];
while (op_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
RE2::UNANCHORED, submatch, 3)) {
for (int i = 0; i < 3; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
int op_num;
if (!absl::SimpleAtoi(submatch[1], &op_num)) {
return InvalidArgument("Op name expects to contain a number, found %s.",
submatch[1]);
}
if (op_num != ops.size()) {
return InvalidArgument("Op number expected to be %d found %d.",
op_types.size(), op_num);
}
ops.push_back(submatch[0]);
op_types.push_back(submatch[2]);
begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
}
RE2 param_re("([[:alnum:]]+\\[\\S*\\]) +\\$param([0-9]+)");
std::vector<absl::string_view> param_types;
begin_pos = 0;
while (param_re.Match(hlo_loop_str, begin_pos, hlo_loop_str.size(),
RE2::UNANCHORED, submatch, 3)) {
for (int i = 0; i < 3; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
int param_num;
      if (!absl::SimpleAtoi(submatch[2], &param_num)) {
return InvalidArgument(
"Param name expects to contain a number, found %s.", submatch[2]);
}
while (param_num >= param_types.size()) {
param_types.push_back({});
}
param_types[param_num] = submatch[1];
begin_pos = submatch[0].data() - hlo_loop_str.data() + submatch[0].size();
}
RE2 root_re("ROOT \\$root += +tuple\\((.*)\\)");
absl::string_view root_values;
if (root_re.Match(hlo_loop_str, 0, hlo_loop_str.size(), RE2::UNANCHORED,
submatch, 2)) {
for (int i = 0; i < 2; ++i) {
if (submatch[i].data() == nullptr) {
VLOG(4) << "Submatch[" << i << "] = nullptr";
} else {
VLOG(4) << "Submatch[" << i << "] = " << submatch[i]
<< " (idx: " << (submatch[i].data() - hlo_loop_str.data())
<< ")";
}
}
root_values = submatch[1];
}
for (absl::string_view op_type : op_types) {
VLOG(4) << "op_type: " << op_type;
}
for (absl::string_view param_type : param_types) {
VLOG(4) << "param_type: " << param_type;
}
std::string hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY Entry {
)";
int total_instructions = 0;
for (absl::string_view param_prefix : {"prev_", "", "next_"}) {
for (int i = 0; i < param_types.size(); ++i) {
int parameter_number = total_instructions;
absl::StrAppend(&hlo_string, " ", param_prefix, "param", i, " = ",
param_types[i], " parameter(", parameter_number,
")
}
}
for (int i = 0; i < op_types.size(); ++i) {
int parameter_number = total_instructions;
absl::StrAppend(&hlo_string, " ", "prev_prev_op", i, " = ", op_types[i],
" parameter(", parameter_number, ")
total_instructions++, "\n");
}
std::string new_root_values;
auto print_ops =
[&](const std::vector<std::pair<const absl::string_view, std::string>>&
replacements) {
for (int i = 0; i < ops.size(); ++i) {
absl::StrAppend(&hlo_string, " ",
absl::StrReplaceAll(ops[i], replacements), "
total_instructions++, "\n");
}
if (!root_values.empty()) {
absl::StrAppend(&new_root_values,
new_root_values.empty() ? "" : ", ",
absl::StrReplaceAll(root_values, replacements));
}
};
std::vector<std::pair<const absl::string_view, std::string>>
prev_replacements;
prev_replacements.push_back({"$prev_op", "prev_prev_op"});
prev_replacements.push_back({"$op", "prev_op"});
prev_replacements.push_back({"$param", "prev_param"});
absl::StrAppend(&hlo_string, "
print_ops(prev_replacements);
loop_start_idx = total_instructions;
std::vector<std::pair<const absl::string_view, std::string>> replacements;
replacements.push_back({"$", ""});
absl::StrAppend(&hlo_string, "
print_ops(replacements);
loop_end_idx = total_instructions;
std::vector<std::pair<const absl::string_view, std::string>>
next_replacements;
next_replacements.push_back({"$prev_op", "op"});
next_replacements.push_back({"$op", "next_op"});
next_replacements.push_back({"$param", "next_param"});
absl::StrAppend(&hlo_string, "
print_ops(next_replacements);
absl::StrAppend(&hlo_string, " ROOT root = tuple(", new_root_values,
")\n");
absl::StrAppend(&hlo_string, "}");
VLOG(1) << hlo_string;
return hlo_string;
}
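  // Runs full memory space assignment over `module`, using a
  // memory-boundedness buffer comparator and a cost-analysis-driven prefetch
  // interval picker, so tests can compare MSA's decisions with the loop
  // optimizer's.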
absl::StatusOr<std::unique_ptr<PresetAssignments>> RunMsa(
HloModule* module, uint64_t alternate_memory_size = 256) {
options_.max_size_in_bytes = alternate_memory_size;
options_.alignment_in_bytes = 8;
options_.verify = true;
options_.alternate_memory_space = kAlternateMemorySpace;
if (!cost_analysis_) {
TF_RETURN_IF_ERROR(Initialize(module, alternate_memory_size));
}
CostAnalysis::Cache cache;
MemoryBoundednessBufferIntervalComparator comparator(*cost_analysis_,
&cache);
options_.buffer_interval_comparator = &comparator;
CostAnalysisPrefetchIntervalPicker prefetch_interval_picker(
CostAnalysisPrefetchIntervalPicker(
            *cost_analysis_,
            // (argument annotations below are the picker's presumed params)
            /*min_overlap_to_async_copy_ratio=*/0.8,
            /*preferred_overlap_to_async_copy_ratio=*/1.5,
            /*max_overlap_to_mem_size_async_copy_ratio=*/10.0,
            /*mem_size_bytes=*/alternate_memory_size));
options_.prefetch_interval_picker = &prefetch_interval_picker;
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(), 8);
};
options_.size_fn = size_fn;
auto is_allowed_in_alternate_mem = [](const HloValue& value) {
HloInstruction* instruction = value.instruction();
HloComputation* computation = instruction->parent();
bool in_entry_computation =
(computation == computation->parent()->entry_computation());
if (in_entry_computation &&
instruction->opcode() == HloOpcode::kParameter) {
return false;
}
return true;
};
options_.is_allowed_in_alternate_mem_fn = is_allowed_in_alternate_mem;
options_.max_outstanding_prefetches = -1;
options_.max_outstanding_evictions = -1;
options_.cost_analysis = cost_analysis_.get();
std::unique_ptr<PresetAssignments> preset_assignments =
MemorySpaceAssignment::Run(module, *live_range_, *alias_analysis_,
options_)
.value();
return preset_assignments;
}
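  // Verifies that the standalone loop optimizer's allocations match what MSA
  // actually produced in `module`: memory spaces must agree, and for copy
  // allocations the CopyStart must land right after the expected non-copy
  // instruction, possibly in an earlier unrolled iteration.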
absl::Status VerifyMsaEquivalence(
HloModule* module, bool expect_unsupported_allocations = false) {
absl::flat_hash_map<std::pair<int, int>, const Allocation*> allocation_map;
for (const MemoryBoundLoopOptimizer::LoopValue& value :
optimizer_->loop_values()) {
if (!value.IsAllocationTypeSupported()) {
continue;
}
for (const auto& allocation : value.allocations) {
for (const HloUse& use : allocation->uses()) {
absl::string_view inst_name = use.instruction->name();
TF_RET_CHECK(absl::StartsWith(inst_name, "op"));
int inst_number;
TF_RET_CHECK(absl::SimpleAtoi(inst_name.substr(2), &inst_number));
allocation_map[{inst_number, use.operand_number}] = allocation.get();
}
}
}
auto get_inst_prefix_in_iter = [](int iteration) {
switch (iteration) {
case 0:
return "prev_";
case 1:
return "";
case 2:
return "next_";
default:
LOG(FATAL) << "Invalid iteration " << iteration;
return "INVALID";
}
};
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
HloAliasAnalysis::Run(module));
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloLiveRange> live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const auto& flattened_instructions =
live_range->flattened_instruction_sequence().instructions();
for (int iteration = 1; iteration < 3; ++iteration) {
for (int inst_number = 0; inst_number < optimizer_->loop_size();
++inst_number) {
HloInstruction* inst = FindInstruction(
module, absl::StrCat(get_inst_prefix_in_iter(iteration), "op",
inst_number));
for (int operand_number = 0; operand_number < 2; ++operand_number) {
const HloInstruction* operand = inst->operand(operand_number);
LOG(INFO) << inst->name() << ", operand " << operand_number;
if (!allocation_map.contains({inst_number, operand_number})) {
TF_RET_CHECK(expect_unsupported_allocations);
continue;
}
const Allocation* allocation =
allocation_map.at({inst_number, operand_number});
if (!allocation->is_copy_allocation()) {
EXPECT_NE(operand->opcode(), HloOpcode::kCopyDone);
int expected_memory_space =
allocation->memory_space() == MemorySpace::kDefault
? kDefaultMemorySpace
: kAlternateMemorySpace;
EXPECT_EQ(operand->shape().layout().memory_space(),
expected_memory_space);
} else {
EXPECT_EQ(allocation->memory_space(), MemorySpace::kAlternate);
TF_RET_CHECK(operand->opcode() == HloOpcode::kCopyDone);
const CopyAllocation* copy_allocation =
static_cast<const CopyAllocation*>(allocation);
if (copy_allocation->copy_done_schedule_before() != inst_number) {
EXPECT_NE(allocation->uses().front(),
(HloUse{inst, operand_number}));
continue;
}
int expected_copy_start_iteration = iteration;
if (copy_allocation->copy_start_schedule_after() ==
optimizer_->loop_size() &&
copy_allocation->copy_done_schedule_before() == 0) {
expected_copy_start_iteration -= 2;
} else if (copy_allocation->copy_start_schedule_after() + 1 >=
copy_allocation->copy_done_schedule_before()) {
expected_copy_start_iteration -= 1;
}
if (expected_copy_start_iteration >= 0) {
const HloInstruction* expected_copy_start_schedule_after =
FindInstruction(
module,
absl::StrCat(
get_inst_prefix_in_iter(
expected_copy_start_iteration),
"op", copy_allocation->copy_start_schedule_after()));
LOG(INFO) << "Expected copy start schedule after: "
<< expected_copy_start_schedule_after->name();
const HloInstruction* copy_start = operand->operand(0);
TF_RET_CHECK(copy_start->opcode() == HloOpcode::kCopyStart);
int copy_start_idx =
live_range->instruction_schedule().at(copy_start);
const HloInstruction* copy_start_schedule_after = nullptr;
for (int i = copy_start_idx - 1; i >= 0; --i) {
HloOpcode opcode = flattened_instructions.at(i)->opcode();
if (opcode != HloOpcode::kCopyStart &&
opcode != HloOpcode::kCopyDone &&
opcode != HloOpcode::kGetTupleElement &&
opcode != HloOpcode::kParameter) {
copy_start_schedule_after = flattened_instructions.at(i);
break;
}
}
TF_RET_CHECK(copy_start_schedule_after != nullptr);
EXPECT_EQ(copy_start_schedule_after,
expected_copy_start_schedule_after);
}
}
}
}
}
return absl::OkStatus();
}
private:
Options options_;
CostAnalysisOptions cost_analysis_options_;
std::unique_ptr<HloCostAnalysis> hlo_cost_analysis_;
std::unique_ptr<HloCostAnalysisCosts> hlo_cost_analysis_costs_;
std::unique_ptr<CostAnalysis> cost_analysis_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
std::unique_ptr<HloLiveRange> live_range_;
std::unique_ptr<MemoryBoundLoopOptimizer> optimizer_;
};
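// A small f32[1,4] loop in which only param0 must cross iterations: the
// optimizer is expected to prefetch param0 and to cover each operand use
// exactly once.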
TEST_F(MemoryBoundLoopOptimizerTest, SimplePrefetch) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 64;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
absl::flat_hash_set<HloUse> seen_uses;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << loop_value.ToString();
if (loop_value.hlo_values.front()
->defining_position()
.instruction->name() == "param0") {
EXPECT_TRUE(loop_value.allocations.back()->is_copy_allocation());
}
for (const auto& allocation : loop_value.allocations) {
for (const HloUse& use : allocation->uses()) {
EXPECT_FALSE(seen_uses.contains(use)) << use.ToString();
seen_uses.insert(use);
}
}
}
for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
HloInstruction* inst =
module->entry_computation()->GetInstructionWithName(inst_name);
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 0})) << inst_name;
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 1})) << inst_name;
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 1.875);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, ReservedScopedMemory) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module,
ParseAndCreateOptimizer(
hlo_loop_str,
          /*alternate_memory_size=*/128, loop_start_idx, &optimizer,
[](const HloInstruction*,
const absl::flat_hash_set<std::pair<int, ShapeIndex>>&,
const absl::flat_hash_set<ShapeIndex>&) { return 128; }));
optimizer->Optimize();
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << "Loop value: " << loop_value.ToString();
for (const auto& allocation : loop_value.allocations) {
ASSERT_NE(static_cast<int64_t>(allocation->memory_space()),
kAlternateMemorySpace);
}
}
}
TEST_F(MemoryBoundLoopOptimizerTest, GetTupleElement) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = f32[1,4] parameter(5)
p6 = f32[1,4] parameter(6)
tupleparam = (f32[1,4], f32[1,4]) parameter(7)
op1 = tanh(p0)
op2 = tanh(p1)
op3 = tanh(op2)
op4 = add(op1, op3)
op5 = tanh(p2)
op6 = tanh(p3)
op7 = tanh(op6)
op8 = add(op5, op7)
op9 = tanh(p4)
op10 = tanh(p5)
op11 = tanh(op10)
op12 = add(op9, op11)
op13 = tanh(p6)
gte = get-tuple-element(tupleparam), index=1
op14 = tanh(gte)
op15 = tanh(op14)
op16 = add(op13, op15)
ROOT root = tuple(tupleparam, op4, op8, op12, op16)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
VLOG(1) << "Original module:\n"
<< module->ToString(HloPrintOptions::ShortParsable());
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments, RunMsa(module.get()));
}
TEST_F(MemoryBoundLoopOptimizerTest, NoAlternateMem) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[1,4] add(f32[1,4] $prev_op4, f32[1,4] $op0)
$op2 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $param0, f32[1,4] $op3)
ROOT $root = tuple($op4, $param0)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndCreateOptimizer(hlo_loop_str,
                                                  /*alternate_memory_size=*/0,
loop_start_idx, &optimizer));
optimizer->Optimize();
absl::flat_hash_set<HloUse> seen_uses;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
LOG(INFO) << loop_value.ToString();
for (const auto& allocation : loop_value.allocations) {
EXPECT_EQ(allocation->memory_space(), MemorySpace::kDefault);
for (const HloUse& use : allocation->uses()) {
EXPECT_FALSE(seen_uses.contains(use)) << use.ToString();
seen_uses.insert(use);
}
}
}
for (absl::string_view inst_name : {"op0", "op1", "op2", "op3", "op4"}) {
HloInstruction* inst =
module->entry_computation()->GetInstructionWithName(inst_name);
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 0})) << inst_name;
EXPECT_TRUE(seen_uses.contains(HloUse{inst, 1})) << inst_name;
}
}
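// Two copies feeding op1 (f32[8,4]) and one feeding op14 compete for copy
// bandwidth; the assertions pin down their FIFO order (exactly one op1
// prefetch is forced to start early, right after op0) and check per-index
// remaining memory against hand-computed byte budgets.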
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithOverlap) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 432;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 3);
bool seen_overlap = false;
bool seen_nonoverlap = false;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op14") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 14);
EXPECT_EQ(prefetch->copy_start_schedule_after(), 0);
} else {
ASSERT_EQ(use.instruction->name(), "op1");
EXPECT_EQ(prefetch->copy_done_schedule_before(), 1);
if (prefetch->copy_start_schedule_after() == 0) {
EXPECT_FALSE(seen_overlap);
seen_overlap = true;
} else {
EXPECT_GT(prefetch->copy_start_schedule_after(), 1);
EXPECT_FALSE(seen_nonoverlap);
seen_nonoverlap = true;
}
}
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 12.5);
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0),
alternate_memory_size - (3 * 16 + 128 + 128));
EXPECT_EQ(remaining_memory.at(1),
alternate_memory_size - (2 * 16 + 2 * 128 + 128 + 16));
EXPECT_EQ(remaining_memory.at(2),
alternate_memory_size - (3 * 16 + 128 + 16));
EXPECT_EQ(remaining_memory.at(3),
alternate_memory_size - (3 * 16 + 128 + 16));
for (int i = 4; i <= 13; ++i) {
EXPECT_EQ(remaining_memory.at(i),
alternate_memory_size - (3 * 16 + 128 + 128 + 16));
}
EXPECT_EQ(remaining_memory.at(14),
alternate_memory_size - (2 * 16 + 128 + 128 + 16));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithoutOverlap) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 192;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 2);
std::optional<int> expected_op14_copy_start_time;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op1") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 1);
EXPECT_GT(prefetch->copy_start_schedule_after(), 1);
expected_op14_copy_start_time = prefetch->copy_start_schedule_after();
}
}
EXPECT_TRUE(expected_op14_copy_start_time.has_value());
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op14") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 14);
EXPECT_EQ(prefetch->copy_start_schedule_after(),
*expected_op14_copy_start_time);
}
}
EXPECT_GT(optimizer->CalculateExecutionTime(), 12.5);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, PrefetchFifoOrderWithOverlap2) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op1 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op1)
$op3 = f32[1,4] add(f32[1,4] $op1, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $op12, f32[1,4] $op13)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
int64_t alternate_memory_size = 432;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str, alternate_memory_size,
loop_start_idx, &optimizer));
optimizer->Optimize();
std::vector<const CopyAllocation*> prefetches;
for (const MemoryBoundLoopOptimizer::LoopValue& loop_value :
optimizer->loop_values()) {
if (!loop_value.allocations.empty() &&
loop_value.allocations.back()->is_copy_allocation()) {
prefetches.push_back(static_cast<const CopyAllocation*>(
loop_value.allocations.back().get()));
}
}
EXPECT_EQ(prefetches.size(), 3);
bool seen_overlap = false;
bool seen_nonoverlap = false;
for (const CopyAllocation* prefetch : prefetches) {
const HloUse& use = *prefetch->uses().begin();
if (use.instruction->name() == "op13") {
EXPECT_EQ(prefetch->copy_done_schedule_before(), 13);
EXPECT_EQ(prefetch->copy_start_schedule_after(), 14);
} else {
ASSERT_EQ(use.instruction->name(), "op0");
EXPECT_EQ(prefetch->copy_done_schedule_before(), 0);
if (prefetch->copy_start_schedule_after() == 14) {
EXPECT_FALSE(seen_overlap);
seen_overlap = true;
} else {
EXPECT_LT(prefetch->copy_start_schedule_after(), 14);
EXPECT_FALSE(seen_nonoverlap);
seen_nonoverlap = true;
}
}
}
EXPECT_EQ(optimizer->CalculateExecutionTime(), 12.5);
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEnd) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op13, f32[1,4] $prev_op14)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op14, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
$op5 = f32[1,4] add(f32[1,4] $op3, f32[1,4] $op4)
$op6 = f32[1,4] add(f32[1,4] $op4, f32[1,4] $op5)
$op7 = f32[1,4] add(f32[1,4] $op5, f32[1,4] $op6)
$op8 = f32[1,4] add(f32[1,4] $op6, f32[1,4] $op7)
$op9 = f32[1,4] add(f32[1,4] $op7, f32[1,4] $op8)
$op10 = f32[1,4] add(f32[1,4] $op8, f32[1,4] $op9)
$op11 = f32[1,4] add(f32[1,4] $op9, f32[1,4] $op10)
$op12 = f32[1,4] add(f32[1,4] $op10, f32[1,4] $op11)
$op13 = f32[1,4] add(f32[1,4] $op11, f32[1,4] $op12)
$op14 = f32[1,4] add(f32[1,4] $param2, f32[1,4] $op13)
ROOT $root = tuple($op1, $op14)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str,
                                             /*alternate_memory_size=*/1024,
loop_start_idx, &optimizer));
optimizer->Optimize();
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 1024));
TF_ASSERT_OK(VerifyMsaEquivalence(module.get()));
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndUnsupportedAllocation) {
absl::string_view hlo_loop_str = R"(
$op0 = f32[1,4] add(f32[1,4] $prev_op3, f32[1,4] $prev_op4)
$op1 = f32[8,4] add(f32[8,4] $param0, f32[8,4] $param1)
$op2 = f32[1,4] add(f32[1,4] $prev_op2, f32[1,4] $op0)
$op3 = f32[1,4] add(f32[1,4] $op0, f32[1,4] $op2)
$op4 = f32[1,4] add(f32[1,4] $op2, f32[1,4] $op3)
ROOT $root = tuple($op1, $op4)
)";
int loop_start_idx;
MemoryBoundLoopOptimizer* optimizer;
TF_ASSERT_OK_AND_ASSIGN(
auto module, ParseAndCreateOptimizer(hlo_loop_str,
                                             /*alternate_memory_size=*/1024,
loop_start_idx, &optimizer));
optimizer->Optimize();
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 1024));
TF_ASSERT_OK(VerifyMsaEquivalence(module.get(),
                                    /*expect_unsupported_allocations=*/true));
const HloInstruction* op2 = FindInstruction(module.get(), "op2");
EXPECT_EQ(op2->shape().layout().memory_space(), kAlternateMemorySpace);
}
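// A while-loop body that is already unrolled three times: temporaries plus
// the pinned parameter must fit in 64 bytes of alternate memory, with
// per-index remaining-memory budgets checked by hand.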
TEST_F(MemoryBoundLoopOptimizerTest, TempAndPinnedAllocations) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=5
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
pinned_prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=1
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_op4 = f32[1,4] multiply(f32[1,4] pinned_prev_param0, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=5
ROOT root = tuple(pinned_prev_param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = pred[] parameter(4)
copy = f32[1,4] copy(p3)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, copy, p4)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
int64_t alternate_memory_size = 64;
TF_ASSERT_OK_AND_ASSIGN(
auto optimizer,
      CreateOptimizer(/*loop_start=*/19, /*loop_end=*/24, module.get(),
                      alternate_memory_size));
optimizer->Optimize();
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(1), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(2), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(3), alternate_memory_size - (3 * 16 + 16));
EXPECT_EQ(remaining_memory.at(4), alternate_memory_size - (2 * 16 + 16));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
TEST_F(MemoryBoundLoopOptimizerTest, NegativeSavingNotPinned) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=5
}
while_body {
while_body_param = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
pinned_prev_param0 = f32[28,4] get-tuple-element(while_body_param), index=0
zero = s32[] constant(0)
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=1
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
pinned_slice = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
prev_op4 = f32[1,4] multiply(f32[1,4] pinned_slice, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
pinned_slice2 = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
op4 = f32[1,4] multiply(f32[1,4] pinned_slice2, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
pinned_slice3 = f32[1,4] dynamic-slice(pinned_prev_param0, zero, zero), dynamic_slice_sizes={1,4}
next_op4 = f32[1,4] multiply(f32[1,4] pinned_slice3, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=5
ROOT root = tuple(pinned_prev_param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[28,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = pred[] parameter(4)
copy = f32[1,4] copy(p3)
tuple = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, copy, p4)
while = (f32[28,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=4
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
int64_t alternate_memory_size = 52;
TF_ASSERT_OK_AND_ASSIGN(
auto optimizer,
      CreateOptimizer(/*loop_start=*/21, /*loop_end=*/27, module.get(),
                      alternate_memory_size));
optimizer->Optimize();
const std::vector<int64_t>& remaining_memory = optimizer->remaining_memory();
EXPECT_EQ(remaining_memory.at(0), alternate_memory_size - (3 * 16 + 4));
EXPECT_EQ(optimizer->MaxAlternateMemoryUsed(), alternate_memory_size);
}
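// Full MSA over an unrolled while body: the multiply operands in all three
// iterations should be fed by alternate-memory CopyDones whose prefetch
// distances match, i.e. the loop settles into a steady-state schedule.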
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndWhileLoop) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=6
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
param0 = f32[1,4] get-tuple-element(while_body_param), index=1
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=4
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_op4 = f32[1,4] multiply(f32[1,4] prev_param0, f32[1,4] prev_op3)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
op4 = f32[1,4] multiply(f32[1,4] param0, f32[1,4] op3)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_op4 = f32[1,4] multiply(f32[1,4] next_param0, f32[1,4] next_op3)
p = pred[] get-tuple-element(while_body_param), index=6
ROOT root = tuple(prev_param0, param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, p)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = pred[] parameter(5)
copy = f32[1,4] copy(p4)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, p4, copy, p5)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 512));
TF_ASSERT_OK_AND_ASSIGN(auto alias_analysis,
HloAliasAnalysis::Run(module.get()));
TF_ASSERT_OK_AND_ASSIGN(auto hlo_live_range,
HloLiveRange::Run(module->schedule(), *alias_analysis,
module->entry_computation()));
const HloInstruction* prev_copy_done =
FindInstruction(module.get(), "prev_op4")->operand(0);
const HloInstruction* copy_done =
FindInstruction(module.get(), "op4")->operand(0);
const HloInstruction* next_copy_done =
FindInstruction(module.get(), "next_op4")->operand(0);
ASSERT_EQ(prev_copy_done->opcode(), HloOpcode::kCopyDone);
ASSERT_EQ(copy_done->opcode(), HloOpcode::kCopyDone);
ASSERT_EQ(next_copy_done->opcode(), HloOpcode::kCopyDone);
EXPECT_EQ(prev_copy_done->shape().layout().memory_space(),
kAlternateMemorySpace);
EXPECT_EQ(copy_done->shape().layout().memory_space(), kAlternateMemorySpace);
EXPECT_EQ(next_copy_done->shape().layout().memory_space(),
kAlternateMemorySpace);
auto prefetch_distance = [&](const HloInstruction* copy_done) {
return hlo_live_range->instruction_schedule().at(copy_done) -
hlo_live_range->instruction_schedule().at(copy_done->operand(0));
};
EXPECT_EQ(prefetch_distance(prev_copy_done), prefetch_distance(copy_done));
EXPECT_EQ(prefetch_distance(next_copy_done), prefetch_distance(copy_done));
}
TEST_F(MemoryBoundLoopOptimizerTest, OptimizerEndToEndNestedWhileLoopBug) {
absl::string_view hlo_str = R"(
HloModule module, is_scheduled=true
prev_while_cond {
prev_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(prev_while_cond_param), index=1
}
prev_while_body {
prev_while_body_param = (f32[1,4], pred[]) parameter(0)
prev_while_body_gte = f32[1,4] get-tuple-element(prev_while_body_param), index=0
prev_while_body_pred = pred[] get-tuple-element(prev_while_body_param), index=1
prev_while_body_op = f32[1,4] negate(prev_while_body_gte)
ROOT prev_while_body_root = (f32[1,4], pred[]) tuple(prev_while_body_op, prev_while_body_pred)
}
current_while_cond {
current_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(current_while_cond_param), index=1
}
current_while_body {
current_while_body_param = (f32[1,4], pred[]) parameter(0)
current_while_body_gte = f32[1,4] get-tuple-element(current_while_body_param), index=0
current_while_body_pred = pred[] get-tuple-element(current_while_body_param), index=1
current_while_body_op = f32[1,4] negate(current_while_body_gte)
ROOT current_while_body_root = (f32[1,4], pred[]) tuple(current_while_body_op, current_while_body_pred)
}
next_while_cond {
next_while_cond_param = (f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(next_while_cond_param), index=1
}
next_while_body {
next_while_body_param = (f32[1,4], pred[]) parameter(0)
next_while_body_gte = f32[1,4] get-tuple-element(next_while_body_param), index=0
next_while_body_pred = pred[] get-tuple-element(next_while_body_param), index=1
next_while_body_op = f32[1,4] negate(next_while_body_gte)
ROOT next_while_body_root = (f32[1,4], pred[]) tuple(next_while_body_op, next_while_body_pred)
}
while_cond {
while_cond_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
ROOT p = pred[] get-tuple-element(while_cond_param), index=6
}
while_body {
while_body_param = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) parameter(0)
prev_param0 = f32[1,4] get-tuple-element(while_body_param), index=0
param0 = f32[1,4] get-tuple-element(while_body_param), index=1
next_param0 = f32[1,4] get-tuple-element(while_body_param), index=2
prev_prev_op3 = f32[1,4] get-tuple-element(while_body_param), index=3
prev_prev_op4 = f32[1,4] get-tuple-element(while_body_param), index=4
while_pred = pred[] get-tuple-element(while_body_param), index=6
prev_op0 = f32[1,4] add(f32[1,4] prev_prev_op3, f32[1,4] prev_prev_op4)
prev_op1 = f32[1,4] add(f32[1,4] prev_prev_op4, f32[1,4] prev_op0)
prev_op2 = f32[1,4] add(f32[1,4] prev_op0, f32[1,4] prev_op1)
prev_op3 = f32[1,4] add(f32[1,4] prev_op1, f32[1,4] prev_op2)
prev_tuple = (f32[1,4], pred[]) tuple(prev_op3, while_pred)
prev_while = (f32[1,4], pred[]) while(prev_tuple), condition=prev_while_cond, body=prev_while_body
prev_gte = f32[1,4] get-tuple-element(prev_while), index=0
prev_op4 = f32[1,4] multiply(f32[1,4] prev_param0, f32[1,4] prev_gte)
op0 = f32[1,4] add(f32[1,4] prev_op3, f32[1,4] prev_op4)
op1 = f32[1,4] add(f32[1,4] prev_op4, f32[1,4] op0)
op2 = f32[1,4] add(f32[1,4] op0, f32[1,4] op1)
op3 = f32[1,4] add(f32[1,4] op1, f32[1,4] op2)
current_tuple = (f32[1,4], pred[]) tuple(op3, while_pred)
current_while = (f32[1,4], pred[]) while(current_tuple), condition=current_while_cond, body=current_while_body
current_gte = f32[1,4] get-tuple-element(current_while), index=0
op4 = f32[1,4] multiply(f32[1,4] param0, f32[1,4] current_gte)
next_op0 = f32[1,4] add(f32[1,4] op3, f32[1,4] op4)
next_op1 = f32[1,4] add(f32[1,4] op4, f32[1,4] next_op0)
next_op2 = f32[1,4] add(f32[1,4] next_op0, f32[1,4] next_op1)
next_op3 = f32[1,4] add(f32[1,4] next_op1, f32[1,4] next_op2)
next_tuple = (f32[1,4], pred[]) tuple(next_op3, while_pred)
next_while = (f32[1,4], pred[]) while(next_tuple), condition=next_while_cond, body=next_while_body
next_gte = f32[1,4] get-tuple-element(next_while), index=0
next_op4 = f32[1,4] multiply(f32[1,4] next_param0, f32[1,4] next_gte)
ROOT root = tuple(prev_param0, param0, next_param0, prev_prev_op3, prev_prev_op4, next_op4, while_pred)
}
ENTRY entry {
p0 = f32[1,4] parameter(0)
p1 = f32[1,4] parameter(1)
p2 = f32[1,4] parameter(2)
p3 = f32[1,4] parameter(3)
p4 = f32[1,4] parameter(4)
p5 = pred[] parameter(5)
copy = f32[1,4] copy(p4)
tuple = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) tuple(p0, p1, p2, p3, p4, copy, p5)
while = (f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], f32[1,4], pred[]) while(tuple), condition=while_cond, body=while_body
ROOT root = f32[1,4] get-tuple-element(while), index=5
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_str));
TF_ASSERT_OK_AND_ASSIGN(auto preset_assignments,
RunMsa(module.get(), 512));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/memory_bound_loop_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
35055de0-39d9-4889-8071-f4779761f1e2 | cpp | tensorflow/tensorflow | example_proto_helper | tensorflow/core/util/example_proto_helper.cc | tensorflow/core/util/example_proto_helper_test.cc | #include "tensorflow/core/util/example_proto_helper.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
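// Returns OK iff `dtype` is one of the types supported for Example parsing:
// DT_INT64, DT_FLOAT, or DT_STRING.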
Status CheckValidType(const DataType& dtype) {
switch (dtype) {
case DT_INT64:
case DT_FLOAT:
case DT_STRING:
return absl::OkStatus();
default:
return errors::InvalidArgument("Received input dtype: ",
DataTypeString(dtype));
}
}
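// Sets `*match` to whether the kind of `feature` (int64/float/bytes list)
// corresponds to `dtype`; fails for unsupported dtypes.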
Status CheckTypesMatch(const Feature& feature, const DataType& dtype,
bool* match) {
switch (dtype) {
case DT_INT64:
*match = (feature.kind_case() == Feature::kInt64List);
break;
case DT_FLOAT:
*match = (feature.kind_case() == Feature::kFloatList);
break;
case DT_STRING:
*match = (feature.kind_case() == Feature::kBytesList);
break;
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
return absl::OkStatus();
}
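// Copies the values of one fixed-length feature into row `out_index` of the
// batched dense output tensor `out`, checking that the number of values in
// the proto matches the configured shape.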
Status FeatureDenseCopy(const std::size_t out_index, const string& name,
const string& key, const DataType& dtype,
const TensorShape& shape, const Feature& feature,
Tensor* out) {
const std::size_t num_elements = shape.num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of int64 values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<int64_t>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
return errors::InvalidArgument(
"Name: ", name, ", Key: ", key, ", Index: ", out_index,
". Number of float values != expected. "
"values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<float>().data() + offset;
std::copy_n(values.value().data(), num_elements, out_p);
return absl::OkStatus();
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
if (static_cast<size_t>(values.value_size()) != num_elements) {
      return errors::InvalidArgument(
          "Name: ", name, ", Key: ", key, ", Index: ", out_index,
          ". Number of bytes values != expected. "
          "values size: ",
values.value_size(), " but output shape: ", shape.DebugString());
}
auto out_p = out->flat<tstring>().data() + offset;
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return absl::OkStatus();
}
default:
return errors::InvalidArgument("Invalid input dtype: ",
DataTypeString(dtype));
}
}
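// Copies the values of one variable-length feature into a fresh rank-1
// tensor sized to however many values the proto holds.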
Tensor FeatureSparseCopy(const std::size_t batch, const string& key,
const DataType& dtype, const Feature& feature) {
switch (dtype) {
case DT_INT64: {
const Int64List& values = feature.int64_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<int64_t>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_FLOAT: {
const FloatList& values = feature.float_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<float>().data();
std::copy_n(values.value().data(), num_elements, out_p);
return out;
}
case DT_STRING: {
const BytesList& values = feature.bytes_list();
const int64_t num_elements = values.value_size();
Tensor out(dtype, TensorShape({num_elements}));
auto out_p = out.flat<tstring>().data();
std::transform(values.value().data(),
values.value().data() + num_elements, out_p,
[](const string* s) { return *s; });
return out;
}
default:
      LOG(FATAL) << "Not supposed to be here. Dtype requested: " << dtype;
}
}
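// Appends the elements of `in` (one example's values for a sparse feature)
// into the batched `indices`/`values` tensors starting at row `offset`,
// writing (batch, i) index pairs. Returns the number of elements copied.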
int64_t CopyIntoSparseTensor(const Tensor& in, const int batch,
const int64_t offset, Tensor* indices,
Tensor* values) {
const int64_t num_elements = in.shape().num_elements();
const DataType& dtype = in.dtype();
CHECK_EQ(dtype, values->dtype());
if (num_elements > 0) {
auto ix_t = indices->matrix<int64_t>();
int64_t* ix_p = &ix_t(offset, 0);
for (int64_t i = 0; i < num_elements; ++i, ix_p += 2) {
*ix_p = batch;
*(ix_p + 1) = i;
}
}
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
values->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
values->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
values->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
return num_elements;
}
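// Copies the whole tensor `in` into row `out_index` of the batched dense
// output `out`; used to materialize default values for missing features.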
void RowDenseCopy(const std::size_t& out_index, const DataType& dtype,
const Tensor& in, Tensor* out) {
const std::size_t num_elements = in.shape().num_elements();
const std::size_t offset = out_index * num_elements;
switch (dtype) {
case DT_INT64: {
std::copy_n(in.flat<int64_t>().data(), num_elements,
out->flat<int64_t>().data() + offset);
break;
}
case DT_FLOAT: {
std::copy_n(in.flat<float>().data(), num_elements,
out->flat<float>().data() + offset);
break;
}
case DT_STRING: {
std::copy_n(in.flat<tstring>().data(), num_elements,
out->flat<tstring>().data() + offset);
break;
}
default:
LOG(FATAL) << "Not supposed to be here. Saw dtype: " << dtype;
}
}
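// Parses one Example proto into preallocated batched dense tensors (writing
// row `batch_index`) and per-example temporary sparse value tensors. Missing
// dense features fall back to their default value; a feature whose default is
// empty is required, and its absence is an InvalidArgument error.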
Status SingleExampleProtoToTensors(
const Example& example, const string& example_name, const int batch_index,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features,
std::vector<Tensor*>* output_dense_values_tensor,
std::vector<std::vector<Tensor>>* output_sparse_values_tmp) {
const Features& features = example.features();
const auto& feature_dict = features.feature();
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& feature_config = fixed_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const TensorShape& shape = feature_config.shape;
const Tensor& default_value = feature_config.default_value;
bool required = (default_value.NumElements() == 0);
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
const bool required_ok = feature_has_data || !required;
if (!required_ok) {
return errors::InvalidArgument("Name: ", example_name, ", Feature: ", key,
" is required but could not be found.");
}
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
TF_RETURN_IF_ERROR(FeatureDenseCopy(batch_index, example_name, key, dtype,
shape, f,
(*output_dense_values_tensor)[d]));
} else {
RowDenseCopy(batch_index, dtype, default_value,
(*output_dense_values_tensor)[d]);
}
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const string& key = feature_config.key;
const DataType& dtype = feature_config.dtype;
const auto& feature_found = feature_dict.find(key);
const bool feature_has_data =
(feature_found != feature_dict.end() &&
(feature_found->second.kind_case() != Feature::KIND_NOT_SET));
if (feature_has_data) {
const Feature& f = feature_found->second;
bool types_match;
TF_RETURN_IF_ERROR(CheckTypesMatch(f, dtype, &types_match));
if (!types_match) {
return errors::InvalidArgument("Name: ", example_name,
", Feature: ", key,
". Data types don't match. ",
"Expected type: ", DataTypeString(dtype),
" Feature is: ", f.DebugString());
}
(*output_sparse_values_tmp)[d][batch_index] =
FeatureSparseCopy(batch_index, key, dtype, f);
} else {
(*output_sparse_values_tmp)[d][batch_index] =
Tensor(dtype, TensorShape({0}));
}
}
return absl::OkStatus();
}
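// Computes the batched indices/values shapes for one variable-length feature
// by summing per-example value counts, and records the largest count seen in
// any single example.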
Status GetSparseTensorShapes(const VarLenFeature& var_len_feature,
const std::vector<Tensor>& sparse_values_tmp,
const int batch_size,
VarLenFeatureBatchShapes* output_shapes) {
int64_t total_num_features = 0;
int64_t max_num_features = 0;
for (int b = 0; b < batch_size; ++b) {
const Tensor& t = sparse_values_tmp[b];
const int64_t num_elements = t.shape().num_elements();
total_num_features += num_elements;
max_num_features = std::max(max_num_features, num_elements);
}
output_shapes->indices_shape.AddDim(total_num_features);
output_shapes->indices_shape.AddDim(2);
output_shapes->values_shape.AddDim(total_num_features);
output_shapes->max_num_features = max_num_features;
return absl::OkStatus();
}
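// Parses a batch of Example protos, producing one dense tensor per
// fixed-length feature and an (indices, values, dense_shape) triple per
// variable-length feature.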
Status BatchExampleProtoToTensors(
const std::vector<const Example*>& examples,
const std::vector<string>& names,
const std::vector<FixedLenFeature>& fixed_len_features,
const std::vector<VarLenFeature>& var_len_features, Allocator* allocator,
std::vector<Tensor>* output_dense_values_tensor,
std::vector<Tensor>* output_sparse_indices_tensor,
std::vector<Tensor>* output_sparse_values_tensor,
std::vector<Tensor>* output_sparse_shapes_tensor) {
const int batch_size = examples.size();
const bool has_names = (!names.empty());
if (has_names) {
if (names.size() != examples.size()) {
return errors::InvalidArgument(
"Expected len(names) == len(examples), but got: ", names.size(),
" vs. ", examples.size());
}
}
std::vector<Tensor*> output_dense_values_tensor_ptrs(
fixed_len_features.size());
for (size_t d = 0; d < fixed_len_features.size(); ++d) {
const FixedLenFeature& config = fixed_len_features[d];
TensorShape out_shape;
out_shape.AddDim(batch_size);
const TensorShape& shape = config.shape;
const DataType& dtype = config.dtype;
for (const int dim : shape.dim_sizes()) out_shape.AddDim(dim);
(*output_dense_values_tensor)[d] = Tensor(allocator, dtype, out_shape);
output_dense_values_tensor_ptrs[d] = &(*output_dense_values_tensor)[d];
}
std::vector<std::vector<Tensor>> sparse_values_tmp(var_len_features.size());
for (size_t d = 0; d < var_len_features.size(); ++d) {
sparse_values_tmp[d] = std::vector<Tensor>(batch_size);
}
for (size_t b = 0; b < examples.size(); ++b) {
const Example& ex = *(examples[b]);
const string& example_name = (has_names) ? names[b] : "<unknown>";
TF_RETURN_IF_ERROR(SingleExampleProtoToTensors(
ex, example_name, b, fixed_len_features, var_len_features,
&output_dense_values_tensor_ptrs, &sparse_values_tmp));
}
for (size_t d = 0; d < var_len_features.size(); ++d) {
const VarLenFeature& feature_config = var_len_features[d];
const DataType& dtype = feature_config.dtype;
const std::vector<Tensor>& sparse_values_tensor = sparse_values_tmp[d];
VarLenFeatureBatchShapes sparse_tensor_batch_shapes;
TF_RETURN_IF_ERROR(GetSparseTensorShapes(feature_config,
sparse_values_tensor, batch_size,
&sparse_tensor_batch_shapes));
const TensorShape& indices_shape = sparse_tensor_batch_shapes.indices_shape;
const TensorShape& values_shape = sparse_tensor_batch_shapes.values_shape;
(*output_sparse_indices_tensor)[d] =
Tensor(allocator, DT_INT64, indices_shape);
(*output_sparse_values_tensor)[d] = Tensor(allocator, dtype, values_shape);
(*output_sparse_shapes_tensor)[d] =
Tensor(allocator, DT_INT64, TensorShape({2}));
auto shape_t = (*output_sparse_shapes_tensor)[d].vec<int64_t>();
shape_t(0) = batch_size;
shape_t(1) = sparse_tensor_batch_shapes.max_num_features;
Tensor* sp_indices_d = &(*output_sparse_indices_tensor)[d];
Tensor* sp_values_d = &(*output_sparse_values_tensor)[d];
int64_t offset = 0;
for (int b = 0; b < batch_size; ++b) {
const int64_t num_elements = CopyIntoSparseTensor(
sparse_values_tensor[b], b, offset, sp_indices_d, sp_values_d);
offset += num_elements;
}
}
return absl::OkStatus();
}
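// The FinishInit methods below validate the attributes captured for the
// ParseExample family of ops: the key/type/shape list lengths must agree and
// every requested dtype must be supported.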
Status ParseExampleAttrs::FinishInit(int op_version) {
switch (op_version) {
case 1:
num_ragged = 0;
break;
case 2:
num_dense = dense_types.size();
num_ragged = ragged_value_types.size();
break;
default:
      return errors::InvalidArgument("Unexpected op_version: ", op_version);
}
if (static_cast<size_t>(num_sparse) != sparse_types.size()) {
return errors::InvalidArgument("len(sparse_keys) != len(sparse_types)");
}
if (static_cast<size_t>(num_dense) != dense_types.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_types)");
}
if (static_cast<size_t>(num_dense) != dense_shapes.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_shapes)");
}
if (static_cast<size_t>(num_ragged) != ragged_value_types.size()) {
return errors::InvalidArgument(
"len(ragged_keys) != len(ragged_value_types)");
}
if (static_cast<size_t>(num_ragged) != ragged_split_types.size()) {
return errors::InvalidArgument(
"len(ragged_keys) != len(ragged_split_types)");
}
if (num_dense > std::numeric_limits<int32>::max()) {
    return errors::InvalidArgument("num_dense too large");
}
for (const DataType& type : dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid ragged_split_type: ",
DataTypeString(type));
}
}
return absl::OkStatus();
}
Status ParseSingleExampleAttrs::FinishInit() {
if (sparse_keys.size() != sparse_types.size()) {
return errors::InvalidArgument("len(sparse_keys) != len(sparse_types)");
}
if (dense_keys.size() != dense_types.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_types)");
}
if (dense_keys.size() != dense_shapes.size()) {
return errors::InvalidArgument("len(dense_keys) != len(dense_shapes)");
}
for (const DataType& type : dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
return absl::OkStatus();
}
Status ParseSequenceExampleAttrs::FinishInit(int op_version) {
switch (op_version) {
case 1:
num_context_ragged = 0;
num_feature_list_ragged = 0;
if (num_context_sparse != context_sparse_keys.size()) {
return errors::InvalidArgument(
"num_context_sparse (", num_context_sparse,
") must match the size of context_sparse_keys (",
context_sparse_keys.size(), ")");
}
if (num_context_dense != context_dense_keys.size()) {
return errors::InvalidArgument(
"num_context_dense (", num_context_dense,
") must match the size of context_dense_keys (",
context_dense_keys.size(), ")");
}
if (num_feature_list_sparse != feature_list_sparse_keys.size()) {
return errors::InvalidArgument(
"num_feature_list_sparse (", num_feature_list_sparse,
") must match the size of feature_list_sparse_keys (",
feature_list_sparse_keys.size(), ")");
}
if (num_feature_list_dense != feature_list_dense_keys.size()) {
return errors::InvalidArgument(
"num_feature_list_dense (", num_feature_list_dense,
") must match the size of feature_list_dense_keys (",
feature_list_dense_keys.size(), ")");
}
break;
case 2:
num_context_dense = context_dense_types.size();
num_context_ragged = context_ragged_value_types.size();
num_feature_list_ragged = feature_list_ragged_value_types.size();
break;
default:
      return errors::InvalidArgument("Unexpected op_version: ", op_version);
}
if (num_context_sparse != context_sparse_types.size()) {
return errors::InvalidArgument(
"num_context_sparse (", num_context_sparse,
") must match the size of context_sparse_types (",
context_sparse_types.size(), ")");
}
if (num_context_dense != context_dense_types.size() ||
num_context_dense != context_dense_shapes.size()) {
return errors::InvalidArgument(
"num_context_dense (", num_context_dense,
") must match the size of context_dense_types (",
context_dense_types.size(), ") and context_dense_shapes (",
context_dense_shapes.size(), ")");
}
if ((num_context_ragged != context_ragged_value_types.size()) ||
(num_context_ragged != context_ragged_split_types.size())) {
return errors::InvalidArgument(
"num_context_ragged (", num_context_ragged,
") must match the size of context_ragged_value_types (",
context_ragged_value_types.size(), ") and context_ragged_split_types (",
context_ragged_split_types.size(), ")");
}
if (num_feature_list_sparse != feature_list_sparse_types.size()) {
return errors::InvalidArgument(
"num_feature_list_sparse (", num_feature_list_sparse,
") must match the size of feature_list_sparse_types (",
feature_list_sparse_types.size(), ")");
}
if (num_feature_list_dense != feature_list_dense_types.size() ||
num_feature_list_dense != feature_list_dense_shapes.size()) {
return errors::InvalidArgument(
"num_feature_list_dense (", num_feature_list_dense,
") must match the size of feature_list_dense_types (",
feature_list_dense_types.size(), ") and feature_list_dense_shapes (",
feature_list_dense_shapes.size(), ")");
}
if ((num_feature_list_ragged != feature_list_ragged_value_types.size()) ||
(num_feature_list_ragged != feature_list_ragged_split_types.size())) {
return errors::InvalidArgument(
"num_feature_list_ragged (", num_feature_list_ragged,
") must match the size of feature_list_ragged_value_types (",
feature_list_ragged_value_types.size(),
") and feature_list_ragged_split_types (",
feature_list_ragged_split_types.size(), ")");
}
for (const DataType& type : context_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid context_ragged_split_type: ",
DataTypeString(type));
}
}
for (const DataType& type : feature_list_ragged_value_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_ragged_split_types) {
if (!(type == DT_INT64 || type == DT_INT32)) {
return errors::InvalidArgument("Invalid feature_list_ragged_split_type: ",
DataTypeString(type));
}
}
return absl::OkStatus();
}
Status ParseSingleSequenceExampleAttrs::FinishInit() {
if (static_cast<size_t>(num_context_sparse) != context_sparse_types.size()) {
return errors::InvalidArgument(
"len(context_sparse_keys) != len(context_sparse_types)");
}
if (static_cast<size_t>(num_context_dense) != context_dense_types.size()) {
return errors::InvalidArgument(
"len(context_dense_keys) != len(context_dense_types)");
}
if (static_cast<size_t>(num_context_dense) != context_dense_shapes.size()) {
return errors::InvalidArgument(
"len(context_dense_keys) != len(context_dense_shapes)");
}
if (static_cast<size_t>(num_feature_list_sparse) !=
feature_list_sparse_types.size()) {
return errors::InvalidArgument(
"len(feature_list_sparse_keys) != len(feature_list_sparse_types)");
}
if (static_cast<size_t>(num_feature_list_dense) !=
feature_list_dense_types.size()) {
return errors::InvalidArgument(
"len(feature_list_dense_keys) != "
"len(feature_list_dense_types)");
}
for (const DataType& type : context_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : context_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_dense_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
for (const DataType& type : feature_list_sparse_types) {
TF_RETURN_IF_ERROR(CheckValidType(type));
}
return absl::OkStatus();
}
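// Validates that every dense shape has a known rank and known inner
// dimensions (only the outermost dimension may be -1, i.e. variable length),
// and records, per shape, whether it is variable-length and how many elements
// one stride covers.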
Status GetDenseShapes(const std::vector<PartialTensorShape>& dense_shapes,
std::vector<bool>* variable_length,
std::vector<std::size_t>* elements_per_stride) {
for (int i = 0; i < dense_shapes.size(); ++i) {
bool shape_ok = true;
if (dense_shapes[i].dims() == -1) {
shape_ok = false;
} else {
for (int d = 1; d < dense_shapes[i].dims(); ++d) {
if (dense_shapes[i].dim_size(d) == -1) {
shape_ok = false;
}
}
}
if (!shape_ok) {
return errors::InvalidArgument(
"dense_shapes[", i,
"] has unknown rank or unknown inner dimensions: ",
dense_shapes[i].DebugString());
}
TensorShape dense_shape;
if (dense_shapes[i].dims() > 0 && dense_shapes[i].dim_size(0) == -1) {
variable_length->push_back(true);
for (int d = 1; d < dense_shapes[i].dims(); ++d) {
dense_shape.AddDim(dense_shapes[i].dim_size(d));
}
} else {
variable_length->push_back(false);
dense_shapes[i].AsTensorShape(&dense_shape);
}
elements_per_stride->push_back(dense_shape.num_elements());
}
return absl::OkStatus();
}
} | #include "tensorflow/core/util/example_proto_helper.h"
#include <cstdint>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
namespace tensorflow {
namespace {
TEST(CopyIntoSparseTensorTest, String) {
Tensor in_tensor(DT_STRING, TensorShape({2}));
in_tensor.flat<tstring>()(0) = "hello";
in_tensor.flat<tstring>()(1) = "world";
int n_values = 5;
Tensor ix_tensor(DT_INT64, TensorShape({n_values, 2}));
auto ix_matrix = ix_tensor.matrix<int64_t>();
for (int i = 0; i < n_values; ++i) {
for (int j = 0; j < 2; ++j) {
ix_matrix(i, j) = 0;
}
}
Tensor value_tensor(DT_STRING, TensorShape({n_values}));
int batch = 67;
int64_t offset = 1;
auto n_elems =
CopyIntoSparseTensor(in_tensor, batch, offset, &ix_tensor, &value_tensor);
EXPECT_EQ(2, n_elems);
EXPECT_EQ(0, ix_matrix(0, 0));
EXPECT_EQ(0, ix_matrix(0, 1));
EXPECT_EQ(batch, ix_matrix(1, 0));
EXPECT_EQ(0, ix_matrix(1, 1));
EXPECT_EQ(batch, ix_matrix(2, 0));
EXPECT_EQ(1, ix_matrix(2, 1));
EXPECT_EQ(0, ix_matrix(3, 0));
EXPECT_EQ(0, ix_matrix(3, 1));
EXPECT_EQ(0, ix_matrix(4, 0));
EXPECT_EQ(0, ix_matrix(4, 1));
auto values = value_tensor.flat<tstring>();
EXPECT_EQ("", values(0));
EXPECT_EQ("hello", values(1));
EXPECT_EQ("world", values(2));
EXPECT_EQ("", values(3));
EXPECT_EQ("", values(4));
}
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
class SingleExampleProtoToTensorsTest : public ::testing::Test {
protected:
void SetUp() override {
FixedLenFeature int64_dense_config;
int64_dense_config.key = kDenseInt64Key;
int64_dense_config.dtype = DT_INT64;
int64_dense_config.shape = TensorShape({1});
int64_dense_config.default_value = Tensor(DT_INT64, TensorShape({1}));
int64_dense_config.default_value.scalar<int64_t>()() = 0;
dense_vec_.push_back(int64_dense_config);
FixedLenFeature float_dense_config;
float_dense_config.key = kDenseFloatKey;
float_dense_config.dtype = DT_FLOAT;
float_dense_config.shape = TensorShape({1});
float_dense_config.default_value = Tensor(DT_FLOAT, TensorShape({1}));
float_dense_config.default_value.scalar<float>()() = 0.0;
dense_vec_.push_back(float_dense_config);
FixedLenFeature string_dense_config;
string_dense_config.key = kDenseStringKey;
string_dense_config.dtype = DT_STRING;
string_dense_config.shape = TensorShape({1});
string_dense_config.default_value = Tensor(DT_STRING, TensorShape({1}));
string_dense_config.default_value.scalar<tstring>()() = "default";
dense_vec_.push_back(string_dense_config);
VarLenFeature int64_sparse_config;
int64_sparse_config.key = kSparseInt64Key;
int64_sparse_config.dtype = DT_INT64;
sparse_vec_.push_back(int64_sparse_config);
VarLenFeature float_sparse_config;
float_sparse_config.key = kSparseFloatKey;
float_sparse_config.dtype = DT_FLOAT;
sparse_vec_.push_back(float_sparse_config);
VarLenFeature string_sparse_config;
string_sparse_config.key = kSparseStringKey;
string_sparse_config.dtype = DT_STRING;
sparse_vec_.push_back(string_sparse_config);
}
std::vector<FixedLenFeature> dense_vec_;
std::vector<VarLenFeature> sparse_vec_;
};
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kSparseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kSparseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kSparseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(ex, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(42, int64_tensor_vec[0].vec<int64_t>()(0));
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_NEAR(4.2, float_tensor_vec[0].vec<float>()(0), 0.001);
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ("forty-two", string_tensor_vec[0].vec<tstring>()(0));
}
TEST_F(SingleExampleProtoToTensorsTest, SparseOnlyEmpty) {
Example empty;
std::vector<Tensor*> output_dense_values(0);
std::vector<std::vector<Tensor>> output_sparse_values_tmp(3);
for (int i = 0; i < 3; ++i) {
output_sparse_values_tmp[i] = std::vector<Tensor>(1);
}
std::vector<FixedLenFeature> empty_dense_vec;
TF_EXPECT_OK(SingleExampleProtoToTensors(empty, "", 0, empty_dense_vec,
sparse_vec_, &output_dense_values,
&output_sparse_values_tmp));
const std::vector<Tensor>& int64_tensor_vec = output_sparse_values_tmp[0];
EXPECT_EQ(1, int64_tensor_vec.size());
EXPECT_EQ(0, int64_tensor_vec[0].vec<int64_t>().size());
const std::vector<Tensor>& float_tensor_vec = output_sparse_values_tmp[1];
EXPECT_EQ(1, float_tensor_vec.size());
EXPECT_EQ(0, float_tensor_vec[0].vec<float>().size());
const std::vector<Tensor>& string_tensor_vec = output_sparse_values_tmp[2];
EXPECT_EQ(1, string_tensor_vec.size());
EXPECT_EQ(0, string_tensor_vec[0].vec<tstring>().size());
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyTrivial) {
Example ex;
(*ex.mutable_features()->mutable_feature())[kDenseInt64Key]
.mutable_int64_list()
->add_value(42);
(*ex.mutable_features()->mutable_feature())[kDenseFloatKey]
.mutable_float_list()
->add_value(4.2);
(*ex.mutable_features()->mutable_feature())[kDenseStringKey]
.mutable_bytes_list()
->add_value("forty-two");
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
ex, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_TRUE(output_sparse_values_tmp.empty());
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(42, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(4.2, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("forty-two", str_dense_output.matrix<tstring>()(0, 0));
}
TEST_F(SingleExampleProtoToTensorsTest, DenseOnlyDefaults) {
std::vector<Tensor*> output_dense_values(3);
Tensor int64_dense_output(DT_INT64, TensorShape({1, 1}));
output_dense_values[0] = &int64_dense_output;
Tensor float_dense_output(DT_FLOAT, TensorShape({1, 1}));
output_dense_values[1] = &float_dense_output;
Tensor str_dense_output(DT_STRING, TensorShape({1, 1}));
output_dense_values[2] = &str_dense_output;
Example empty;
std::vector<VarLenFeature> empty_sparse_vec;
std::vector<std::vector<Tensor>> output_sparse_values_tmp;
TF_EXPECT_OK(SingleExampleProtoToTensors(
empty, "", 0, dense_vec_, empty_sparse_vec, &output_dense_values,
&output_sparse_values_tmp));
EXPECT_EQ(1, int64_dense_output.matrix<int64_t>().size());
EXPECT_EQ(0, int64_dense_output.matrix<int64_t>()(0, 0));
EXPECT_EQ(1, float_dense_output.matrix<float>().size());
EXPECT_NEAR(0.0, float_dense_output.matrix<float>()(0, 0), 0.001);
EXPECT_EQ(1, str_dense_output.matrix<tstring>().size());
EXPECT_EQ("default", str_dense_output.matrix<tstring>()(0, 0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
524aa7af-ef63-4f60-b2bc-3d00af43a14a | cpp | tensorflow/tensorflow | bitcast_dtypes_expander | third_party/xla/xla/service/bitcast_dtypes_expander.cc | third_party/xla/xla/service/bitcast_dtypes_expander_test.cc | #include "xla/service/bitcast_dtypes_expander.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/builder/lib/arithmetic.h"
#include "xla/hlo/builder/lib/broadcast.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
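// Expands a bitcast-convert between element types of different bit widths by
// emitting a call to a cached helper computation: a wider->narrower
// conversion splits each element into several narrow values along a new
// minor dimension using shifts and masks, while narrower->wider shifts each
// group member into place and OR-reduces the minor dimension away. Same-width
// bitcasts are returned unchanged.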
absl::StatusOr<HloInstruction*> BitcastDtypesExpander::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* input = instruction->mutable_operand(0);
const Shape& from_shape = input->shape();
const Shape& to_shape = instruction->shape();
int input_bit_width = primitive_util::BitWidth(from_shape.element_type());
int output_bit_width = primitive_util::BitWidth(to_shape.element_type());
PrimitiveType input_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(input_bit_width);
PrimitiveType output_logical_type =
primitive_util::UnsignedIntegralTypeForBitWidth(output_bit_width);
if (input_bit_width == output_bit_width) {
return instruction;
}
std::string name =
absl::StrFormat("xla.bitcast_convert_%s_2_%s", from_shape.ToString(),
to_shape.ToString());
HloModule* module = instruction->GetModule();
HloComputation*& computation =
computation_cache_.emplace(name, nullptr).first->second;
if (!computation) {
XlaBuilder b(name);
XlaOp input = Parameter(&b, 0, instruction->operand(0)->shape(), "a");
if (input_bit_width > output_bit_width) {
std::vector<int64_t> broadcasted_input_shape(
from_shape.dimensions().begin(), from_shape.dimensions().end());
std::vector<int64_t> reshaped_input_shape(from_shape.dimensions().begin(),
from_shape.dimensions().end());
broadcasted_input_shape.push_back(input_bit_width / output_bit_width);
reshaped_input_shape.push_back(1);
int64_t output_bit_width_mask = (int64_t{1} << output_bit_width) - 1;
TF_ASSIGN_OR_RETURN(input,
BroadcastTo(Reshape(input, reshaped_input_shape),
broadcasted_input_shape));
input = BitcastConvertType(input, input_logical_type);
TF_ASSIGN_OR_RETURN(Shape input_shape, b.GetShape(input));
XlaOp iota = Iota(&b, input_shape, input_shape.dimensions_size() - 1);
XlaOp iota_m = Mul(ScalarLike(input, output_bit_width), iota);
input = And(ShiftRightLogical(input, iota_m),
ScalarLike(input, output_bit_width_mask));
input = ConvertElementType(input, output_logical_type);
} else if (input_bit_width < output_bit_width) {
input = BitcastConvertType(input, input_logical_type);
input = ConvertElementType(input, output_logical_type);
XlaOp iota_m = Mul(
ConstantR0WithType(&b, output_logical_type, input_bit_width),
Iota(&b,
ShapeUtil::ChangeElementType(from_shape, output_logical_type),
from_shape.rank() - 1));
input = ShiftLeft(input, iota_m);
input = Reduce(input, Zero(&b, output_logical_type),
CreateScalarOrComputation(output_logical_type, &b),
{from_shape.rank() - 1});
}
BitcastConvertType(input, to_shape.element_type());
TF_ASSIGN_OR_RETURN(XlaComputation xla_computation, b.Build());
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
xla_computation.GetProgramShape());
HloModuleConfig config(program_shape);
TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(
xla_computation.proto(), config));
HloCloneContext context(module);
computation =
module->DeepCloneComputation(new_module->entry_computation(), &context);
}
return instruction->parent()->AddInstruction(HloInstruction::CreateCall(
instruction->shape(), instruction->operands(), computation));
}
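// Only bitcast-convert instructions whose operand and result element types
// differ in bit width need this expansion.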
bool BitcastDtypesExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kBitcastConvert &&
primitive_util::BitWidth(instruction->shape().element_type()) !=
primitive_util::BitWidth(
instruction->operand(0)->shape().element_type());
}
} | #include "xla/service/bitcast_dtypes_expander.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class BitcastDtypesExpanderTest : public HloTestBase {};
TEST_F(BitcastDtypesExpanderTest, S32toS8) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s32[10] parameter(0)
ROOT out = s8[10,4] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S64toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_smaller
ENTRY main {
p = s64[10] parameter(0)
ROOT out = s32[10,2] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, S8toS32) {
absl::string_view hlo_string = R"(
HloModule bitcast_to_larger
ENTRY main {
p = s8[10,4] parameter(0)
ROOT out = s32[10] bitcast-convert(p)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
TEST_F(BitcastDtypesExpanderTest, RewriteInsideWhileTest) {
absl::string_view hlo_string = R"(
HloModule module
body {
p_body = (f32[2], s32[]) parameter(0)
val1 = f32[2] get-tuple-element(p_body), index=0
val2 = s32[] get-tuple-element(p_body), index=1
const = s32[] constant(42)
converted_val2 = s8[4] bitcast-convert(val2)
converted_const = s8[4] bitcast-convert(const)
add = s8[4] add(converted_val2, converted_const)
out_add = s32[] bitcast-convert(add)
ROOT root = (f32[2], s32[]) tuple(val1, out_add)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.0 = f32[2] parameter(0)
param.1 = s32[] parameter(1)
while_init = (f32[2], s32[]) tuple(param.0, param.1)
ROOT while = (f32[2], s32[]) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
BitcastDtypesExpander expander;
TF_ASSERT_OK_AND_ASSIGN(bool changed, expander.Run(module.get()));
EXPECT_TRUE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bitcast_dtypes_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/bitcast_dtypes_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c1c9d13-d0f8-48d2-8b19-bb7cb1d46d0c | cpp | tensorflow/tensorflow | reffed_status_callback | tensorflow/core/util/reffed_status_callback.h | tensorflow/core/util/reffed_status_callback_test.cc | #ifndef TENSORFLOW_CORE_UTIL_REFFED_STATUS_CALLBACK_H_
#define TENSORFLOW_CORE_UTIL_REFFED_STATUS_CALLBACK_H_
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/mutex.h"
namespace tensorflow {
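// Wraps a StatusCallback and guarantees it is invoked exactly once, from the
// destructor, with the merged summary of every status passed to
// UpdateStatus(). Intended for fanned-out work that takes one reference per
// branch, for example:
//
//   auto* cb = new ReffedStatusCallback(std::move(done));
//   cb->Ref();                 // once per additional async branch
//   cb->UpdateStatus(status);  // safe from any thread
//   cb->Unref();               // `done` runs at the final Unref()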
class ReffedStatusCallback : public core::RefCounted {
public:
explicit ReffedStatusCallback(StatusCallback done) : done_(std::move(done)) {}
void UpdateStatus(const Status& s) {
mutex_lock lock(mu_);
status_group_.Update(s);
}
bool ok() {
tf_shared_lock lock(mu_);
return status_group_.ok();
}
Status status() {
tf_shared_lock lock(mu_);
return status_group_.as_summary_status();
}
~ReffedStatusCallback() override { done_(status_group_.as_summary_status()); }
private:
StatusCallback done_;
mutex mu_;
StatusGroup status_group_ TF_GUARDED_BY(mu_);
};
}
#endif | #include "tensorflow/core/util/reffed_status_callback.h"
#include <atomic>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(TestReffedStatusCallback, CallsBackOK) {
bool called = false;
Status status = absl::InvalidArgumentError("");
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_TRUE(status.ok());
}
TEST(TestReffedStatusCallback, CallsBackFail) {
bool called = false;
Status status = absl::OkStatus();
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
cb->UpdateStatus(absl::InternalError("1"));
cb->UpdateStatus(absl::InvalidArgumentError("2"));
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_THAT(status.code(),
::testing::AnyOf(error::INTERNAL, error::INVALID_ARGUMENT));
EXPECT_TRUE(absl::StrContains(status.message(), "1"));
EXPECT_TRUE(absl::StrContains(status.message(), "2"));
}
TEST(TestReffedStatusCallback, RefMulti) {
  bool called = false;
Status status = absl::OkStatus();
auto done = [&called, &status](const Status& s) {
called = true;
status = s;
};
auto* cb = new ReffedStatusCallback(std::move(done));
cb->Ref();
cb->UpdateStatus(absl::InternalError("1"));
cb->Ref();
cb->UpdateStatus(absl::InternalError("2"));
cb->Unref();
cb->Unref();
EXPECT_FALSE(called);
cb->Unref();
EXPECT_TRUE(called);
EXPECT_TRUE(absl::StrContains(status.message(), "1"));
EXPECT_TRUE(absl::StrContains(status.message(), "2"));
}
TEST(TestReffedStatusCallback, MultiThreaded) {
std::atomic<int> num_called(0);
Status status;
Notification n;
auto done = [&num_called, &status, &n](const Status& s) {
++num_called;
status = s;
n.Notify();
};
auto* cb = new ReffedStatusCallback(std::move(done));
thread::ThreadPool threads(Env::Default(), "test", 3);
for (int i = 0; i < 5; ++i) {
cb->Ref();
threads.Schedule([cb]() {
cb->UpdateStatus(absl::InvalidArgumentError("err"));
cb->Unref();
});
}
cb->Unref();
n.WaitForNotification();
EXPECT_EQ(num_called.load(), 1);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(status.message(), "err"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reffed_status_callback.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/reffed_status_callback_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c721096-8562-4b1d-b7aa-af2b88995ff3 | cpp | tensorflow/tensorflow | strided_slice_logic | tensorflow/lite/kernels/internal/strided_slice_logic.h | tensorflow/lite/kernels/internal/strided_slice_logic_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_STRIDED_SLICE_LOGIC_H_
#include <limits>
#include <vector>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace strided_slice {
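// Clamps `v` into the closed interval [lo, hi]; requires lo <= hi.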
inline int Clamp(const int v, const int lo, const int hi) {
TFLITE_DCHECK(!(hi < lo));
if (hi < v) return hi;
if (v < lo) return lo;
return v;
}
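// Left-pads the begin/stop/stride arrays (and left-shifts the masks) so the
// params describe exactly `dim_count` dimensions (at most 5), filling the new
// leading dimensions with the no-op slice [0, 1) at stride 1 and marking them
// in begin_mask/end_mask.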
inline void StridedSlicePadIndices(tflite::StridedSliceParams* p,
int dim_count) {
TFLITE_CHECK_LE(dim_count, 5);
TFLITE_CHECK_GE(dim_count, p->start_indices_count);
TFLITE_CHECK_EQ(p->start_indices_count, p->stop_indices_count);
TFLITE_CHECK_EQ(p->stop_indices_count, p->strides_count);
const int pad_count = dim_count - p->start_indices_count;
for (int i = p->start_indices_count - 1; i >= 0; --i) {
p->strides[i + pad_count] = p->strides[i];
p->start_indices[i + pad_count] = p->start_indices[i];
p->stop_indices[i + pad_count] = p->stop_indices[i];
}
for (int i = 0; i < pad_count; ++i) {
p->start_indices[i] = 0;
p->stop_indices[i] = 1;
p->strides[i] = 1;
}
p->shrink_axis_mask <<= pad_count;
p->ellipsis_mask <<= pad_count;
p->new_axis_mask <<= pad_count;
p->begin_mask <<= pad_count;
p->end_mask <<= pad_count;
p->begin_mask |= (1 << pad_count) - 1;
p->end_mask |= (1 << pad_count) - 1;
p->start_indices_count = dim_count;
p->stop_indices_count = dim_count;
p->strides_count = dim_count;
}
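// Resolves the begin index for `axis`: negative values count from the end,
// the result is clamped to the valid range for the stride direction, and
// begin_mask overrides the index with the first element in iteration order.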
inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params,
const RuntimeShape& input_shape,
int32_t axis) {
const int32_t axis_size = input_shape.Dims(axis);
int32_t start = params.start_indices[axis];
const int32_t stride = params.strides[axis];
const int32_t begin_mask = (params.begin_mask & 1 << axis);
if (start < 0) {
start += axis_size;
}
if (stride > 0) {
start = Clamp(start, 0, axis_size);
} else {
start = Clamp(start, -1, axis_size - 1);
}
if (begin_mask) {
if (stride > 0) {
start = 0;
} else {
start = axis_size - 1;
}
}
return start;
}
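// Resolves the stop index for `axis` given the already-resolved `start`. A
// set shrink_axis bit yields a single-element slice; in offset mode the stop
// index is interpreted relative to `start`; end_mask overrides the index with
// the boundary in iteration order.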
inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params,
const RuntimeShape& input_shape, int axis,
int start) {
const auto shrink_axis_mask = params.shrink_axis_mask;
const bool shrink_axis = shrink_axis_mask & (1 << axis);
const int axis_size = input_shape.Dims(axis);
const bool offset = params.offset;
if (shrink_axis) {
if (start >= axis_size) {
return start;
} else {
return start + 1;
}
}
const auto* indices = params.stop_indices;
int end = indices[axis];
if (offset) {
end += start;
}
const int32_t stride = params.strides[axis];
const int32_t end_mask = (params.end_mask & 1 << axis);
if (end < 0) {
end += axis_size;
}
if (stride > 0) {
end = Clamp(end, 0, axis_size);
} else {
end = Clamp(end, -1, axis_size - 1);
}
if (end_mask) {
if (stride > 0) {
end = axis_size;
} else {
end = -1;
}
}
return end;
}
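// Returns the index of the first element to visit along `axis`, handling
// begin_mask (via an extreme sentinel), negative indices, clamping for the
// stride direction, and empty axes (which always start at 0).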
inline int StartForAxis(const tflite::StridedSliceParams& params,
const RuntimeShape& input_shape, int axis) {
const auto begin_mask = params.begin_mask;
const auto* start_indices = params.start_indices;
const auto* strides = params.strides;
const int axis_size = input_shape.Dims(axis);
if (axis_size == 0) {
return 0;
}
int start = start_indices[axis];
if (begin_mask & 1 << axis) {
if (strides[axis] > 0) {
start = std::numeric_limits<int>::lowest();
} else {
start = std::numeric_limits<int>::max();
}
}
if (start < 0) {
start += axis_size;
}
if (strides[axis] > 0) {
start = Clamp(start, 0, axis_size);
} else {
start = Clamp(start, -1, axis_size - 1);
}
return start;
}
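// Returns the index that terminates iteration along `axis` (exclusive for
// positive strides, one-before for negative strides). With shrink_axis set
// this is always start_for_axis + 1; empty axes stop at 0.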
inline int StopForAxis(const tflite::StridedSliceParams& params,
const RuntimeShape& input_shape, int axis,
int start_for_axis) {
const auto end_mask = params.end_mask;
const auto shrink_axis_mask = params.shrink_axis_mask;
const auto* stop_indices = params.stop_indices;
const auto* strides = params.strides;
const int axis_size = input_shape.Dims(axis);
if (axis_size == 0) {
return 0;
}
const bool shrink_axis = shrink_axis_mask & (1 << axis);
int stop = stop_indices[axis];
if (shrink_axis) {
return start_for_axis + 1;
}
if (end_mask & (1 << axis)) {
if (strides[axis] > 0) {
stop = std::numeric_limits<int>::max();
} else {
stop = std::numeric_limits<int>::lowest();
}
}
if (stop < 0) {
stop += axis_size;
}
if (strides[axis] > 0) {
stop = Clamp(stop, 0, axis_size);
} else {
stop = Clamp(stop, -1, axis_size - 1);
}
return stop;
}
inline bool LoopCondition(int index, int stop, int stride) {
return stride > 0 ? index >= stop : index <= stop;
}
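// Convenience builder that packs the index vectors and masks into a
// StridedSliceParams; ellipsis_mask and new_axis_mask are left at zero.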
inline tflite::StridedSliceParams BuildStridedSliceParams(
int begin_mask, int end_mask, int shrink_axis_mask,
const std::vector<int>& start_indices, const std::vector<int>& stop_indices,
const std::vector<int>& strides) {
tflite::StridedSliceParams op_params{};
const int dims_count = start_indices.size();
op_params.start_indices_count = dims_count;
op_params.stop_indices_count = dims_count;
op_params.strides_count = dims_count;
for (int i = 0; i < dims_count; ++i) {
op_params.start_indices[i] = start_indices[i];
op_params.stop_indices[i] = stop_indices[i];
op_params.strides[i] = strides[i];
}
op_params.begin_mask = begin_mask;
op_params.ellipsis_mask = 0;
op_params.end_mask = end_mask;
op_params.new_axis_mask = 0;
op_params.shrink_axis_mask = shrink_axis_mask;
return op_params;
}
}
}
#endif | #include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
#include <initializer_list>
#include <gtest/gtest.h>
namespace tflite {
namespace {
void RunStridedSlicePadIndices(std::initializer_list<int> begin,
std::initializer_list<int> end,
std::initializer_list<int> stride,
std::initializer_list<int> expected_begin,
std::initializer_list<int> expected_end,
std::initializer_list<int> expected_stride) {
StridedSliceParams op_params;
int dims = begin.size();
op_params.start_indices_count = dims;
op_params.stop_indices_count = dims;
op_params.strides_count = dims;
for (int i = 0; i < dims; ++i) {
op_params.start_indices[i] = begin.begin()[i];
op_params.stop_indices[i] = end.begin()[i];
op_params.strides[i] = stride.begin()[i];
}
strided_slice::StridedSlicePadIndices(&op_params, 4);
for (int i = 0; i < 4; ++i) {
EXPECT_EQ(op_params.start_indices[i], expected_begin.begin()[i]);
EXPECT_EQ(op_params.stop_indices[i], expected_end.begin()[i]);
EXPECT_EQ(op_params.strides[i], expected_stride.begin()[i]);
}
}
TEST(RunStridedSlicePadIndices, Pad1) {
  RunStridedSlicePadIndices(/*begin=*/{1, 2, 3},
                            /*end=*/{4, 5, 6},
                            /*stride=*/{2, 2, 2},
                            /*expected_begin=*/{0, 1, 2, 3},
                            /*expected_end=*/{1, 4, 5, 6},
                            /*expected_stride=*/{1, 2, 2, 2});
}
TEST(RunStridedSlicePadIndices, Pad2) {
  RunStridedSlicePadIndices(/*begin=*/{1, 2},
                            /*end=*/{4, 5},
                            /*stride=*/{2, 2},
                            /*expected_begin=*/{0, 0, 1, 2},
                            /*expected_end=*/{1, 1, 4, 5},
                            /*expected_stride=*/{1, 1, 2, 2});
}
TEST(RunStridedSlicePadIndices, Pad3) {
  RunStridedSlicePadIndices(/*begin=*/{1},
                            /*end=*/{4},
                            /*stride=*/{2},
                            /*expected_begin=*/{0, 0, 0, 1},
                            /*expected_end=*/{1, 1, 1, 4},
                            /*expected_stride=*/{1, 1, 1, 2});
}
TEST(StridedSliceStartForAxis, NegativeOOBIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = -11;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, NegativeOneTheBoundaryIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = -10;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, NegativeWithinBoundsIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = -9;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 1);
}
TEST(StridedSliceStartForAxis, MinusOneIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = -1;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 9);
}
TEST(StridedSliceStartForAxis, ZeroIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 0;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 0);
}
TEST(StridedSliceStartForAxis, OneIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 1;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 1);
}
TEST(StridedSliceStartForAxis, PositiveBoundaryIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 9;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 9);
}
TEST(StridedSliceStartForAxis, PositiveOOBIndexSizeofArray) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 10;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 10);
}
TEST(StridedSliceStartForAxis, PositiveOOBIndex) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 11;
params.strides[0] = 1;
int start = strided_slice::StridedSliceStartForAxis(
params, RuntimeShape({10}), 0);
EXPECT_EQ(start, 10);
}
TEST(StridedSliceStartForAxis, TenFourMinus1) {
StridedSliceParams params{};
params.begin_mask = 0;
params.end_mask = 0;
params.start_indices[0] = 5;
params.stop_indices[0] = 2;
params.strides[0] = -1;
int start = strided_slice::StridedSliceStartForAxis(params, RuntimeShape({4}),
0);
int stop = strided_slice::StridedSliceEndForAxis(params, RuntimeShape({4}),
0, start);
EXPECT_EQ(start, 3);
EXPECT_EQ(stop, 2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/strided_slice_logic.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/strided_slice_logic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b33caddb-e189-4706-ad85-019b3a5d26d2 | cpp | google/tensorstore | iterate_over_index_range | tensorstore/util/iterate_over_index_range.h | tensorstore/util/iterate_over_index_range_test.cc | #ifndef TENSORSTORE_UTIL_ITERATE_OVER_INDEX_RANGE_H_
#define TENSORSTORE_UTIL_ITERATE_OVER_INDEX_RANGE_H_
#include <cassert>
#include <type_traits>
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/void_wrapper.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/constant_vector.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_iterate {
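// Maps loop nesting depth `outer_dims` to the dimension iterated at that
// depth: outermost-first for C order, innermost-first for Fortran order.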
inline constexpr DimensionIndex GetLoopDimension(ContiguousLayoutOrder order,
DimensionIndex outer_dims,
DimensionIndex total_dims) {
return order == ContiguousLayoutOrder::c ? outer_dims
: total_dims - 1 - outer_dims;
}
template <typename Func, typename IndexType, DimensionIndex Rank>
using IterateOverIndexRangeResult = std::decay_t<
std::invoke_result_t<Func, tensorstore::span<const IndexType, Rank>>>;
template <ContiguousLayoutOrder Order, typename Func, typename IndexType,
DimensionIndex Rank>
struct IterateOverIndexRangeHelper {
using IndicesSpan = tensorstore::span<const IndexType, Rank>;
using ResultType = IterateOverIndexRangeResult<Func, IndexType, Rank>;
using WrappedResultType = internal::Void::WrappedType<ResultType>;
static WrappedResultType LoopImpl(
Func func, DimensionIndex outer_dims, const IndexType* origin,
const IndexType* shape, tensorstore::span<IndexType, Rank> indices) {
WrappedResultType result =
internal::DefaultIterationResult<WrappedResultType>::value();
const DimensionIndex cur_dim =
GetLoopDimension(Order, outer_dims, indices.size());
const IndexType start = origin[cur_dim];
const IndexType stop = shape[cur_dim] + start;
if (outer_dims + 1 == indices.size()) {
for (IndexType i = start; i < stop; ++i) {
indices[cur_dim] = i;
result = internal::Void::CallAndWrap(func, IndicesSpan(indices));
if (!result) break;
}
} else {
for (IndexType i = start; i < stop; ++i) {
indices[cur_dim] = i;
result = LoopImpl(func, outer_dims + 1, origin, shape, indices);
if (!result) break;
}
}
return result;
}
static ResultType Start(Func func, const IndexType* origin,
IndicesSpan shape) {
if (shape.size() == 0) {
return func(tensorstore::span<const IndexType, Rank>());
}
assert(shape.size() <= kMaxRank);
IndexType indices[kMaxRank];
return internal::Void::Unwrap(LoopImpl(
func, 0, &origin[0], &shape[0],
tensorstore::span<IndexType, Rank>(&indices[0], shape.size())));
}
};
}
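// Invokes `func` once for every index vector in the hyperrectangle
// [origin, origin + shape), iterating in the layout given by `Order`. If
// `func` returns a boolean-like result, iteration stops at the first false
// value, which is then returned. For example, iterating over shape {2, 3} in
// C order visits (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).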
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
typename IndexType, DimensionIndex Rank, typename Func>
internal_iterate::IterateOverIndexRangeResult<
Func, std::remove_const_t<IndexType>, Rank>
IterateOverIndexRange(tensorstore::span<IndexType, Rank> origin,
tensorstore::span<IndexType, Rank> shape, Func&& func) {
assert(origin.size() == shape.size());
return internal_iterate::IterateOverIndexRangeHelper<
Order, Func, std::remove_const_t<IndexType>, Rank>::Start(func,
origin.data(),
shape);
}
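// Overload taking a Box-like object. Note that the iteration order comes from
// the compile-time `Order` template parameter; the trailing runtime `order`
// argument is accepted but not consulted by this implementation.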
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
typename BoxType, typename Func>
std::enable_if_t<IsBoxLike<BoxType>,
internal_iterate::IterateOverIndexRangeResult<
Func, Index, BoxType::static_rank>>
IterateOverIndexRange(const BoxType& box, Func&& func,
ContiguousLayoutOrder order = ContiguousLayoutOrder::c) {
return internal_iterate::IterateOverIndexRangeHelper<
Order, Func, Index, BoxType::static_rank>::Start(func,
box.origin().data(),
box.shape());
}
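// Overload iterating over [0, shape): the origin is implicitly all zeros.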
template <ContiguousLayoutOrder Order = ContiguousLayoutOrder::c,
typename IndexType, DimensionIndex Rank, typename Func>
internal_iterate::IterateOverIndexRangeResult<
Func, std::remove_const_t<IndexType>, Rank>
IterateOverIndexRange(tensorstore::span<IndexType, Rank> shape, Func&& func) {
using NonConstIndex = std::remove_const_t<IndexType>;
return internal_iterate::
IterateOverIndexRangeHelper<Order, Func, NonConstIndex, Rank>::Start(
func,
GetConstantVector<NonConstIndex, 0>(GetStaticOrDynamicExtent(shape))
.data(),
shape);
}
}
#endif | #include "tensorstore/util/iterate_over_index_range.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/index.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::ContiguousLayoutOrder;
using ::tensorstore::Index;
using ::tensorstore::IterateOverIndexRange;
using ::tensorstore::span;
TEST(IterateOverIndexRange, COrder) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 0}, {0, 1}, {0, 2},
{1, 0}, {1, 1}, {1, 2}};
IterateOverIndexRange<ContiguousLayoutOrder::c>(
span({2, 3}),
[&](span<const int, 2> x) { result.emplace_back(x.begin(), x.end()); });
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, FortranOrder) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 0}, {1, 0}, {0, 1},
{1, 1}, {0, 2}, {1, 2}};
IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
span({2, 3}),
[&](span<const int, 2> x) { result.emplace_back(x.begin(), x.end()); });
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, COrderWithOrigin) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 1}, {0, 2}, {1, 1}, {1, 2}};
IterateOverIndexRange<ContiguousLayoutOrder::c>(
span({0, 1}), span({2, 2}),
[&](span<const int, 2> x) { result.emplace_back(x.begin(), x.end()); });
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, FortranOrderWithOrigin) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 1}, {1, 1}, {0, 2}, {1, 2}};
IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
span({0, 1}), span({2, 2}),
[&](span<const int, 2> x) { result.emplace_back(x.begin(), x.end()); });
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, COrderWithBox) {
using R = std::vector<Index>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 1}, {0, 2}, {1, 1}, {1, 2}};
IterateOverIndexRange(
tensorstore::BoxView({0, 1}, {2, 2}),
[&](span<const Index, 2> x) { result.emplace_back(x.begin(), x.end()); },
ContiguousLayoutOrder::c);
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, RankZero) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{R{}};
IterateOverIndexRange<ContiguousLayoutOrder::fortran>(
span<const int, 0>(),
[&](span<const int, 0> x) { result.emplace_back(x.begin(), x.end()); });
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, Stop) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{{0, 0}, {0, 1}};
EXPECT_EQ(false, IterateOverIndexRange<ContiguousLayoutOrder::c>(
span({2, 3}), [&](span<const int, 2> x) {
result.emplace_back(x.begin(), x.end());
return x[1] != 1;
}));
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, ZeroElementsBoolReturn) {
EXPECT_EQ(true, IterateOverIndexRange<ContiguousLayoutOrder::c>(
span({0}), [&](span<const int, 1> x) { return false; }));
}
TEST(IterateOverIndexRange, StaticRankZero) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{R{}};
IterateOverIndexRange(span<const int, 0>{}, [&](span<const int, 0> x) {
result.emplace_back(x.begin(), x.end());
});
EXPECT_EQ(expected_result, result);
}
TEST(IterateOverIndexRange, DynamicRankZero) {
using R = std::vector<int>;
std::vector<R> result;
const std::vector<R> expected_result{R{}};
IterateOverIndexRange(span<const int>(nullptr, 0), [&](span<const int> x) {
result.emplace_back(x.begin(), x.end());
});
EXPECT_EQ(expected_result, result);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/iterate_over_index_range.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/iterate_over_index_range_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
dc588779-d7fa-4d6d-84b8-e9aba3af929e | cpp | tensorflow/tensorflow | graph_partition | tensorflow/core/tfrt/utils/graph_partition.cc | tensorflow/core/tfrt/utils/graph_partition_test.cc | #include "tensorflow/core/tfrt/utils/graph_partition.h"
#include <algorithm>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_partition.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
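// Bookkeeping for a node that becomes a partition input or output: the node,
// its data type, its _Arg/_Retval index, and (for inputs) the copy of the
// node kept in the top-level graph.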
struct NodeInfo {
Node* node = nullptr;
DataType data_type;
int index = -1;
Node* node_copy = nullptr;
};
struct CallNodeInputInfo {
int index = -1;
DataType data_type;
Node* input_node = nullptr;
int input_node_index = -1;
Node* arg_node = nullptr;
Node* ret_node = nullptr;
};
struct OutputNodeInfo {
absl::flat_hash_map<std::string, NodeInfo> output_nodes;
std::optional<std::pair<std::string, NodeInfo>> auxiliary_output_node;
};
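// Rewrites `subgraph` in place so it can be converted to a function: each
// requested input node is replaced by an _Arg node (a copy of the original
// is kept in `graph`), each requested output node is wired into a _Retval
// node, and a dummy Const output is added when the partition produces no
// outputs so the generated function still returns a value.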
Status PrepareSubgraphForFunctionConversion(
const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs, const Device* host_device,
const std::string& func_name,
absl::flat_hash_map<std::string, NodeInfo>& input_nodes,
absl::flat_hash_map<std::string, NodeInfo>& output_nodes,
std::optional<std::pair<std::string, NodeInfo>>& auxiliary_output_node,
Graph* subgraph, Graph* graph) {
std::unordered_map<std::string, Node*> name_to_node_map =
subgraph->BuildNodeNameIndex();
int input_index = 0, output_index = 0;
for (const auto& input : inputs) {
int position = -1;
std::string node_name = grappler::ParseNodeName(input, &position);
if (position != 0) {
return errors::Unimplemented(
"Support for input node with multiple output tensors is not "
"implemented.");
}
if (name_to_node_map.count(node_name) == 0) continue;
Node* node = name_to_node_map.at(node_name);
NodeInfo node_info;
node_info.node = node;
node_info.data_type = node->output_type(position);
node_info.index = input_index++;
node_info.node_copy = graph->CopyNode(node);
input_nodes.emplace(node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * arg_node,
NodeBuilder(absl::StrCat("arg_", node_info.index, "/", node->name()),
"_Arg")
.Attr("index", node_info.index)
.Attr("T", node_info.data_type)
.Finalize(subgraph));
CHECK_EQ(node->num_inputs(), 0);
std::vector<const Edge*> out_edges(node->out_edges().begin(),
node->out_edges().end());
for (const Edge* edge : out_edges) {
if (edge->IsControlEdge()) {
subgraph->AddControlEdge(arg_node, edge->dst());
} else {
TF_RETURN_IF_ERROR(
subgraph->UpdateEdge(arg_node, 0, edge->dst(), edge->dst_input()));
}
}
subgraph->RemoveNode(node);
}
for (const auto& output : outputs) {
int position = -1;
std::string node_name = grappler::ParseNodeName(output, &position);
if (position != 0) {
return errors::Unimplemented(
"Support for output node with multiple output tensors is not "
"implemented.");
}
if (name_to_node_map.count(node_name) == 0) continue;
Node* node = name_to_node_map.at(node_name);
NodeInfo node_info;
node_info.node = node;
node_info.data_type = node->output_type(position);
node_info.index = output_index++;
output_nodes.emplace(node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * ret_node,
NodeBuilder(absl::StrCat("ret_", node_info.index, "/", node->name()),
"_Retval")
.Attr("index", node_info.index)
.Attr("T", node_info.data_type)
.Input(NodeBuilder::NodeOut(node->name(), position,
node_info.data_type))
.Finalize(subgraph));
node->set_name(node->name() + "/partition_renamed");
subgraph->AddEdge(node, 0, ret_node, 0);
}
if (output_nodes.empty()) {
const DataType data_type = DT_INT32;
TensorShape const_shape;
Tensor const_tensor(data_type, const_shape);
const_tensor.flat<int>()(0) = 0;
TF_ASSIGN_OR_RETURN(
Node * const_node,
NodeBuilder(absl::StrCat("const/unused/", func_name), "Const")
.AssignedDevice(host_device->name())
.Attr("dtype", data_type)
.Attr("value", const_tensor)
.Finalize(subgraph));
NodeInfo node_info;
node_info.node = const_node;
node_info.data_type = data_type;
node_info.index = output_index++;
auxiliary_output_node.emplace(const_node->name(), node_info);
TF_ASSIGN_OR_RETURN(
Node * ret_node,
NodeBuilder(
absl::StrCat("ret_", node_info.index, "/", const_node->name()),
"_Retval")
.Attr("index", node_info.index)
.Attr("T", data_type)
.Input(NodeBuilder::NodeOut(const_node->name(), 0, data_type))
.Finalize(subgraph));
subgraph->AddEdge(const_node, 0, ret_node, 0);
}
return absl::OkStatus();
}
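// Converts `subgraph` to a FunctionDef and adds a PartitionedCall node to
// `graph` that invokes it. Send nodes and the requested control outputs are
// recorded as control returns of the generated function.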
absl::StatusOr<Node*> BuildPartitionedCallOp(
const std::string& func_name, const Device* host_device,
const std::string& device,
const absl::flat_hash_map<std::string, NodeInfo>& input_nodes,
const absl::flat_hash_map<std::string, NodeInfo>& output_nodes,
const absl::optional<std::pair<std::string, NodeInfo>>&
auxiliary_output_node,
const std::vector<std::string>& control_outputs, Graph* subgraph,
Graph* graph) {
std::string call_node_name = absl::StrCat("partitioned_call/", func_name);
NodeBuilder call_builder(call_node_name, "PartitionedCall");
call_builder.AssignedDevice(host_device->name());
call_builder.Attr(tensorflow::kNoInlineAttr, true);
std::vector<DataType> input_dtypes(input_nodes.size());
for (const auto& input_node : input_nodes) {
input_dtypes[input_node.second.index] = input_node.second.data_type;
}
call_builder.Attr("Tin", input_dtypes);
CHECK(auxiliary_output_node ? output_nodes.empty() : !output_nodes.empty());
std::vector<DataType> output_dtypes(
auxiliary_output_node ? 1 : output_nodes.size());
if (auxiliary_output_node) {
CHECK_EQ(auxiliary_output_node->second.index, 0);
output_dtypes[auxiliary_output_node->second.index] =
auxiliary_output_node->second.data_type;
} else {
for (const auto& output_node : output_nodes) {
output_dtypes[output_node.second.index] = output_node.second.data_type;
}
}
call_builder.Attr("Tout", output_dtypes);
std::vector<NodeBuilder::NodeOut> call_node_inputs(input_nodes.size());
for (const auto& input_node : input_nodes) {
call_node_inputs[input_node.second.index] =
NodeBuilder::NodeOut(input_node.second.node_copy, 0);
}
call_builder.Input(call_node_inputs);
NameAttrList f;
f.set_name(func_name);
call_builder.Attr("f", f);
TF_ASSIGN_OR_RETURN(Node * call_node, call_builder.Finalize(graph));
absl::flat_hash_set<std::string> control_ret_names(control_outputs.begin(),
control_outputs.end());
for (const Node* node : subgraph->op_nodes()) {
if (node->IsSend()) {
control_ret_names.insert(node->name());
}
}
auto control_ret_node_names =
[&control_ret_names](const Node* node) -> absl::optional<std::string> {
if (control_ret_names.contains(node->name())) {
return node->name();
}
return std::nullopt;
};
FunctionDef new_fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*subgraph, func_name,
control_ret_node_names, &new_fdef));
(*new_fdef.mutable_attr())[tensorflow::kNoInlineAttr].set_b(true);
(*new_fdef.mutable_attr())["device"].set_s(device);
TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddFunctionDef(new_fdef));
return call_node;
}
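// Builds a StatefulPartitionedCall node that funnels the outputs of all
// per-device PartitionedCall nodes through an IdentityN-based function, so
// downstream nodes have a single op to read results from.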
absl::StatusOr<Node*> BuildStatefulPartitionedCallOp(
absl::flat_hash_map<std::string, CallNodeInputInfo>& call_node_input_info,
const absl::flat_hash_map<std::string, Node*>& all_partitioned_call_ops,
const std::string& stateful_call_func_name, const Device* host_device,
Graph* graph) {
std::string call_node_name =
absl::StrCat("stateful_partitioned_call/", stateful_call_func_name);
NodeBuilder call_builder(call_node_name, "StatefulPartitionedCall");
call_builder.Attr(tensorflow::kNoInlineAttr, true);
call_builder.AssignedDevice(host_device->name());
int num_output_nodes = call_node_input_info.size();
std::vector<DataType> input_dtypes(num_output_nodes);
for (const auto& node_info : call_node_input_info) {
CHECK(node_info.second.index < num_output_nodes);
input_dtypes[node_info.second.index] = node_info.second.data_type;
}
call_builder.Attr("Tin", input_dtypes);
call_builder.Attr("Tout", input_dtypes);
std::vector<NodeBuilder::NodeOut> call_node_inputs(num_output_nodes);
for (const auto& node_info : call_node_input_info) {
call_node_inputs[node_info.second.index] = NodeBuilder::NodeOut(
node_info.second.input_node, node_info.second.input_node_index);
}
call_builder.Input(call_node_inputs);
NameAttrList f;
f.set_name(stateful_call_func_name);
call_builder.Attr("f", f);
TF_ASSIGN_OR_RETURN(Node * stateful_call_node, call_builder.Finalize(graph));
auto id_graph = std::make_unique<Graph>(graph->flib_def().default_registry());
std::vector<NodeBuilder::NodeOut> output_tensors(num_output_nodes);
for (auto& node_info : call_node_input_info) {
TF_ASSIGN_OR_RETURN(node_info.second.arg_node,
NodeBuilder(absl::StrCat("arg_", node_info.second.index,
"/", stateful_call_func_name),
"_Arg")
.Attr("index", node_info.second.index)
.Attr("T", node_info.second.data_type)
.Finalize(id_graph.get()));
output_tensors[node_info.second.index] =
NodeBuilder::NodeOut(node_info.second.arg_node, 0);
}
TF_ASSIGN_OR_RETURN(
Node * identity_node,
NodeBuilder(absl::StrCat("identityN", "/", stateful_call_func_name),
"IdentityN")
.AssignedDevice(host_device->name())
.Input(output_tensors)
.Finalize(id_graph.get()));
for (auto& node_info : call_node_input_info) {
TF_ASSIGN_OR_RETURN(
node_info.second.ret_node,
NodeBuilder(absl::StrCat("ret_", node_info.second.index, "/",
stateful_call_func_name),
"_Retval")
.Attr("index", node_info.second.index)
.Attr("T", node_info.second.data_type)
.Input(NodeBuilder::NodeOut(identity_node, node_info.second.index))
.Finalize(id_graph.get()));
id_graph->AddEdge(identity_node, node_info.second.index,
node_info.second.ret_node, 0);
}
FunctionDef id_fdef;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*id_graph, stateful_call_func_name, &id_fdef));
(*id_fdef.mutable_attr())[tensorflow::kNoInlineAttr].set_b(true);
TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddFunctionDef(id_fdef));
return stateful_call_node;
}
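// Returns true if the graph's op nodes are assigned to more than one device.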
bool HasMultipleDevices(const Graph* graph) {
bool has_multiple_devices = false;
std::optional<std::string> location;
for (const Node* node : graph->op_nodes()) {
if (location) {
if (*location != node->assigned_device_name()) {
has_multiple_devices = true;
break;
}
} else {
location = node->assigned_device_name();
}
}
return has_multiple_devices;
}
std::string GetNameFromDevice(const std::string& device) {
std::string ret = device;
for (int i = 0; i < ret.size(); ++i) {
if (ret[i] == ':') ret[i] = '_';
}
return ret;
}
}
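// Partitions `graph` across `device_set`, wraps each partition in a
// PartitionedCall, aggregates their outputs with a StatefulPartitionedCall,
// and re-exposes the original output and control-output names via Identity
// nodes. Graphs placed on a single device are returned unchanged.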
absl::StatusOr<std::unique_ptr<Graph>> InsertTransferOps(
const std::string& graph_func_name, const DeviceSet& device_set,
const Device* host_device, const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs,
const std::vector<std::string>& control_outputs,
std::unique_ptr<Graph> graph) {
if (!HasMultipleDevices(graph.get())) {
return graph;
}
auto new_graph = std::make_unique<Graph>(graph->flib_def());
FunctionDefLibrary flib = graph->flib_def().ToProto();
std::unordered_map<string, std::unique_ptr<Graph>> partitions;
TF_RETURN_IF_ERROR(
PartitionFunctionGraph(device_set, std::move(graph), &partitions));
absl::flat_hash_map<std::string, Node*> all_partitioned_call_ops;
std::map<std::string, OutputNodeInfo> device_to_output_info_map;
for (auto& partition : partitions) {
const string& device = partition.first;
VLOG(1) << "Process the partitioin on device: " << device;
Graph* subgraph = partition.second.get();
TF_RETURN_IF_ERROR(subgraph->AddFunctionLibrary(flib));
FunctionNameGenerator name_generator(
&new_graph->flib_def(), absl::StrCat(graph_func_name, "-partition-",
GetNameFromDevice(device)));
std::string func_name = name_generator.GetName();
absl::flat_hash_map<std::string, NodeInfo> input_nodes;
OutputNodeInfo& output_node_info = device_to_output_info_map[device];
absl::flat_hash_map<std::string, NodeInfo>& output_nodes =
output_node_info.output_nodes;
std::optional<std::pair<std::string, NodeInfo>>& auxiliary_output_node =
output_node_info.auxiliary_output_node;
TF_RETURN_IF_ERROR(PrepareSubgraphForFunctionConversion(
inputs, outputs, host_device, func_name, input_nodes, output_nodes,
auxiliary_output_node, subgraph, new_graph.get()));
TF_ASSIGN_OR_RETURN(
Node * call_node,
BuildPartitionedCallOp(func_name, host_device, device, input_nodes,
output_nodes, auxiliary_output_node,
control_outputs, subgraph, new_graph.get()));
all_partitioned_call_ops[device] = call_node;
}
int input_index = 0;
absl::flat_hash_map<std::string, CallNodeInputInfo> call_node_input_info;
auto get_call_node_input_info = [&](const std::string& device,
const std::string& node_name,
const NodeInfo& node_info) {
CHECK(!call_node_input_info.contains(node_name));
CallNodeInputInfo& info = call_node_input_info[node_name];
info.index = input_index++;
info.data_type = node_info.data_type;
info.input_node = all_partitioned_call_ops.at(device);
info.input_node_index = node_info.index;
};
for (const auto& entry : device_to_output_info_map) {
const std::string& device = entry.first;
const OutputNodeInfo& output_info = entry.second;
for (const auto& node_info : output_info.output_nodes) {
get_call_node_input_info(device, node_info.first, node_info.second);
}
if (output_info.auxiliary_output_node) {
get_call_node_input_info(device, output_info.auxiliary_output_node->first,
output_info.auxiliary_output_node->second);
}
}
FunctionNameGenerator name_generator(
&new_graph->flib_def(),
absl::StrCat(graph_func_name, "/output_aggregator"));
std::string stateful_call_func_name = name_generator.GetName();
TF_ASSIGN_OR_RETURN(
Node * stateful_call_node,
BuildStatefulPartitionedCallOp(
call_node_input_info, all_partitioned_call_ops,
stateful_call_func_name, host_device, new_graph.get()));
for (const auto& node_info : call_node_input_info) {
TF_RETURN_IF_ERROR(NodeBuilder(node_info.first, "Identity")
.Input(NodeBuilder::NodeOut(stateful_call_node,
node_info.second.index))
.Attr("T", node_info.second.data_type)
.AssignedDevice(host_device->name())
.Finalize(new_graph.get(), nullptr));
}
CHECK_GT(stateful_call_node->num_outputs(), 0);
for (const auto& control_output : control_outputs) {
TF_RETURN_IF_ERROR(NodeBuilder(control_output, "Identity")
.Input(NodeBuilder::NodeOut(stateful_call_node, 0))
.Attr("T", stateful_call_node->output_type(0))
.AssignedDevice(host_device->name())
.Finalize(new_graph.get(), nullptr));
}
return new_graph;
}
}
} | #include "tensorflow/core/tfrt/utils/graph_partition.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/inline_function_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
using ::testing::IsEmpty;
using ::testing::NotNull;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphPartitionTest : public grappler::GrapplerTest {
public:
void SetUp() override {
SessionOptions options;
auto* device_count = options.config.mutable_device_count();
device_count->insert({"CPU", 2});
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(options, "/job:a/replica:0/task:0",
&devices));
device0_ = devices[0].get();
device1_ = devices[1].get();
device_mgr_ = std::make_unique<DynamicDeviceMgr>(std::move(devices));
for (auto d : device_mgr_->ListDevices()) {
device_set_.AddDevice(d);
}
}
std::unique_ptr<DeviceMgr> device_mgr_;
Device* device0_ = nullptr;
Device* device1_ = nullptr;
DeviceSet device_set_;
};
TEST_F(GraphPartitionTest, InsertTransferOpsWithOneDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope().WithDevice(device0_->name());
auto input = ops::Placeholder(scope.WithOpName("input"), DT_FLOAT);
auto id_x = ops::Identity(scope.WithOpName("identity"), input);
auto output = ops::Identity(scope.WithOpName("output"), id_x);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
GraphDef original_graphdef;
TF_ASSERT_OK(scope.ToGraphDef(&original_graphdef));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Graph> new_graph,
InsertTransferOps("test_graph", device_set_, device0_,
{"input"},
{"output"}, {},
std::move(graph)));
GraphDef new_graphdef;
new_graph->ToGraphDef(&new_graphdef);
CompareGraphs(original_graphdef, new_graphdef);
}
TEST_F(GraphPartitionTest, InsertTransferOpsWithTwoDevice) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Scope scope = Scope::NewRootScope();
Scope scope0 = scope.WithDevice(device0_->name());
Scope scope1 = scope.WithDevice(device1_->name());
auto input = ops::Placeholder(scope0.WithOpName("input"), DT_FLOAT);
Output id_x = ops::Identity(scope0.WithOpName("id_x"), input);
Output id_y = ops::Identity(scope1.WithOpName("id_y"), input);
auto output = ops::IdentityN(scope0.WithOpName("output"), {id_x, id_y});
TF_ASSERT_OK(scope.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(OpRegistry::Global());
Placer placer(graph.get(), "", &flib_def, &device_set_, device0_);
TF_ASSERT_OK(placer.Run());
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<Graph> new_graph,
InsertTransferOps("test_graph", device_set_, device0_,
{"input"},
{"output"}, {},
std::move(graph)));
GraphDef new_graphdef;
new_graph->ToGraphDef(&new_graphdef);
NodeDef* input_node = nullptr;
NodeDef* output_node = nullptr;
NodeDef* stateful_partitioned_call_node = nullptr;
std::vector<NodeDef*> partitioned_call_nodes;
for (NodeDef& node : *new_graphdef.mutable_node()) {
if (node.op() == "PartitionedCall") {
partitioned_call_nodes.push_back(&node);
} else if (node.op() == "StatefulPartitionedCall") {
stateful_partitioned_call_node = &node;
} else if (node.name() == "input") {
input_node = &node;
} else if (node.name() == "output") {
output_node = &node;
}
}
ASSERT_THAT(input_node, NotNull());
ASSERT_THAT(output_node, NotNull());
ASSERT_THAT(partitioned_call_nodes, SizeIs(2));
ASSERT_THAT(stateful_partitioned_call_node, NotNull());
EXPECT_THAT(stateful_partitioned_call_node->input(),
UnorderedElementsAre(partitioned_call_nodes[0]->name(),
partitioned_call_nodes[1]->name()));
absl::flat_hash_map<std::string, FunctionDef> func_name_to_func;
EXPECT_THAT(new_graphdef.library().function(), SizeIs(3));
for (const FunctionDef& fdef : new_graphdef.library().function()) {
ASSERT_TRUE(fdef.attr().contains(tensorflow::kNoInlineAttr));
EXPECT_TRUE(fdef.attr().at(tensorflow::kNoInlineAttr).b());
func_name_to_func[fdef.signature().name()] = fdef;
}
for (NodeDef* node : partitioned_call_nodes) {
ASSERT_TRUE(node->attr().contains("f"));
ASSERT_TRUE(func_name_to_func.contains(node->attr().at("f").func().name()));
const FunctionDef& fdef =
func_name_to_func.at(node->attr().at("f").func().name());
ASSERT_TRUE(fdef.attr().contains("device"));
if (fdef.attr().at("device").s() == device0_->name()) {
EXPECT_THAT(node->input(), UnorderedElementsAre(input_node->name()));
} else if (fdef.attr().at("device").s() == device1_->name()) {
EXPECT_THAT(node->input(), IsEmpty());
}
ASSERT_TRUE(node->attr().contains(tensorflow::kNoInlineAttr));
EXPECT_TRUE(node->attr().at(tensorflow::kNoInlineAttr).b());
int send_count = 0, recv_count = 0;
for (const NodeDef& node : fdef.node_def()) {
if (node.op() == "_Send")
++send_count;
else if (node.op() == "_Recv")
++recv_count;
}
EXPECT_EQ(send_count, 1);
EXPECT_EQ(recv_count, 1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/graph_partition.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/utils/graph_partition_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a217a66e-5969-4512-b0f5-1c473f8c4fa5 | cpp | google/quiche | qbone_stream | quiche/quic/qbone/qbone_stream.cc | quiche/quic/qbone/qbone_stream_test.cc | #include "quiche/quic/qbone/qbone_stream.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/qbone/qbone_constants.h"
#include "quiche/quic/qbone/qbone_session_base.h"
#include "quiche/common/platform/api/quiche_command_line_flags.h"
DEFINE_QUICHE_COMMAND_LINE_FLAG(int, qbone_stream_ttl_secs, 3,
"The QBONE Stream TTL in seconds.");
namespace quic {
QboneWriteOnlyStream::QboneWriteOnlyStream(QuicStreamId id,
QuicSession* session)
: QuicStream(id, session, /*is_static=*/false, WRITE_UNIDIRECTIONAL) {
MaybeSetTtl(QuicTime::Delta::FromSeconds(
quiche::GetQuicheCommandLineFlag(FLAGS_qbone_stream_ttl_secs)));
}
void QboneWriteOnlyStream::WritePacketToQuicStream(absl::string_view packet) {
WriteOrBufferData(packet, /*fin=*/true, nullptr);
}
QboneReadOnlyStream::QboneReadOnlyStream(QuicStreamId id,
QboneSessionBase* session)
: QuicStream(id, session, /*is_static=*/false, READ_UNIDIRECTIONAL),
session_(session) {
MaybeSetTtl(QuicTime::Delta::FromSeconds(
quiche::GetQuicheCommandLineFlag(FLAGS_qbone_stream_ttl_secs)));
}
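// Accumulates stream data until the FIN arrives, then hands the complete
// packet to the session. Streams exceeding kMaxQbonePacketBytes are reset
// with QUIC_BAD_APPLICATION_PAYLOAD and reading stops.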
void QboneReadOnlyStream::OnDataAvailable() {
sequencer()->Read(&buffer_);
if (sequencer()->IsClosed()) {
session_->ProcessPacketFromPeer(buffer_);
OnFinRead();
return;
}
if (buffer_.size() > QboneConstants::kMaxQbonePacketBytes) {
if (!rst_sent()) {
Reset(QUIC_BAD_APPLICATION_PAYLOAD);
}
StopReading();
}
}
} | #include "quiche/quic/qbone/qbone_stream.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_test_loopback.h"
#include "quiche/quic/qbone/qbone_constants.h"
#include "quiche/quic/qbone/qbone_session_base.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/mock_connection_id_generator.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quic {
namespace {
using ::testing::_;
using ::testing::StrictMock;
class MockQuicSession : public QboneSessionBase {
public:
MockQuicSession(QuicConnection* connection, const QuicConfig& config)
: QboneSessionBase(connection, nullptr, config,
CurrentSupportedVersions(), nullptr) {}
~MockQuicSession() override {}
QuicConsumedData WritevData(QuicStreamId id, size_t write_length,
QuicStreamOffset offset, StreamSendingState state,
TransmissionType type,
EncryptionLevel level) override {
if (!writable_) {
return QuicConsumedData(0, false);
}
return QuicConsumedData(write_length, state != StreamSendingState::NO_FIN);
}
QboneReadOnlyStream* CreateIncomingStream(QuicStreamId id) override {
return nullptr;
}
MOCK_METHOD(void, MaybeSendRstStreamFrame,
(QuicStreamId stream_id, QuicResetStreamError error,
QuicStreamOffset bytes_written),
(override));
MOCK_METHOD(void, MaybeSendStopSendingFrame,
(QuicStreamId stream_id, QuicResetStreamError error), (override));
void set_writable(bool writable) { writable_ = writable; }
void RegisterReliableStream(QuicStreamId stream_id) {
write_blocked_streams()->RegisterStream(stream_id,
/*is_static_stream=*/false,
QuicStreamPriority());
}
void ActivateReliableStream(std::unique_ptr<QuicStream> stream) {
ActivateStream(std::move(stream));
}
std::unique_ptr<QuicCryptoStream> CreateCryptoStream() override {
return std::make_unique<test::MockQuicCryptoStream>(this);
}
MOCK_METHOD(void, ProcessPacketFromPeer, (absl::string_view), (override));
MOCK_METHOD(void, ProcessPacketFromNetwork, (absl::string_view), (override));
private:
bool writable_ = true;
};
class DummyPacketWriter : public QuicPacketWriter {
public:
DummyPacketWriter() {}
WriteResult WritePacket(const char* buffer, size_t buf_len,
const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address,
PerPacketOptions* options,
const QuicPacketWriterParams& params) override {
return WriteResult(WRITE_STATUS_ERROR, 0);
}
bool IsWriteBlocked() const override { return false; }
void SetWritable() override {}
std::optional<int> MessageTooBigErrorCode() const override {
return std::nullopt;
}
QuicByteCount GetMaxPacketSize(
const QuicSocketAddress& peer_address) const override {
return 0;
}
bool SupportsReleaseTime() const override { return false; }
bool IsBatchMode() const override { return false; }
bool SupportsEcn() const override { return false; }
QuicPacketBuffer GetNextWriteLocation(
const QuicIpAddress& self_address,
const QuicSocketAddress& peer_address) override {
return {nullptr, nullptr};
}
WriteResult Flush() override { return WriteResult(WRITE_STATUS_OK, 0); }
};
class QboneReadOnlyStreamTest : public ::testing::Test,
public QuicConnectionHelperInterface {
public:
void CreateReliableQuicStream() {
Perspective perspective = Perspective::IS_SERVER;
bool owns_writer = true;
alarm_factory_ = std::make_unique<test::MockAlarmFactory>();
connection_.reset(new QuicConnection(
test::TestConnectionId(0), QuicSocketAddress(TestLoopback(), 0),
QuicSocketAddress(TestLoopback(), 0),
this, alarm_factory_.get(),
new DummyPacketWriter(), owns_writer, perspective,
ParsedVersionOfIndex(CurrentSupportedVersions(), 0),
connection_id_generator_));
clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
session_ = std::make_unique<StrictMock<MockQuicSession>>(connection_.get(),
QuicConfig());
session_->Initialize();
stream_ = new QboneReadOnlyStream(kStreamId, session_.get());
session_->ActivateReliableStream(
std::unique_ptr<QboneReadOnlyStream>(stream_));
}
~QboneReadOnlyStreamTest() override {}
const QuicClock* GetClock() const override { return &clock_; }
QuicRandom* GetRandomGenerator() override {
return QuicRandom::GetInstance();
}
quiche::QuicheBufferAllocator* GetStreamSendBufferAllocator() override {
return &buffer_allocator_;
}
protected:
QboneReadOnlyStream* stream_;
std::unique_ptr<StrictMock<MockQuicSession>> session_;
std::unique_ptr<QuicAlarmFactory> alarm_factory_;
std::unique_ptr<QuicConnection> connection_;
quiche::SimpleBufferAllocator buffer_allocator_;
MockClock clock_;
const QuicStreamId kStreamId = QuicUtils::GetFirstUnidirectionalStreamId(
CurrentSupportedVersions()[0].transport_version, Perspective::IS_CLIENT);
quic::test::MockConnectionIdGenerator connection_id_generator_;
};
TEST_F(QboneReadOnlyStreamTest, ReadDataWhole) {
std::string packet = "Stuff";
CreateReliableQuicStream();
QuicStreamFrame frame(kStreamId, true, 0, packet);
EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
stream_->OnStreamFrame(frame);
}
TEST_F(QboneReadOnlyStreamTest, ReadBuffered) {
CreateReliableQuicStream();
std::string packet = "Stuf";
{
QuicStreamFrame frame(kStreamId, false, 0, packet);
stream_->OnStreamFrame(frame);
}
packet = "f";
EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
{
QuicStreamFrame frame(kStreamId, true, 4, packet);
stream_->OnStreamFrame(frame);
}
}
TEST_F(QboneReadOnlyStreamTest, ReadOutOfOrder) {
CreateReliableQuicStream();
std::string packet = "f";
{
QuicStreamFrame frame(kStreamId, true, 4, packet);
stream_->OnStreamFrame(frame);
}
packet = "S";
{
QuicStreamFrame frame(kStreamId, false, 0, packet);
stream_->OnStreamFrame(frame);
}
packet = "tuf";
EXPECT_CALL(*session_, ProcessPacketFromPeer("Stuff"));
{
QuicStreamFrame frame(kStreamId, false, 1, packet);
stream_->OnStreamFrame(frame);
}
}
TEST_F(QboneReadOnlyStreamTest, ReadBufferedTooLarge) {
CreateReliableQuicStream();
std::string packet = "0123456789";
int iterations = (QboneConstants::kMaxQbonePacketBytes / packet.size()) + 2;
EXPECT_CALL(*session_, MaybeSendStopSendingFrame(
kStreamId, QuicResetStreamError::FromInternal(
QUIC_BAD_APPLICATION_PAYLOAD)));
EXPECT_CALL(
*session_,
MaybeSendRstStreamFrame(
kStreamId,
QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD), _));
for (int i = 0; i < iterations; ++i) {
QuicStreamFrame frame(kStreamId, i == (iterations - 1), i * packet.size(),
packet);
if (!stream_->reading_stopped()) {
stream_->OnStreamFrame(frame);
}
}
EXPECT_TRUE(stream_->reading_stopped());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
23685236-e6b3-4b48-baa2-5eeb90ec2264 | cpp | google/quiche | quiche_endian | quiche/common/quiche_endian.h | quiche/common/quiche_endian_test.cc | #ifndef QUICHE_COMMON_QUICHE_ENDIAN_H_
#define QUICHE_COMMON_QUICHE_ENDIAN_H_
#include <algorithm>
#include <cstdint>
#include <type_traits>
#include "quiche/common/platform/api/quiche_export.h"
namespace quiche {
enum Endianness {
NETWORK_BYTE_ORDER,
HOST_BYTE_ORDER
};
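// Byte-order conversion helpers. HostToNet*/NetToHost* use the compiler's
// byte-swap builtins when available; note that they swap unconditionally,
// which assumes a little-endian host.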
class QUICHE_EXPORT QuicheEndian {
public:
#if defined(__clang__) || \
(defined(__GNUC__) && \
((__GNUC__ == 4 && __GNUC_MINOR__ >= 8) || __GNUC__ >= 5))
static uint16_t HostToNet16(uint16_t x) { return __builtin_bswap16(x); }
static uint32_t HostToNet32(uint32_t x) { return __builtin_bswap32(x); }
static uint64_t HostToNet64(uint64_t x) { return __builtin_bswap64(x); }
#else
static uint16_t HostToNet16(uint16_t x) { return PortableByteSwap(x); }
static uint32_t HostToNet32(uint32_t x) { return PortableByteSwap(x); }
static uint64_t HostToNet64(uint64_t x) { return PortableByteSwap(x); }
#endif
static uint16_t NetToHost16(uint16_t x) { return HostToNet16(x); }
static uint32_t NetToHost32(uint32_t x) { return HostToNet32(x); }
static uint64_t NetToHost64(uint64_t x) { return HostToNet64(x); }
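// Fallback used when no byte-swap builtin is available: reverses the bytes
// of any unsigned integer type through a union.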
template <typename T>
static T PortableByteSwap(T input) {
static_assert(std::is_unsigned<T>::value, "T has to be uintNN_t");
union {
T number;
char bytes[sizeof(T)];
} value;
value.number = input;
std::reverse(&value.bytes[0], &value.bytes[sizeof(T)]);
return value.number;
}
};
enum QuicheVariableLengthIntegerLength : uint8_t {
VARIABLE_LENGTH_INTEGER_LENGTH_0 = 0,
VARIABLE_LENGTH_INTEGER_LENGTH_1 = 1,
VARIABLE_LENGTH_INTEGER_LENGTH_2 = 2,
VARIABLE_LENGTH_INTEGER_LENGTH_4 = 4,
VARIABLE_LENGTH_INTEGER_LENGTH_8 = 8,
kQuicheDefaultLongHeaderLengthLength = VARIABLE_LENGTH_INTEGER_LENGTH_2,
};
}
#endif | #include "quiche/common/quiche_endian.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
const uint16_t k16BitTestData = 0xaabb;
const uint16_t k16BitSwappedTestData = 0xbbaa;
const uint32_t k32BitTestData = 0xaabbccdd;
const uint32_t k32BitSwappedTestData = 0xddccbbaa;
const uint64_t k64BitTestData = 0xaabbccdd44332211;
const uint64_t k64BitSwappedTestData = 0x11223344ddccbbaa;
class QuicheEndianTest : public QuicheTest {};
TEST_F(QuicheEndianTest, Portable) {
EXPECT_EQ(k16BitSwappedTestData,
QuicheEndian::PortableByteSwap(k16BitTestData));
EXPECT_EQ(k32BitSwappedTestData,
QuicheEndian::PortableByteSwap(k32BitTestData));
EXPECT_EQ(k64BitSwappedTestData,
QuicheEndian::PortableByteSwap(k64BitTestData));
}
TEST_F(QuicheEndianTest, HostToNet) {
EXPECT_EQ(k16BitSwappedTestData,
quiche::QuicheEndian::HostToNet16(k16BitTestData));
EXPECT_EQ(k32BitSwappedTestData,
quiche::QuicheEndian::HostToNet32(k32BitTestData));
EXPECT_EQ(k64BitSwappedTestData,
quiche::QuicheEndian::HostToNet64(k64BitTestData));
}
TEST_F(QuicheEndianTest, NetToHost) {
EXPECT_EQ(k16BitTestData,
quiche::QuicheEndian::NetToHost16(k16BitSwappedTestData));
EXPECT_EQ(k32BitTestData,
quiche::QuicheEndian::NetToHost32(k32BitSwappedTestData));
EXPECT_EQ(k64BitTestData,
quiche::QuicheEndian::NetToHost64(k64BitSwappedTestData));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_endian.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_endian_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
37cd1887-c85b-4f59-ac67-475cfab01dc1 | cpp | tensorflow/tensorflow | all_reduce_contiguous | third_party/xla/xla/service/all_reduce_contiguous.cc | third_party/xla/xla/service/all_reduce_contiguous_test.cc | #include "xla/service/all_reduce_contiguous.h"
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
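// Rewrites a multi-operand all-reduce: every operand is bitcast to a rank-1
// array, the arrays are concatenated and reduced by a single all-reduce, and
// slices of the result are bitcast back to the original shapes and returned
// as a tuple that replaces the original instruction.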
absl::Status ReplaceWithContiguousAllReduce(
HloAllReduceInstruction* all_reduce) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
int64_t total_size = 0;
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
total_size += num_elements;
}
Shape concat_shape = ShapeUtil::MakeShape(element_type, {total_size});
HloInstruction* concatenated =
computation.AddInstruction(HloInstruction::CreateConcatenate(
concat_shape, flat_operands, /*dimension=*/0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
concat_shape, {concatenated}, all_reduce->to_apply(),
all_reduce->device_list(),
/*constrain_layout=*/false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
std::vector<HloInstruction*> outputs;
outputs.reserve(all_reduce->operand_count());
int64_t offset = 0;
for (int64_t i = 0; i < all_reduce->operand_count(); ++i) {
const Shape& flat_shape = flat_operands[i]->shape();
int64_t end = offset + flat_shape.dimensions(0);
HloInstruction* sliced = computation.AddInstruction(
HloInstruction::CreateSlice(flat_shape, new_all_reduce,
/*start_indices=*/{offset},
/*limit_indices=*/{end},
/*strides=*/{1}));
outputs.push_back(computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), sliced)));
offset = end;
}
TF_RETURN_IF_ERROR(computation.ReplaceWithNewInstruction(
all_reduce, HloInstruction::CreateTuple(outputs)));
return absl::OkStatus();
}
}
absl::StatusOr<bool> AllReduceContiguous::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceContiguous";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceContiguous because the module contains all-reduce "
"with constrained layouts";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce &&
instruction->operand_count() > 1) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_RETURN_IF_ERROR(ReplaceWithContiguousAllReduce(all_reduce));
}
return !all_reduces.empty();
}
} | #include "xla/service/all_reduce_contiguous.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllReduceContiguousTest = HloTestBase;
TEST_F(AllReduceContiguousTest, Simple) {
const absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[128] parameter(0)
p1 = f32[4,4] parameter(1)
ROOT crs = (f32[128], f32[4,4]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
AllReduceContiguous pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* root = module->entry_computation()->root_instruction();
auto crs =
AllOf(op::Shape("f32[144]"),
op::AllReduce(op::Concatenate(op::Bitcast(op::Parameter(0)),
op::Bitcast(op::Parameter(1)))));
ASSERT_THAT(
root,
op::Tuple(AllOf(op::Shape("f32[128]"), op::Bitcast(op::Slice(crs))),
AllOf(op::Shape("f32[4,4]"), op::Bitcast(op::Slice(crs)))));
EXPECT_EQ(root->operand(0)->operand(0)->slice_starts(0), 0);
EXPECT_EQ(root->operand(0)->operand(0)->slice_limits(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_starts(0), 128);
EXPECT_EQ(root->operand(1)->operand(0)->slice_limits(0), 128 + 4 * 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_contiguous.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_reduce_contiguous_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
288c8bde-c5d9-4a6c-a2a7-e4e91ae9a23a | cpp | tensorflow/tensorflow | auto_scaler | tensorflow/core/data/service/auto_scaler.cc | tensorflow/core/data/service/auto_scaler_test.cc | #include "tensorflow/core/data/service/auto_scaler.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/metrics.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr double kAutoScalerOutlierSigmas = 1.0;
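// Statistics helpers over {id -> rate} maps. Note that GetMedian returns the
// upper of the two middle values when the number of rates is even.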
template <typename T>
double GetMedian(const absl::flat_hash_map<T, double>& rates) {
std::vector<double> sorted_rates;
for (const auto& [id, rate] : rates) {
sorted_rates.push_back(rate);
}
std::sort(sorted_rates.begin(), sorted_rates.end());
return sorted_rates[sorted_rates.size() / 2];
}
template <typename T>
double GetMean(const absl::flat_hash_map<T, double>& rates) {
double rates_sum = 0.0;
for (const auto& [id, rate] : rates) {
rates_sum += rate;
}
if (rates_sum == 0.0) return 0.0;
return rates_sum / static_cast<double>(rates.size());
}
template <typename T>
double GetStandardDeviation(const absl::flat_hash_map<T, double>& rates,
double mean) {
double squared_distances_sum = 0.0;
for (const auto& [id, rate] : rates) {
squared_distances_sum += (rate - mean) * (rate - mean);
}
if (squared_distances_sum == 0.0 || rates.size() <= 1) return 0.0;
return std::sqrt(squared_distances_sum /
static_cast<double>(rates.size() - 1));
}
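// Copies `rates` into `rates_without_outliers`, substituting the median for
// any rate more than `outlier_sigmas` standard deviations from the mean.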
template <typename T>
void ReplaceOutliers(const absl::flat_hash_map<T, double>& rates,
std::vector<double>& rates_without_outliers,
double outlier_sigmas) {
if (rates.empty()) return;
double mean = GetMean(rates);
double median = GetMedian(rates);
double standard_deviation = GetStandardDeviation(rates, mean);
double lower_threshold = mean - standard_deviation * outlier_sigmas;
double upper_threshold = mean + standard_deviation * outlier_sigmas;
for (const auto& [id, rate] : rates) {
if (rate >= lower_threshold && rate <= upper_threshold) {
rates_without_outliers.push_back(rate);
} else {
rates_without_outliers.push_back(median);
}
}
}
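// Estimate: ceil(sum of outlier-adjusted consumption rates / average
// outlier-adjusted worker throughput), floored at one worker. Returns
// nullopt until at least one worker and one consumer have reported times.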
std::optional<int64_t> AutoScaler::GetOptimalNumberOfWorkers() const
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (worker_throughputs_.empty() || consumption_rates_.empty())
return std::nullopt;
std::vector<double> consumption_rates_without_outliers;
ReplaceOutliers(consumption_rates_, consumption_rates_without_outliers,
kAutoScalerOutlierSigmas);
double consumption_rates_sum_ =
std::accumulate(consumption_rates_without_outliers.begin(),
consumption_rates_without_outliers.end(), 0.0);
std::vector<double> worker_throughputs_without_outliers;
ReplaceOutliers(worker_throughputs_, worker_throughputs_without_outliers,
kAutoScalerOutlierSigmas);
double worker_throughputs_sum_ =
std::accumulate(worker_throughputs_without_outliers.begin(),
worker_throughputs_without_outliers.end(), 0.0);
double average_worker_throughput =
worker_throughputs_sum_ / static_cast<double>(worker_throughputs_.size());
int64_t optimal_number_of_workers =
ceil(consumption_rates_sum_ / average_worker_throughput);
return std::max(int64_t{1}, optimal_number_of_workers);
}
absl::Status AutoScaler::ReportProcessingTime(const std::string& worker_address,
absl::Duration processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(absl::StrCat(
"Cannot update processing_time with a ZeroDuration or negative value: ",
absl::FormatDuration(processing_time)));
}
double worker_throughput = 1.0 / absl::ToDoubleSeconds(processing_time);
tsl::mutex_lock l(mu_);
worker_throughputs_[worker_address] = worker_throughput;
return absl::OkStatus();
}
absl::Status AutoScaler::ReportTargetProcessingTime(
int64_t consumer_id, absl::Duration target_processing_time)
TF_LOCKS_EXCLUDED(mu_) {
if (target_processing_time <= absl::ZeroDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Cannot update target_processing_time with a ZeroDuration "
"or negative value: ",
absl::FormatDuration(target_processing_time)));
}
double consumption_rate = 1.0 / absl::ToDoubleSeconds(target_processing_time);
tsl::mutex_lock l(mu_);
consumption_rates_[consumer_id] = consumption_rate;
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveWorker(const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!worker_throughputs_.contains(worker_address))
return absl::NotFoundError(
absl::StrCat("Worker with address ", worker_address, " not found"));
worker_throughputs_.erase(worker_address);
return absl::OkStatus();
}
absl::Status AutoScaler::RemoveConsumer(int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!consumption_rates_.contains(consumer_id))
return absl::NotFoundError(
absl::StrCat("Consumer with ID ", consumer_id, " not found"));
consumption_rates_.erase(consumer_id);
return absl::OkStatus();
}
void MultipleIterationsAutoScaler::EnsureIterationIsRegistered(
int64_t iteration_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
if (!auto_scalers_.contains(iteration_id)) {
auto_scalers_[iteration_id] = std::make_unique<AutoScaler>();
}
}
absl::Status MultipleIterationsAutoScaler::UnregisterIteration(
int64_t iteration_id) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat("AutoScaler for iteration_id ",
iteration_id, " does not exist"));
auto_scalers_.erase(iteration_id);
return absl::OkStatus();
}
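// Publishes the max-over-iterations estimate to the metrics registry,
// clamping it to min(4 * current, current + 500) whenever it exceeds either
// bound, and to 100000 overall.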
absl::Status MultipleIterationsAutoScaler::UpdateOptimalNumberOfWorkersMetric(
int64_t current_number_of_workers) TF_LOCKS_EXCLUDED(mu_) {
if (current_number_of_workers <= 0)
return absl::InvalidArgumentError(
"The current number of workers must be positive");
std::optional<int64_t> optimal_number_of_workers =
GetOptimalNumberOfWorkers();
if (!optimal_number_of_workers)
return absl::UnavailableError(
"Cannot update the optimal number of workers metric because there are "
"no reported processing and target processing times for at least one "
"iteration");
VLOG(3) << "Estimated optimal number of workers: "
<< optimal_number_of_workers.value();
int64_t bound_optimal_number_of_workers = optimal_number_of_workers.value();
if (bound_optimal_number_of_workers > current_number_of_workers * 4 ||
bound_optimal_number_of_workers > current_number_of_workers + 500) {
bound_optimal_number_of_workers = std::min(current_number_of_workers * 4,
current_number_of_workers + 500);
}
bound_optimal_number_of_workers =
std::min(bound_optimal_number_of_workers, int64_t{100000});
VLOG(3) << "Bound optimal number of workers: "
<< bound_optimal_number_of_workers;
metrics::RecordTFDataServiceOptimalNumberOfWorkers(
bound_optimal_number_of_workers);
return absl::OkStatus();
}
std::optional<int64_t> MultipleIterationsAutoScaler::GetOptimalNumberOfWorkers()
const TF_LOCKS_EXCLUDED(mu_) {
int64_t optimal_number_of_workers = 0;
{
tsl::tf_shared_lock l(mu_);
for (const auto& [iteration_id, auto_scaler] : auto_scalers_) {
std::optional<int64_t> current_optimal_number_of_workers =
auto_scaler->GetOptimalNumberOfWorkers();
if (!current_optimal_number_of_workers.has_value()) continue;
optimal_number_of_workers = std::max(
optimal_number_of_workers, current_optimal_number_of_workers.value());
}
}
if (optimal_number_of_workers == 0)
return std::nullopt;
else
return optimal_number_of_workers;
}
absl::Status MultipleIterationsAutoScaler::ReportProcessingTime(
int64_t iteration_id, const std::string& worker_address,
absl::Duration processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportProcessingTime(
worker_address, processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::ReportTargetProcessingTime(
int64_t iteration_id, int64_t consumer_id,
absl::Duration target_processing_time) TF_LOCKS_EXCLUDED(mu_) {
tsl::mutex_lock l(mu_);
EnsureIterationIsRegistered(iteration_id);
absl::Status status = auto_scalers_[iteration_id]->ReportTargetProcessingTime(
consumer_id, target_processing_time);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveWorker(
int64_t iteration_id, const std::string& worker_address)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveWorker(worker_address);
return status;
}
absl::Status MultipleIterationsAutoScaler::RemoveConsumer(int64_t iteration_id,
int64_t consumer_id)
TF_LOCKS_EXCLUDED(mu_) {
tsl::tf_shared_lock l(mu_);
if (!auto_scalers_.contains(iteration_id))
return absl::NotFoundError(absl::StrCat(
"There are no reported times for iteration_id ", iteration_id));
absl::Status status =
auto_scalers_[iteration_id]->RemoveConsumer(consumer_id);
return status;
}
}
} | #include "tensorflow/core/data/service/auto_scaler.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace data {
namespace {
using ::tsl::testing::StatusIs;
TEST(AutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
AutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredWorkers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersNoRegisteredConsumers) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate1) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 8);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate2) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersExpectedEstimate3) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(0, absl::Seconds(0.01)));
TF_ASSERT_OK(auto_scaler.ReportTargetProcessingTime(1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersTPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(500)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Nanoseconds(3000000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Nanoseconds(2000000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 107);
}
TEST(AutoScalerTest, GetOptimalNumberOfWorkersRemoveOutliersPT) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Nanoseconds(80000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Nanoseconds(70000000)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Nanoseconds(1000)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Nanoseconds(300000)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 244);
}
TEST(AutoScalerTest, ReportProcessingTimeNewWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeExistingWorker) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/2:20000",
absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
"/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeExistingConsumer) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNewAndExisting) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, absl::Microseconds(10)));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, ReportTargetProcessingTimeNegativeDuration) {
AutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(AutoScalerTest, RemoveWorkerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/1:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/1:20000"));
}
TEST(AutoScalerTest, RemoveNonexistentWorker) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker("/worker/task/0:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveWorkerAfterNewPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime("/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker("/worker/task/0:20000"));
}
TEST(AutoScalerTest, RemoveConsumerSuccessful) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1));
}
TEST(AutoScalerTest, RemoveNonexistentConsumer) {
AutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(AutoScalerTest, RemoveConsumerAfterNewTPTReported) {
AutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterExistingIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.UnregisterIteration(0));
}
TEST(MultipleIterationsAutoScalerTest, UnregisterNonexistentIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.UnregisterIteration(0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricInvalidCurrentWorkers) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(0);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(-1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricNoReportedTPTs) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
absl::Status status = auto_scaler.UpdateOptimalNumberOfWorkersMetric(1);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricWithReportedTimes) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_GT(cell_reader.Read(), 0);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
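// The limit tests below pin down how much the reported metric may grow in a
// single update. The expected values are consistent with capping each update
// at min(4 * current_workers, current_workers + 500) with an absolute ceiling
// of 100000 workers; this reading is inferred from the test data, not from
// the implementation.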
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricIncreaseWithinLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(500)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(15));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 50);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric4xIncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(2));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 8);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetric500IncreaseLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(1000));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 1500);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest,
UpdateOptimalNumberOfWorkersMetricMaxLimit) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(200000)));
TF_ASSERT_OK(auto_scaler.UpdateOptimalNumberOfWorkersMetric(99700));
monitoring::testing::CellReader<int64_t> cell_reader(
"/tensorflow/data/service/optimal_number_of_workers");
EXPECT_EQ(cell_reader.Read(), 100000);
metrics::RecordTFDataServiceOptimalNumberOfWorkers(0);
}
TEST(MultipleIterationsAutoScalerTest, GetOptimalNumberOfWorkersInitialState) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredWorkers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(5)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(5)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersNoRegisteredConsumers) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), std::nullopt);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate1) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 11);
}
TEST(MultipleIterationsAutoScalerTest,
GetOptimalNumberOfWorkersExpectedEstimate2) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Seconds(0.15)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Seconds(0.025)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Seconds(0.05)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/0:20000",
absl::Seconds(0.1)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(2, "/worker/task/1:20000",
absl::Seconds(0.2)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 0, absl::Seconds(0.01)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(2, 1, absl::Seconds(0.02)));
EXPECT_EQ(auto_scaler.GetOptimalNumberOfWorkers(), 20);
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/1:20000",
absl::Microseconds(30)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/1:20000",
absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result = auto_scaler.ReportProcessingTime(
0, "/worker/task/0:20000", absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewIteration) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeNewConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeExistingWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNewAndExisting) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 1, absl::Microseconds(30)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 1, absl::Microseconds(30)));
}
TEST(MultipleIterationsAutoScalerTest, ReportTargetProcessingTimeZeroDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::ZeroDuration());
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest,
ReportTargetProcessingTimeNegativeDuration) {
MultipleIterationsAutoScaler auto_scaler;
absl::Status result =
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(-10));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveWorker(1, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(1, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
TF_ASSERT_OK(auto_scaler.RemoveWorker(1, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentWorker) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveWorker(0, "/worker/task/1:20000"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveWorkerAfterNewPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(10)));
TF_ASSERT_OK(auto_scaler.ReportProcessingTime(0, "/worker/task/0:20000",
absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveWorker(0, "/worker/task/0:20000"));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerUnregisteredIteration) {
MultipleIterationsAutoScaler auto_scaler;
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 0),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(auto_scaler.RemoveConsumer(1, 0),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerSuccessful) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(1, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(1, 0));
}
TEST(MultipleIterationsAutoScalerTest, RemoveNonexistentConsumer) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
EXPECT_THAT(auto_scaler.RemoveConsumer(0, 1),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(MultipleIterationsAutoScalerTest, RemoveConsumerAfterNewTPTReported) {
MultipleIterationsAutoScaler auto_scaler;
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(10)));
TF_ASSERT_OK(
auto_scaler.ReportTargetProcessingTime(0, 0, absl::Microseconds(20)));
TF_ASSERT_OK(auto_scaler.RemoveConsumer(0, 0));
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/auto_scaler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/auto_scaler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
ded06446-76e1-4235-8d69-e8c7e7375965 | cpp | google/libaddressinput | address_formatter | cpp/src/address_formatter.cc | cpp/test/address_formatter_test.cc
#include <libaddressinput/address_formatter.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <string>
#include <vector>
#include "format_element.h"
#include "language.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/cctype_tolower_equal.h"
#include "util/size.h"
namespace i18n {
namespace addressinput {
namespace {
const char kCommaSeparator[] = ", ";
const char kSpaceSeparator[] = " ";
const char kArabicCommaSeparator[] = "، ";
const char kLanguagesThatUseSpace[][3] = {
"th",
"ko",
};
const char kLanguagesThatHaveNoSeparator[][3] = {
"ja",
"zh",
};
const char kLanguagesThatUseAnArabicComma[][3] = {
"ar",
"fa",
"ku",
"ps",
"ur",
};
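// Returns the separator placed between address lines when they are joined
// into a single line: ", " for Latin-script addresses, and per the tables
// above a plain space, no separator, or the Arabic comma for the listed
// languages, falling back to ", ".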
std::string GetLineSeparatorForLanguage(const std::string& language_tag) {
Language address_language(language_tag);
if (address_language.has_latin_script) {
return kCommaSeparator;
}
const std::string& base_language = address_language.base;
using std::placeholders::_1;
if (std::find_if(kLanguagesThatUseSpace,
kLanguagesThatUseSpace + size(kLanguagesThatUseSpace),
std::bind(&EqualToTolowerString, _1, base_language)) !=
kLanguagesThatUseSpace + size(kLanguagesThatUseSpace)) {
return kSpaceSeparator;
} else if (std::find_if(
kLanguagesThatHaveNoSeparator,
kLanguagesThatHaveNoSeparator +
size(kLanguagesThatHaveNoSeparator),
std::bind(&EqualToTolowerString, _1, base_language)) !=
kLanguagesThatHaveNoSeparator +
size(kLanguagesThatHaveNoSeparator)) {
return "";
} else if (std::find_if(
kLanguagesThatUseAnArabicComma,
kLanguagesThatUseAnArabicComma +
size(kLanguagesThatUseAnArabicComma),
std::bind(&EqualToTolowerString, _1, base_language)) !=
kLanguagesThatUseAnArabicComma +
size(kLanguagesThatUseAnArabicComma)) {
return kArabicCommaSeparator;
}
return kCommaSeparator;
}
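// Joins |lines| into |*line| using the separator appropriate for
// |language_tag|.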
void CombineLinesForLanguage(const std::vector<std::string>& lines,
const std::string& language_tag,
std::string* line) {
line->clear();
std::string separator = GetLineSeparatorForLanguage(language_tag);
for (auto it = lines.begin(); it != lines.end(); ++it) {
if (it != lines.begin()) {
line->append(separator);
}
line->append(*it);
}
}
}
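// Formats |address_data| into one address line per output line, following the
// format rule for its region and preferring the Latin format when the address
// language uses Latin script.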
void GetFormattedNationalAddress(
const AddressData& address_data, std::vector<std::string>* lines) {
assert(lines != nullptr);
lines->clear();
Rule rule;
rule.CopyFrom(Rule::GetDefault());
rule.ParseSerializedRule(
RegionDataConstants::GetRegionData(address_data.region_code));
Language language(address_data.language_code);
const std::vector<FormatElement>& format =
language.has_latin_script && !rule.GetLatinFormat().empty()
? rule.GetLatinFormat()
: rule.GetFormat();
std::vector<FormatElement> pruned_format;
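  // Prune the format: always keep newlines; keep fields that are non-empty in
  // the address; keep a literal only if it is not directly attached to an
  // empty field on either side, so prefixes and suffixes of dropped fields
  // are dropped with them.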
for (auto element_it = format.begin();
element_it != format.end();
++element_it) {
if (element_it->IsNewline() ||
(element_it->IsField() &&
!address_data.IsFieldEmpty(element_it->GetField())) ||
(!element_it->IsField() &&
(element_it + 1 == format.end() ||
!(element_it + 1)->IsField() ||
!address_data.IsFieldEmpty((element_it + 1)->GetField())) &&
(element_it == format.begin() ||
!(element_it - 1)->IsField() ||
(!pruned_format.empty() && pruned_format.back().IsField())))) {
pruned_format.push_back(*element_it);
}
}
std::string line;
for (const auto& element : pruned_format) {
if (element.IsNewline()) {
if (!line.empty()) {
lines->push_back(line);
line.clear();
}
} else if (element.IsField()) {
AddressField field = element.GetField();
if (field == STREET_ADDRESS) {
if (!address_data.IsFieldEmpty(field)) {
line.append(address_data.address_line.front());
if (address_data.address_line.size() > 1U) {
lines->push_back(line);
line.clear();
const auto last_element_iterator =
address_data.address_line.begin() +
address_data.address_line.size() - 1;
lines->insert(lines->end(), address_data.address_line.begin() + 1,
last_element_iterator);
line.append(*last_element_iterator);
}
}
} else {
line.append(address_data.GetFieldValue(field));
}
} else {
line.append(element.GetLiteral());
}
}
if (!line.empty()) {
lines->push_back(line);
}
}
void GetFormattedNationalAddressLine(
const AddressData& address_data, std::string* line) {
std::vector<std::string> address_lines;
GetFormattedNationalAddress(address_data, &address_lines);
CombineLinesForLanguage(address_lines, address_data.language_code, line);
}
void GetStreetAddressLinesAsSingleLine(
const AddressData& address_data, std::string* line) {
CombineLinesForLanguage(
address_data.address_line, address_data.language_code, line);
}
}
}
#include <libaddressinput/address_formatter.h>
#include <libaddressinput/address_data.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::GetFormattedNationalAddress;
using i18n::addressinput::GetFormattedNationalAddressLine;
using i18n::addressinput::GetStreetAddressLinesAsSingleLine;
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_EmptyAddress) {
const AddressData address;
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_TRUE(result.empty());
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_1Line) {
AddressData address{
.region_code = "US",
.address_line{"Line 1"},
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
address.language_code = "en";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
address.language_code = "zh-Hans";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1", result);
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_2Lines) {
AddressData address{
.region_code = "US",
.address_line{
"Line 1",
"Line 2",
},
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1, Line 2", result);
address.language_code = "en";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1, Line 2", result);
address.language_code = "zh-Hans";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1Line 2", result);
address.language_code = "ko";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1 Line 2", result);
address.language_code = "ar";
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ("Line 1، Line 2", result);
}
TEST(AddressFormatterTest, GetStreetAddressLinesAsSingleLine_5Lines) {
const AddressData address{
.region_code = "US",
.address_line{
"Line 1",
"Line 2",
"Line 3",
"Line 4",
"Line 5",
},
.language_code = "fr",
};
std::string result;
GetStreetAddressLinesAsSingleLine(address, &result);
EXPECT_EQ(result, "Line 1, Line 2, Line 3, Line 4, Line 5");
}
TEST(AddressFormatterTest, GetFormattedNationalAddressLocalLanguage) {
AddressData address{
.region_code = "NZ",
.address_line{
"Rotopapa",
"Irwell 3RD",
},
.locality = "Leeston",
.postal_code = "8704",
};
const std::vector<std::string> expected{
"Rotopapa",
"Irwell 3RD",
"Leeston 8704",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.language_code = "en-Latn-CN";
lines.clear();
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
std::string one_line;
GetFormattedNationalAddressLine(address, &one_line);
EXPECT_EQ("Rotopapa, Irwell 3RD, Leeston 8704", one_line);
}
TEST(AddressFormatterTest, GetFormattedNationalAddressLatinFormat) {
static const char kTaiwanCity[] = "大安區";
static const char kTaiwanAdmin[] = "台北市";
static const char kTaiwanStreetLine[] = "台灣信義路三段33號";
static const char kPostalCode[] = "106";
const AddressData address{
.region_code = "TW",
.address_line{kTaiwanStreetLine},
.administrative_area = kTaiwanAdmin,
.locality = kTaiwanCity,
.postal_code = kPostalCode,
.language_code = "zh-Hant",
};
const std::vector<std::string> expected{
kPostalCode,
std::string(kTaiwanAdmin).append(kTaiwanCity),
kTaiwanStreetLine,
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
std::string one_line;
GetFormattedNationalAddressLine(address, &one_line);
EXPECT_EQ(std::string(kPostalCode)
.append(kTaiwanAdmin)
.append(kTaiwanCity)
.append(kTaiwanStreetLine),
one_line);
const AddressData latin_address{
.region_code = "TW",
.address_line{"No. 33, Section 3 Xinyi Rd"},
.administrative_area = "Taipei City",
.locality = "Da-an District",
.postal_code = kPostalCode,
.language_code = "zh-Latn",
};
const std::vector<std::string> expected_latin{
"No. 33, Section 3 Xinyi Rd",
"Da-an District, Taipei City 106",
};
lines.clear();
GetFormattedNationalAddress(latin_address, &lines);
EXPECT_EQ(expected_latin, lines);
GetFormattedNationalAddressLine(latin_address, &one_line);
EXPECT_EQ("No. 33, Section 3 Xinyi Rd, Da-an District, Taipei City 106",
one_line);
}
TEST(AddressFormatterTest, GetFormattedNationalAddressMultilingualCountry) {
const AddressData address{
.region_code = "CA",
.address_line{
"5 Rue du Tresor",
"Apt. 4",
},
.administrative_area = "QC",
.locality = "Montmagny",
.postal_code = "G1R 123",
.language_code = "fr",
};
const std::vector<std::string> expected{
"5 Rue du Tresor",
"Apt. 4",
"Montmagny QC G1R 123",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest, GetFormattedNationalAddress_InlineStreetAddress) {
const AddressData address{
.region_code = "CI",
.address_line{"32 Boulevard Carde"},
.locality = "Abidjan",
.sorting_code = "64",
.language_code = "zh-Hant",
};
const std::vector<std::string> expected{"64 32 Boulevard Carde Abidjan 64"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralsAroundField) {
AddressData address{.region_code = "CH"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Zurich";
expected.emplace_back("Zurich");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "8001";
expected.back().assign("CH-8001 Zurich");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality.clear();
expected.back().assign("CH-8001");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralsBetweenFields) {
AddressData address{.region_code = "US"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area = "CA";
expected.emplace_back("CA");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Los Angeles";
expected.back().assign("Los Angeles, CA");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "90291";
expected.back().assign("Los Angeles, CA 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area.clear();
expected.back().assign("Los Angeles 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality.clear();
address.administrative_area = "CA";
expected.back().assign("CA 90291");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralOnSeparateLine) {
AddressData address{.region_code = "AX"};
std::vector<std::string> expected{"ÅLAND"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "City";
expected.emplace(expected.begin(), "City");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "123";
expected.front().assign("AX-123 City");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_LiteralBeforeField) {
AddressData address{
.region_code = "JP",
.language_code = "ja",
};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code = "123";
expected.emplace_back("〒123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.administrative_area = "Prefecture";
expected.emplace_back("Prefecture");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.postal_code.clear();
expected.erase(expected.begin());
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddress_LiteralBeforeOneAddressLine) {
const AddressData address{
.region_code = "JP",
.address_line{"Roppongi Hills"},
.administrative_area = "Tokyo",
.language_code = "ja_Latn",
};
const std::vector<std::string> expected{"Roppongi Hills, Tokyo"};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddress_LiteralBeforeTwoAddressLines) {
const AddressData address{
.region_code = "JP",
.address_line{
"Roppongi Hills",
"Mori Tower",
},
.administrative_area = "Tokyo",
.language_code = "ja_Latn",
};
const std::vector<std::string> expected{
"Roppongi Hills",
"Mori Tower, Tokyo",
};
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
TEST(AddressFormatterTest,
GetFormattedNationalAddressMissingFields_DuplicateField) {
AddressData address{.region_code = "CI"};
std::vector<std::string> expected;
std::vector<std::string> lines;
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.sorting_code = "123";
expected.emplace_back("123 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.address_line.emplace_back("456 Main St");
expected.back().assign("123 456 Main St 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.locality = "Yamoussoukro";
expected.back().assign("123 456 Main St Yamoussoukro 123");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.sorting_code.erase();
expected.back().assign("456 Main St Yamoussoukro");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
address.address_line.clear();
expected.back().assign("Yamoussoukro");
GetFormattedNationalAddress(address, &lines);
EXPECT_EQ(expected, lines);
}
}
https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/address_formatter.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/address_formatter_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07
1ceb4a07-5c78-4d1a-9f00-874e52495ac2 | cpp | google/tensorstore | blosc_compressor | tensorstore/internal/compression/blosc_compressor.cc | tensorstore/driver/n5/blosc_compressor_test.cc
#include "tensorstore/internal/compression/blosc_compressor.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "riegeli/base/chain.h"
#include "riegeli/bytes/chain_reader.h"
#include "riegeli/bytes/cord_writer.h"
#include "riegeli/bytes/read_all.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/write.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/internal/compression/blosc.h"
namespace tensorstore {
namespace internal {
namespace {
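// Writer that buffers everything it is given in an in-memory cord and, when
// closed, compresses the whole buffer with Blosc and forwards the result to
// the wrapped writer. Blosc compresses a complete payload rather than a
// stream, so the deferral is required.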
class BloscDeferredWriter : public riegeli::CordWriter<absl::Cord> {
public:
explicit BloscDeferredWriter(blosc::Options options,
std::unique_ptr<riegeli::Writer> base_writer)
: CordWriter(riegeli::CordWriterBase::Options().set_max_block_size(
std::numeric_limits<size_t>::max())),
options_(std::move(options)),
base_writer_(std::move(base_writer)) {}
void Done() override {
CordWriter::Done();
auto output = blosc::Encode(dest().Flatten(), options_);
if (!output.ok()) {
Fail(std::move(output).status());
return;
}
auto status = riegeli::Write(*std::move(output), std::move(base_writer_));
if (!status.ok()) {
Fail(std::move(status));
return;
}
}
private:
blosc::Options options_;
std::unique_ptr<riegeli::Writer> base_writer_;
};
}
std::unique_ptr<riegeli::Writer> BloscCompressor::GetWriter(
std::unique_ptr<riegeli::Writer> base_writer, size_t element_bytes) const {
return std::make_unique<BloscDeferredWriter>(
blosc::Options{codec.c_str(), level, shuffle, blocksize, element_bytes},
std::move(base_writer));
}
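// Decompression is likewise non-streaming: the whole underlying stream is
// read and decoded up front, and the decoded bytes (or the failure status)
// are exposed through the returned reader.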
std::unique_ptr<riegeli::Reader> BloscCompressor::GetReader(
std::unique_ptr<riegeli::Reader> base_reader, size_t element_bytes) const {
auto output = riegeli::ReadAll(
std::move(base_reader),
[](absl::string_view input) -> absl::StatusOr<std::string> {
auto output = blosc::Decode(input);
if (!output.ok()) return std::move(output).status();
return *std::move(output);
});
auto reader = std::make_unique<riegeli::ChainReader<riegeli::Chain>>(
output.ok() ? riegeli::Chain(*std::move(output)) : riegeli::Chain());
if (!output.ok()) {
reader->Fail(std::move(output).status());
}
return reader;
}
}
}
#include <cstdint>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/array.h"
#include "tensorstore/driver/n5/compressor.h"
#include "tensorstore/driver/n5/metadata.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::span;
using ::tensorstore::internal_n5::Compressor;
using ::tensorstore::internal_n5::DecodeChunk;
using ::tensorstore::internal_n5::N5Metadata;
TEST(BloscCompressionTest, Parse) {
for (auto codec : {"lz4", "blosclz", "lz4hc", "snappy", "zlib", "zstd"}) {
for (int level = 0; level <= 9; ++level) {
for (int shuffle = 0; shuffle <= 2; ++shuffle) {
for (int blocksize : {0, 256}) {
::nlohmann::json j{{"type", "blosc"},
{"cname", codec},
{"shuffle", shuffle},
{"clevel", level},
{"blocksize", blocksize}};
tensorstore::TestJsonBinderRoundTripJsonOnly<Compressor>({j});
}
}
}
}
EXPECT_THAT(
Compressor::FromJson({{"type", "blosc"}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", 3}, {"shuffle", 0}, {"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "invalid"},
{"shuffle", 0},
{"clevel", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", -1}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 10}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", -1},
{"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
Compressor::FromJson(
{{"type", "blosc"}, {"cname", "lz4"}, {"shuffle", 3}, {"clevel", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(Compressor::FromJson({{"type", "blosc"},
{"cname", "lz4"},
{"shuffle", 0},
{"clevel", 3},
{"extra", 5}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(BloscCompressionTest, RoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto metadata, N5Metadata::FromJson({{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{{"type", "blosc"},
{"cname", "lz4"},
{"clevel", 5},
{"shuffle", 0}}}}));
auto array = MakeArray<uint16_t>({{{1, 2, 3}, {4, 5, 6}}});
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
TEST(BloscCompressionTest, Golden) {
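  // Golden encoded chunk. Read against the N5 chunk layout, this is a header
  // (mode 0, 3 dimensions, block shape {1, 2, 3}) followed by a
  // Blosc-compressed payload.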
const unsigned char kData[] = {
0x00, 0x00,
0x00, 0x03,
0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x03,
0x02, 0x01, 0x96, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x0c, 0x00,
0x00, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x02,
0x00, 0x03, 0x00, 0x04, 0x00, 0x05, 0x00, 0x06,
};
std::string encoded_data(std::begin(kData), std::end(kData));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata,
N5Metadata::FromJson({
{"dimensions", {10, 11, 12}},
{"blockSize", {1, 2, 3}},
{"dataType", "uint16"},
{"compression",
{
{"type", "blosc"},
{"clevel", 3},
{"blocksize", 0},
{"cname", "zstd"},
{"shuffle", 2},
}},
}));
auto array = MakeArray<uint16_t>({{{1, 3, 5}, {2, 4, 6}}});
EXPECT_EQ(array, DecodeChunk(metadata, absl::Cord(encoded_data)));
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto buffer, EncodeChunk(metadata, array));
EXPECT_EQ(array, DecodeChunk(metadata, buffer));
}
}
}
https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/compression/blosc_compressor.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/n5/blosc_compressor_test.cc | 4f887a6430414cd6088e1743555015b10f116d50
800c1f91-fb75-4e4d-b139-9a34c49d661b | cpp | google/cel-cpp | type_type | common/types/type_type.cc | common/types/type_type_test.cc
#include "common/type.h"
#include "absl/base/nullability.h"
#include "absl/types/span.h"
#include "google/protobuf/arena.h"
namespace cel {
namespace common_internal {
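// Arena-owned storage for the type parameter of a parameterized TypeType.
// A TypeType with no data represents the unparameterized `type` and reports
// empty parameters.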
struct TypeTypeData final {
static TypeTypeData* Create(absl::Nonnull<google::protobuf::Arena*> arena,
const Type& type) {
return google::protobuf::Arena::Create<TypeTypeData>(arena, type);
}
explicit TypeTypeData(const Type& type) : type(type) {}
TypeTypeData() = delete;
TypeTypeData(const TypeTypeData&) = delete;
TypeTypeData(TypeTypeData&&) = delete;
TypeTypeData& operator=(const TypeTypeData&) = delete;
TypeTypeData& operator=(TypeTypeData&&) = delete;
const Type type;
};
}
TypeType::TypeType(absl::Nonnull<google::protobuf::Arena*> arena, const Type& parameter)
: TypeType(common_internal::TypeTypeData::Create(arena, parameter)) {}
TypeParameters TypeType::GetParameters() const {
if (data_) {
return TypeParameters(absl::MakeConstSpan(&data_->type, 1));
}
return {};
}
Type TypeType::GetType() const {
if (data_) {
return data_->type;
}
return Type();
}
} | #include "common/type.h"
#include <sstream>
#include "absl/hash/hash.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(TypeType, Kind) {
EXPECT_EQ(TypeType().kind(), TypeType::kKind);
EXPECT_EQ(Type(TypeType()).kind(), TypeType::kKind);
}
TEST(TypeType, Name) {
EXPECT_EQ(TypeType().name(), TypeType::kName);
EXPECT_EQ(Type(TypeType()).name(), TypeType::kName);
}
TEST(TypeType, DebugString) {
{
std::ostringstream out;
out << TypeType();
EXPECT_EQ(out.str(), TypeType::kName);
}
{
std::ostringstream out;
out << Type(TypeType());
EXPECT_EQ(out.str(), TypeType::kName);
}
}
TEST(TypeType, Hash) {
EXPECT_EQ(absl::HashOf(TypeType()), absl::HashOf(TypeType()));
}
TEST(TypeType, Equal) {
EXPECT_EQ(TypeType(), TypeType());
EXPECT_EQ(Type(TypeType()), TypeType());
EXPECT_EQ(TypeType(), Type(TypeType()));
EXPECT_EQ(Type(TypeType()), Type(TypeType()));
}
}
}
https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_type.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/type_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f
727970ad-3063-4030-bfab-9ed7b4be0677 | cpp | tensorflow/tensorflow | backports | tensorflow/tools/graph_transforms/backports.cc | tensorflow/tools/graph_transforms/backports_test.cc
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
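// Rewrites ConcatV2 nodes as Concat: the axis input moves from the last
// position to the first, and the Tidx attribute, which Concat does not have,
// is dropped.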
Status BackportConcatV2Transform(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def, {"ConcatV2"},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& concat_v2_node = match.node;
NodeDef concat_node = concat_v2_node;
concat_node.set_op("Concat");
concat_node.mutable_input()->Clear();
const string& dim_input =
concat_v2_node.input(concat_v2_node.input_size() - 1);
concat_node.add_input(dim_input);
for (int i = 0; i < (concat_v2_node.input_size() - 1); ++i) {
concat_node.add_input(concat_v2_node.input(i));
}
concat_node.mutable_attr()->erase("Tidx");
new_nodes->push_back(concat_node);
return OkStatus();
},
{true}, output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("backport_concatv2", BackportConcatV2Transform);
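// Downgrades TensorArray V3 ops to their V2 equivalents. V2 tensor arrays
// have no flow output, so each TensorArrayV3/TensorArrayGradV3 flow output
// (output 1) is replaced by a constant 1.0f that consumers are rewired to;
// the remaining *V3 ops are then renamed to their *V2 counterparts.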
Status BackportTensorArrayV3Transform(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
std::map<string, string> inputs_to_rename;
GraphDef replaced_graph_def;
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def, {"TensorArrayV3|TensorArrayGradV3"},
[&inputs_to_rename](const NodeMatch& match,
const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& tensor_array_v3_node = match.node;
NodeDef tensor_array_v2_node = tensor_array_v3_node;
if (tensor_array_v3_node.op() == "TensorArrayV3") {
tensor_array_v2_node.set_op("TensorArrayV2");
} else {
tensor_array_v2_node.set_op("TensorArrayGradV2");
}
NodeDef replacement_flow_node;
replacement_flow_node.set_op("Const");
SetNodeAttr("dtype", DT_FLOAT, &replacement_flow_node);
replacement_flow_node.set_name(tensor_array_v3_node.name() +
"/replacement_flow_node");
Tensor replacement_flow_tensor(DT_FLOAT, {});
replacement_flow_tensor.flat<float>()(0) = 1.0f;
SetNodeTensorAttr<float>("value", replacement_flow_tensor,
&replacement_flow_node);
inputs_to_rename[tensor_array_v3_node.name() + ":1"] =
replacement_flow_node.name();
new_nodes->push_back(tensor_array_v2_node);
new_nodes->push_back(replacement_flow_node);
return OkStatus();
},
{true}, &replaced_graph_def));
GraphDef renamed_graph_def;
TF_RETURN_IF_ERROR(RenameNodeInputs(replaced_graph_def, inputs_to_rename,
std::unordered_set<string>(),
&renamed_graph_def));
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
renamed_graph_def,
{"TensorArrayWriteV3|TensorArrayReadV3|TensorArrayGatherV3|"
"TensorArrayScatterV3|TensorArrayConcatV3|TensorArraySplitV3|"
"TensorArraySizeV3|TensorArrayCloseV3"},
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
const NodeDef& v3_node = match.node;
NodeDef v2_node = v3_node;
v2_node.set_op(v3_node.op().substr(0, v3_node.op().size() - 1) + "2");
new_nodes->push_back(v2_node);
return OkStatus();
},
{true}, output_graph_def));
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("backport_tensor_array_v3",
BackportTensorArrayV3Transform);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status BackportConcatV2Transform(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
Status BackportTensorArrayV3Transform(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class BackportConcatV2Test : public ::testing::Test {
protected:
void TestBackportConcatV2() {
GraphDef graph_def;
NodeDef* mul_node1 = graph_def.add_node();
mul_node1->set_name("mul_node1");
mul_node1->set_op("Mul");
mul_node1->add_input("add_node2");
mul_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* concat_node = graph_def.add_node();
concat_node->set_name("concat_node");
concat_node->set_op("ConcatV2");
concat_node->add_input("const_node1");
concat_node->add_input("const_node2");
concat_node->add_input("const_node3");
SetNodeAttr("Tidx", DT_INT32, concat_node);
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"concat_node"};
TF_ASSERT_OK(BackportConcatV2Transform(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("concat_node"));
EXPECT_EQ("Concat", node_lookup.at("concat_node")->op());
EXPECT_EQ(0, node_lookup.at("concat_node")->attr().count("Tidx"));
EXPECT_EQ("const_node3", node_lookup.at("concat_node")->input(0));
EXPECT_EQ("const_node1", node_lookup.at("concat_node")->input(1));
EXPECT_EQ("const_node2", node_lookup.at("concat_node")->input(2));
EXPECT_EQ(1, node_lookup.count("const_node1"));
EXPECT_EQ("Const", node_lookup.at("const_node1")->op());
EXPECT_EQ(1, node_lookup.count("const_node2"));
EXPECT_EQ("Const", node_lookup.at("const_node2")->op());
EXPECT_EQ(1, node_lookup.count("const_node3"));
EXPECT_EQ("Const", node_lookup.at("const_node3")->op());
}
};
TEST_F(BackportConcatV2Test, TestBackportConcatV2) { TestBackportConcatV2(); }
TEST(BackportTensorArrayV3Test, TestBackportTensorArrayV3) {
GraphDef graph_def;
NodeDef* size_node = graph_def.add_node();
size_node->set_name("size_node");
size_node->set_op("Const");
Tensor size_tensor(DT_INT32, {});
size_tensor.flat<int32>()(0) = 1;
SetNodeTensorAttr<float>("value", size_tensor, size_node);
NodeDef* tensor_array_node = graph_def.add_node();
tensor_array_node->set_name("tensor_array_node");
tensor_array_node->set_op("TensorArrayV3");
tensor_array_node->add_input("size_node");
SetNodeAttr("dtype", DT_FLOAT, tensor_array_node);
SetNodeAttr("element_shape", TensorShape({1, 2}), tensor_array_node);
SetNodeAttr("dynamic_size", false, tensor_array_node);
SetNodeAttr("clear_after_read", true, tensor_array_node);
SetNodeAttr("tensor_array_name", "some_name", tensor_array_node);
NodeDef* handle_output_node = graph_def.add_node();
handle_output_node->set_name("handle_output_node");
handle_output_node->set_op("Identity");
handle_output_node->add_input("tensor_array_node:0");
NodeDef* flow_output_node = graph_def.add_node();
flow_output_node->set_name("flow_output_node");
flow_output_node->set_op("Identity");
flow_output_node->add_input("tensor_array_node:1");
NodeDef* tensor_array_grad_node = graph_def.add_node();
tensor_array_grad_node->set_name("tensor_array_grad_node");
tensor_array_grad_node->set_op("TensorArrayGradV3");
tensor_array_grad_node->add_input("tensor_array_node:0");
tensor_array_grad_node->add_input("tensor_array_node:1");
SetNodeAttr("source", "foo", tensor_array_grad_node);
NodeDef* grad_handle_output_node = graph_def.add_node();
grad_handle_output_node->set_name("grad_handle_output_node");
grad_handle_output_node->set_op("Identity");
grad_handle_output_node->add_input("tensor_array_grad_node:0");
NodeDef* grad_flow_output_node = graph_def.add_node();
grad_flow_output_node->set_name("grad_flow_output_node");
grad_flow_output_node->set_op("Identity");
grad_flow_output_node->add_input("tensor_array_grad_node:1");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"handle_output_node", "grad_handle_output_node"};
TF_ASSERT_OK(BackportTensorArrayV3Transform(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
ASSERT_EQ(1, node_lookup.count("tensor_array_node"));
EXPECT_EQ("TensorArrayV2", node_lookup.at("tensor_array_node")->op());
EXPECT_EQ("TensorArrayGradV2",
node_lookup.at("tensor_array_grad_node")->op());
for (const NodeDef& node : result.node()) {
for (const string& input : node.input()) {
EXPECT_NE("tensor_array_node:1", input);
}
}
}
TEST(BackportTensorArrayV3Test, TestBackportTensorArrayV3Subtypes) {
const std::vector<string> v3_ops = {
"TensorArrayWriteV3", "TensorArrayReadV3", "TensorArrayGatherV3",
"TensorArrayScatterV3", "TensorArrayConcatV3", "TensorArraySplitV3",
"TensorArraySizeV3", "TensorArrayCloseV3"};
for (const string& v3_op : v3_ops) {
GraphDef graph_def;
NodeDef* v3_node = graph_def.add_node();
v3_node->set_name("v3_node");
v3_node->set_op(v3_op);
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {""};
TF_ASSERT_OK(BackportTensorArrayV3Transform(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
ASSERT_EQ(1, node_lookup.count("v3_node"));
EXPECT_TRUE(absl::EndsWith(node_lookup.at("v3_node")->op(), "V2"));
}
}
}
}
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/backports.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/backports_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
04d9dd6c-f45e-463d-9604-dc577fbebea4 | cpp | google/arolla | weak_qtype_operators | arolla/expr/operators/weak_qtype_operators.cc | arolla/expr/operators/weak_qtype_operators_test.cc
#include "arolla/expr/operators/weak_qtype_operators.h"
#include <cstdint>
#include <memory>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/type_meta_eval_strategies.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::CallOp;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr::ExprOperatorSignature;
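// core.to_weak_float: casts numeric, boolean, or uint64 values to the weak
// float type, preserving the optional or array shape of the input. Lowering
// emits a core.to_float64 cast followed by a derived-qtype downcast to the
// weak float output type.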
class CoreToWeakFloatOp final : public expr::BasicExprOperator {
public:
CoreToWeakFloatOp()
: expr::BasicExprOperator(
"core.to_weak_float", ExprOperatorSignature{{"x"}},
"Casts a floating point value to the corresponding weak float "
"type.",
FingerprintHasher("::arolla::expr_operators::CoreToWeakFloatOp")
.Finish()) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> inputs) const override {
ASSIGN_OR_RETURN(auto scalar_type, GetScalarQType(inputs[0]));
if (!(IsNumeric(scalar_type) || IsBoolean(scalar_type) ||
scalar_type == GetQType<uint64_t>())) {
return absl::InvalidArgumentError(absl::StrFormat(
"expected a numeric or boolean number, got: %s", inputs[0]->name()));
}
if (IsOptionalQType(inputs[0])) {
return GetOptionalWeakFloatQType();
}
if (IsArrayLikeQType(inputs[0])) {
ASSIGN_OR_RETURN(auto shape_qtype, GetShapeQType(inputs[0]));
return shape_qtype->WithValueQType(GetWeakFloatQType());
}
return GetWeakFloatQType();
}
absl::StatusOr<ExprNodePtr> ToLowerLevel(
const ExprNodePtr& node) const final {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
auto op =
std::make_shared<expr::DerivedQTypeDowncastOperator>(node->qtype());
return CallOp(op, {CallOp("core.to_float64", {node->node_deps()[0]})});
}
};
}
absl::StatusOr<ExprOperatorPtr> MakeCoreToWeakFloatOperator() {
return std::make_shared<CoreToWeakFloatOp>();
}
}
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/array/array.h"
#include "arolla/array/qtype/types.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/operators/bootstrap_operators.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/qtype/weak_qtype.h"
namespace arolla::expr_operators {
namespace {
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::testing::InvokeExprOperator;
TEST(WeakQTypeOperatorsTest, ToWeakFloat) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr to_weak_float, GetCoreToWeakFloat());
ASSERT_OK_AND_ASSIGN(auto res,
InvokeExprOperator<TypedValue>(to_weak_float, 1.0));
EXPECT_EQ(res.GetType(), GetWeakFloatQType());
}
TEST(WeakQTypeOperatorsTest, ToWeakFloat_Float32) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr to_weak_float, GetCoreToWeakFloat());
ASSERT_OK_AND_ASSIGN(auto res,
InvokeExprOperator<TypedValue>(to_weak_float, 1.0f));
EXPECT_EQ(res.GetType(), GetWeakFloatQType());
}
TEST(WeakQTypeOperatorsTest, ToWeakFloat_Optional) {
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr to_weak_float, GetCoreToWeakFloat());
ASSERT_OK_AND_ASSIGN(auto res, InvokeExprOperator<TypedValue>(
to_weak_float, OptionalValue<float>(1.0)));
EXPECT_EQ(res.GetType(), GetOptionalWeakFloatQType());
}
TEST(WeakQTypeOperatorsTest, ToWeakFloat_Array) {
GetArrayWeakFloatQType();
ASSERT_OK_AND_ASSIGN(ExprOperatorPtr to_weak_float, GetCoreToWeakFloat());
auto values = CreateArray<float>({1, std::nullopt, std::nullopt, 2});
ASSERT_OK_AND_ASSIGN(auto res,
InvokeExprOperator<TypedValue>(to_weak_float, values));
EXPECT_EQ(res.GetType(), GetArrayWeakFloatQType());
}
}  // namespace
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/weak_qtype_operators.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/weak_qtype_operators_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
853f3f85-8a15-440b-a243-417ea3be940b | cpp | tensorflow/tensorflow | priority_fusion | third_party/xla/xla/service/gpu/transforms/priority_fusion.cc | third_party/xla/xla/service/gpu/transforms/priority_fusion_test.cc | #include "xla/service/gpu/transforms/priority_fusion.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusion_deduplication_cache.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/fusions/triton/triton_support.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/triton_emitter_constraints.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
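// Returns true if the instruction is eligible for fusion: elementwise ops,
// non-custom fusions, and a fixed allowlist of data-movement, shape, and
// reduction opcodes.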
bool IsFusible(const HloInstruction& instr) {
if (!instr.IsFusible()) {
return false;
}
if (instr.IsElementwise()) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kFusion:
return instr.fusion_kind() != HloInstruction::FusionKind::kCustom;
case HloOpcode::kCopy:
case HloOpcode::kIota:
case HloOpcode::kConstant:
case HloOpcode::kReduce:
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kScatter:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
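// Builds a GpuBackendConfig that marks a fusion as a Triton fusion with the
// given block-level tiling parameters.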
GpuBackendConfig GetTritonGpuBackendConfig(
const BlockLevelParameters& block_level_parameters) {
GpuBackendConfig gpu_backend_config;
gpu_backend_config.mutable_fusion_backend_config()->set_kind(
std::string(kTritonFusionKind));
*gpu_backend_config.mutable_fusion_backend_config()
->mutable_block_level_fusion_config() =
block_level_parameters.ToBlockLevelFusionConfig();
return gpu_backend_config;
}
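// Work queue for priority fusion. Producers are ordered by the estimated run
// time saved by fusing them into all of their consumers; priorities are
// updated incrementally as fusions change the graph.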
class PriorityFusionQueue {
using Priority = absl::Duration;
  using CanFuseCallback = std::function<FusionDecision(
      HloInstruction* /*producer*/, int64_t /*consumer_operand_index*/)>;
public:
PriorityFusionQueue(HloComputation* computation,
const GpuHloCostAnalysis::Options& cost_analysis_options,
const se::DeviceDescription* device_info,
FusionProcessDumpProto* fusion_process_dump,
tsl::thread::ThreadPool* thread_pool,
mlir::MLIRContext* mlir_context,
HloFusionAnalysisCache& fusion_analysis_cache,
FusionDeduplicationCache& fusion_deduplication_cache,
bool triton_softmax_priority_fusion_enabled)
: computation_(computation),
device_info_(device_info),
cost_analysis_(cost_analysis_options, *device_info),
gpu_indexing_performance_model_(device_info, &fusion_analysis_cache,
cost_analysis_options.shape_size,
mlir_context),
fusion_process_dump_(fusion_process_dump),
thread_pool_(thread_pool),
mlir_context_(mlir_context),
fusion_analysis_cache_(fusion_analysis_cache),
fusion_deduplication_cache_(fusion_deduplication_cache),
triton_softmax_priority_fusion_enabled_(
triton_softmax_priority_fusion_enabled) {
VLOG(2) << "Running full HLO cost analysis for " << computation_->name();
TF_CHECK_OK(computation_->Accept(&cost_analysis_));
dump_fusion_visualization_ = computation->parent()
->config()
.debug_options()
.xla_dump_fusion_visualization();
std::vector<HloInstruction*> instructions;
for (auto* instruction : computation->MakeInstructionPostOrder()) {
TF_CHECK_OK(UpdatePerformanceModelCache(instruction));
if (instruction->opcode() == HloOpcode::kParameter ||
instruction->user_count() == 0 || !instruction->IsFusible() ||
instruction->opcode() == HloOpcode::kTuple ||
instruction->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
instructions.push_back(instruction);
}
ComputeAndSetPriorities(instructions);
}
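  // (Re)computes priorities for `instructions` and updates their positions in
  // the queue; instructions with a negative priority are left out of it.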
void ComputeAndSetPriorities(
const std::vector<HloInstruction*>& instructions) {
std::vector<Priority> priorities = ComputePriorities(instructions);
for (auto [instruction, priority] : llvm::zip(instructions, priorities)) {
auto key = std::make_pair(priority, instruction->unique_id());
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it != reverse_map_.end()) {
const PriorityQueue::iterator& queue_it = reverse_it->second;
if (key == queue_it->first) {
continue;
}
producer_priority_queue_.erase(queue_it);
reverse_map_.erase(reverse_it);
}
if (priority < absl::ZeroDuration()) {
continue;
}
auto emplace_result = producer_priority_queue_.emplace(key, instruction);
reverse_map_.emplace(instruction, emplace_result.first);
}
}
std::vector<Priority> ComputePriorities(
const std::vector<HloInstruction*>& instructions) {
auto schedule_or_run = [this](std::function<void()> fn) {
if (thread_pool_) {
thread_pool_->Schedule(std::move(fn));
} else {
fn();
}
};
tsl::BlockingCounter counter(instructions.size());
std::vector<Priority> priorities(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
schedule_or_run([&, i] {
priorities[i] = CalculateProducerPriority(instructions[i]);
counter.DecrementCount();
});
}
counter.Wait();
return priorities;
}
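  // Pops the highest-priority producer and collects its consumers for fusion.
  // Returns false once the queue is exhausted.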
bool DequeueNextProducer() {
current_producer_ = nullptr;
current_consumers_.clear();
while (!producer_priority_queue_.empty() && current_consumers_.empty()) {
auto next_it = std::prev(producer_priority_queue_.end());
current_producer_ = next_it->second;
producer_priority_queue_.erase(next_it);
reverse_map_.erase(current_producer_);
current_consumers_ = current_producer_->users();
if (current_producer_->opcode() == HloOpcode::kBitcast) {
llvm::erase_if(current_consumers_, [&](HloInstruction* consumer) {
return !CanFuseCached(current_producer_, consumer);
});
}
}
return !current_consumers_.empty();
}
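  // Ensures the performance model cache holds a run time estimate for
  // `producer`, using the Triton indexing model for generic Triton fusions.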
absl::Status UpdatePerformanceModelCache(HloInstruction* producer) {
bool is_triton_fusion = IsGenericTritonFusion(*producer);
if (!IsFusible(*producer) && !is_triton_fusion) {
return absl::OkStatus();
}
if (gpu_performance_model_cache_.Get(*producer)) {
return absl::OkStatus();
}
EstimateRunTimeData runtime_data;
if (is_triton_fusion) {
TF_ASSIGN_OR_RETURN(
runtime_data,
gpu_indexing_performance_model_.EstimateRunTimeForTriton(producer));
} else {
auto config = GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_);
runtime_data = GpuPerformanceModel::EstimateRunTimeForInstruction(
producer, *device_info_, &cost_analysis_, config);
}
gpu_performance_model_cache_.Set(*producer, runtime_data);
return absl::OkStatus();
}
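  // Revisits cost analysis and recomputes queue priorities for every
  // instruction touched by the last fusion step.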
absl::Status UpdatePriorities() {
for (auto instruction : to_update_priority_) {
TF_RETURN_IF_ERROR(cost_analysis_.RevisitInstruction(instruction));
}
for (auto producer : to_update_priority_) {
TF_RETURN_IF_ERROR(UpdatePerformanceModelCache(producer));
}
ComputeAndSetPriorities(std::vector<HloInstruction*>{
to_update_priority_.begin(), to_update_priority_.end()});
to_update_priority_.clear();
operands_to_new_consumers_.clear();
operands_to_removed_consumers_runtimes_.clear();
return absl::OkStatus();
}
void PreFusion(HloInstruction* producer, HloInstruction* consumer) {
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("About to fuse |", producer->name(), "| into |",
consumer->name(), "| inside PriorityFusion"),
*consumer, producer);
}
}
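  // Evicts all cached analyses and fusion decisions that involve
  // `instruction`, so they are recomputed after the graph changes.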
void InvalidateCaches(HloInstruction* instruction) {
can_fuse_cache_.erase(instruction);
for (const HloInstruction* operand : instruction->operands()) {
auto it = can_fuse_cache_.find(operand);
if (it != can_fuse_cache_.end()) {
it->second.erase(instruction);
}
}
block_level_parameters_cache_.erase(instruction);
for (const HloInstruction* operand : instruction->operands()) {
auto it = block_level_parameters_cache_.find(operand);
if (it != block_level_parameters_cache_.end()) {
it->second.erase(instruction);
}
}
gpu_performance_model_cache_.Invalidate(*instruction);
fusion_analysis_cache_.Invalidate(*instruction);
fusion_info_cache_.Invalidate(instruction);
}
void UpdateRuntimes(
GpuPerformanceModel::RunTimes& runtimes, const HloInstruction* consumer,
const absl::flat_hash_map<const HloInstruction*, absl::Duration>&
original_consumers) {
auto it = original_consumers.find(consumer);
if (it != original_consumers.end()) {
runtimes.time_fused += it->second;
auto consumer_cache_result = gpu_performance_model_cache_.Get(*consumer);
CHECK(consumer_cache_result.has_value());
runtimes.time_unfused += (*consumer_cache_result).exec_time;
}
}
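  // Records, for each operand that loses consumers in the current fusion
  // step, the run times of those consumers, enabling incremental priority
  // updates instead of full recomputation.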
void ComputeRuntimesOfRemovedConsumers() {
for (const auto& pair : operands_to_new_consumers_) {
auto operand = pair.first;
if (!reverse_map_.contains(operand)) {
continue;
}
if (!gpu_performance_model_cache_.ContainsConsumers(*operand)) {
continue;
}
const auto& original_consumers =
gpu_performance_model_cache_.GetAllConsumers(*operand);
GpuPerformanceModel::RunTimes runtimes;
for (auto consumer : current_consumers()) {
UpdateRuntimes(runtimes, consumer, original_consumers);
}
UpdateRuntimes(runtimes, current_producer(), original_consumers);
auto operand_cache_result = gpu_performance_model_cache_.Get(*operand);
runtimes.time_unfused += (*operand_cache_result).exec_time +
GpuPerformanceModel::kKernelLaunchOverhead;
operands_to_removed_consumers_runtimes_.emplace(operand, runtimes);
}
}
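  // Bookkeeping after `original_producer` was fused into `original_consumer`:
  // logs the step, removes the replaced consumer, and marks the new fusion and
  // its fusible operands for a priority update.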
void OnFusingInstruction(HloInstruction* fusion,
HloInstruction* original_producer,
HloInstruction* original_consumer) {
if (fusion_process_dump_) {
auto* fusion_step =
fusion_process_dump_->add_fusion_steps()->mutable_fusion();
fusion_step->set_fusion_name(std::string(fusion->name()));
fusion_step->set_producer_name(std::string(original_producer->name()));
fusion_step->set_consumer_name(std::string(original_consumer->name()));
}
if (dump_fusion_visualization_) {
RegisterFusionState(
*computation_,
absl::StrCat("Fused |", original_producer->name(), "| into |",
fusion->name(), "| inside PriorityFusion"),
*fusion);
}
if (fusion != original_consumer) {
RemoveInstruction(original_consumer);
}
for (HloInstruction* operand : fusion->operands()) {
if (operand == original_producer ||
operand->opcode() == HloOpcode::kConstant ||
operand->opcode() == HloOpcode::kGetTupleElement) {
continue;
}
if (!operand->IsFusible()) {
continue;
}
to_update_priority_.insert(operand);
operands_to_new_consumers_[operand].push_back(fusion);
}
to_update_priority_.insert(fusion);
}
void RemoveInstruction(HloInstruction* instruction) {
to_update_priority_.erase(instruction);
fusion_analysis_cache_.Invalidate(*instruction);
auto reverse_it = reverse_map_.find(instruction);
if (reverse_it == reverse_map_.end()) {
return;
}
producer_priority_queue_.erase(reverse_it->second);
reverse_map_.erase(reverse_it);
}
absl::flat_hash_map<const HloInstruction*, BlockLevelParameters>
GetBlockLevelParametersMap(const HloInstruction* producer) {
auto it = block_level_parameters_cache_.find(producer);
if (it == block_level_parameters_cache_.end()) {
return {};
}
return it->second;
}
HloInstruction* current_producer() { return current_producer_; }
const std::vector<HloInstruction*>& current_consumers() {
return current_consumers_;
}
private:
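  // Returns the priority of `producer`: the estimated run time saved by
  // fusing it into all of its users. Bitcasts always get the highest
  // priority, and constants the lowest; producers that cannot be fused with
  // all of their non-bitcast users are also given the lowest priority.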
Priority CalculateProducerPriority(HloInstruction* producer) {
if (producer->opcode() == HloOpcode::kBitcast) {
return absl::InfiniteDuration();
}
if (producer->opcode() == HloOpcode::kConstant) {
return -absl::InfiniteDuration();
}
if (auto fusion_decision = CanFuseWithAllNonBitcastUsers(producer);
!fusion_decision) {
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step = fusion_process_dump_->add_fusion_steps()
->mutable_producer_ineligible();
step->set_producer_name(std::string(producer->name()));
step->set_reason(fusion_decision.Explain());
}
return -absl::InfiniteDuration();
}
auto removed_consumers_runtime_it =
operands_to_removed_consumers_runtimes_.find(producer);
bool is_incremental_update = removed_consumers_runtime_it !=
operands_to_removed_consumers_runtimes_.end();
absl::Span<HloInstruction* const> fused_consumers =
is_incremental_update
? operands_to_new_consumers_.find(producer)->second
: absl::MakeConstSpan(producer->users());
GpuPerformanceModel::RunTimes run_times =
GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
producer, *device_info_, &cost_analysis_,
GpuPerformanceModelOptions::PriorityFusion(
&fusion_analysis_cache_, &gpu_performance_model_cache_),
fused_consumers);
Priority current_priority;
if (is_incremental_update) {
const GpuPerformanceModel::RunTimes& removed_consumers_runtime =
removed_consumers_runtime_it->second;
run_times.time_unfused -= removed_consumers_runtime.time_unfused;
run_times.time_fused -= removed_consumers_runtime.time_fused;
const PriorityQueue::iterator& queue_it =
FindOrDie(reverse_map_, producer);
current_priority = queue_it->first.first;
}
if (fusion_process_dump_) {
absl::MutexLock lock(&fusion_process_dump_mutex_);
auto* step =
fusion_process_dump_->add_fusion_steps()->mutable_update_priority();
step->set_producer_name(std::string(producer->name()));
for (auto* consumer : producer->users()) {
step->add_consumer_names(std::string(consumer->name()));
}
step->set_us_fused(absl::ToDoubleMicroseconds(run_times.time_fused));
step->set_us_unfused(absl::ToDoubleMicroseconds(run_times.time_unfused));
}
return current_priority + run_times.time_unfused - run_times.time_fused;
}
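  // Checks that `instruction` (or, for fusions, every fused instruction) is
  // supported by the Triton emitter on this GPU.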
FusionDecision IsTritonSupported(const HloInstruction& instruction) {
if (instruction.opcode() != HloOpcode::kFusion) {
return IsTritonSupportedInstruction(
instruction, device_info_->gpu_compute_capability());
}
for (const HloInstruction* instruction :
instruction.fused_instructions_computation()->instructions()) {
if (auto codegen_decision = IsTritonSupportedInstruction(
*instruction, device_info_->gpu_compute_capability());
!codegen_decision) {
return codegen_decision;
}
}
return FusionDecision::Allow();
}
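  // Returns the best tiling for the producer-consumer fusion, or a
  // FusionDecision explaining why tiling failed; results are cached by
  // fusion id.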
TiledRunTimeDataOrError GetTiledRunTimeDataCached(
const HloInstruction* producer, const HloInstruction* consumer) {
FusionDeduplicationCache::FusionId fusion_id = [&]() {
absl::MutexLock lock(&fusion_deduplication_cache_mutex_);
return fusion_deduplication_cache_.GetFusionId(*producer, *consumer);
}();
{
absl::MutexLock lock(&tiled_run_time_data_cache_mutex_);
auto it = tiled_run_time_data_cache_.find(fusion_id);
if (it != tiled_run_time_data_cache_.end()) {
return it->second;
}
}
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
absl::StatusOr<TiledRunTimeDataOrError> result_or_status =
gpu_indexing_performance_model_.TryFindBestTilingForFusion(*fusion);
TiledRunTimeDataOrError tiled_run_time_data_or_error =
[&]() -> TiledRunTimeDataOrError {
if (result_or_status.ok()) {
return *result_or_status;
} else {
return FusionDecision::Forbid(
absl::StrCat("TiledRunTimeDataOrError return status: ",
result_or_status.status().message()));
}
}();
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&tiled_run_time_data_or_error)) {
tiled_run_time_data_or_error = FusionDecision::Forbid(
absl::StrCat("Fusion can not be tiled with SymbolicTileAnalysis: ",
fusion_decision->Explain()));
}
absl::MutexLock lock(&tiled_run_time_data_cache_mutex_);
tiled_run_time_data_cache_.emplace(fusion_id, tiled_run_time_data_or_error);
return tiled_run_time_data_or_error;
}
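  // Decides whether the producer-consumer pair can be emitted as a single
  // Triton fusion; on success, caches the chosen block-level parameters.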
FusionDecision CanFuseTriton(HloInstruction* producer,
HloInstruction* consumer) {
if (!triton_softmax_priority_fusion_enabled_) {
return FusionDecision::Forbid("triton softmax fusion is not enabled");
}
if (IsGenericTritonFusion(*producer)) {
if (!IsFusible(*consumer)) {
return FusionDecision::Forbid("the consumer is not fusible");
}
if (auto fusion_decision = IsTritonSupported(*consumer);
!fusion_decision) {
return fusion_decision;
}
} else {
if (!IsFusible(*producer)) {
return FusionDecision::Forbid("the producer is not fusible");
}
if (auto fusion_decision = IsTritonSupported(*producer);
!fusion_decision) {
return fusion_decision;
}
}
TiledRunTimeDataOrError tiled_run_time_data_or_error =
GetTiledRunTimeDataCached(producer, consumer);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&tiled_run_time_data_or_error)) {
return *fusion_decision;
}
TiledRunTimeData tiled_run_time_data =
std::get<TiledRunTimeData>(std::move(tiled_run_time_data_or_error));
gpu_performance_model_cache_.Set(
*producer, *consumer, tiled_run_time_data.runtime_data.exec_time);
{
absl::MutexLock lock(&block_level_parameters_cache_mutex_);
block_level_parameters_cache_[producer][consumer] =
tiled_run_time_data.block_level_parameters;
}
return FusionDecision::Allow();
}
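  // Decides whether fusing `producer` into `consumer` is legal and likely
  // profitable; pairs involving generic Triton fusions are delegated to
  // CanFuseTriton.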
FusionDecision CanFuse(HloInstruction* producer, HloInstruction* consumer) {
if (IsGenericTritonFusion(*producer) || IsGenericTritonFusion(*consumer)) {
return CanFuseTriton(producer, consumer);
}
if (!IsFusible(*producer)) {
return FusionDecision::Forbid("the producer is not fusible");
}
if (!IsFusible(*consumer)) {
return FusionDecision::Forbid("the consumer is not fusible");
}
if (consumer->opcode() == HloOpcode::kBitcast) {
return FusionDecision::Forbid(
"not fusing into a single bitcast as consumer");
}
if (auto can_fuse = CanEmitInputFusedScatter(*producer, *consumer);
!can_fuse) {
return can_fuse;
}
auto contains_significant_reduce = [&](const HloInstruction* instr) {
auto fusion = HloFusionAdaptor::ForInstruction(instr);
return HloAnyOf(*fusion, [](auto node) {
if (!(node.opcode() == HloOpcode::kReduce && node.shape().IsArray())) {
return false;
}
int64_t reduction_size =
ShapeUtil::ElementsIn(node.instruction().operand(0)->shape()) /
ShapeUtil::ElementsIn(node.shape());
return reduction_size >= 16;
});
};
if (contains_significant_reduce(producer) &&
contains_significant_reduce(consumer)) {
return FusionDecision::Forbid(
"both the producer and the consumer contain a reduce");
}
const auto& analysis = fusion_analysis_cache_.Get(*producer);
if (analysis.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kReduction) {
const auto& analysis_fused =
fusion_analysis_cache_.Get(*producer, *consumer);
if (analysis_fused.GetEmitterFusionKind() ==
HloFusionAnalysis::EmitterFusionKind::kLoop) {
return FusionDecision::Forbid(
"fusion into output of a reduce fusion would create a loop fusion");
}
}
if (auto fits_budget = FusionFitsInBudget(
*consumer, *producer, *device_info_,
            /*is_consumer_producer_fusion=*/true, &fusion_info_cache_);
!fits_budget) {
return fits_budget;
}
if (cost_analysis_.ProducerConsumerMergedTooLarge(*producer, *consumer)) {
return FusionDecision::Forbid(
"the fusion would result in an overly large code duplication");
}
if (producer == producer->parent()->root_instruction()) {
return FusionDecision::Forbid(
"not fusing into the output of the root instruction");
}
return InstructionFusion::ShouldFuseInPlaceOp(producer, consumer);
}
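  // Memoized wrapper around CanFuse, keyed by the (producer, consumer) pair.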
FusionDecision CanFuseCached(HloInstruction* producer,
HloInstruction* consumer) {
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
auto& producer_cache = can_fuse_cache_[producer];
auto it = producer_cache.find(consumer);
if (it != producer_cache.end()) {
return it->second;
}
}
auto fusion_decision = CanFuse(producer, consumer);
{
absl::MutexLock lock(&can_fuse_cache_mutex_);
can_fuse_cache_[producer].insert_or_assign(consumer, fusion_decision);
}
return fusion_decision;
}
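  // A producer is only worth queueing if it can be fused into every one of
  // its non-bitcast users.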
FusionDecision CanFuseWithAllNonBitcastUsers(HloInstruction* producer) {
if (producer->users().empty()) {
return FusionDecision::Forbid("No users to fuse");
}
bool has_non_bitcast_user = false;
for (const auto& user : producer->users()) {
if (user->opcode() == HloOpcode::kBitcast) {
continue;
}
has_non_bitcast_user = true;
if (auto fusion_decision = CanFuseCached(producer, user);
!fusion_decision) {
VLOG(10) << "Cannot fuse " << producer->name() << " with "
<< user->name() << ", because: " << fusion_decision.Explain();
return fusion_decision;
}
}
if (!has_non_bitcast_user) {
return FusionDecision::Forbid(
"not fusing because there are only bitcast users");
}
return FusionDecision::Allow();
}
HloComputation* computation_;
const se::DeviceDescription* device_info_;
GpuHloCostAnalysis cost_analysis_;
GpuPerformanceModelWithIndexingAnalysis gpu_indexing_performance_model_;
using PriorityQueue = std::map<std::pair<Priority, int>, HloInstruction*>;
PriorityQueue producer_priority_queue_;
absl::flat_hash_map<HloInstruction*, PriorityQueue::iterator> reverse_map_;
HloInstruction* current_producer_;
std::vector<HloInstruction*> current_consumers_;
absl::flat_hash_set<HloInstruction*> to_update_priority_;
absl::flat_hash_map<HloInstruction*, std::vector<HloInstruction*>>
operands_to_new_consumers_;
absl::flat_hash_map<HloInstruction*, GpuPerformanceModel::RunTimes>
operands_to_removed_consumers_runtimes_;
FusionProcessDumpProto* fusion_process_dump_;
absl::Mutex fusion_process_dump_mutex_;
tsl::thread::ThreadPool* thread_pool_;
mlir::MLIRContext* mlir_context_;
HloFusionAnalysisCache& fusion_analysis_cache_;
FusionDeduplicationCache& fusion_deduplication_cache_;
absl::Mutex fusion_deduplication_cache_mutex_;
absl::flat_hash_map<
const HloInstruction*,
absl::flat_hash_map<const HloInstruction*, FusionDecision>>
can_fuse_cache_;
absl::Mutex can_fuse_cache_mutex_;
absl::flat_hash_map<
const HloInstruction*,
absl::flat_hash_map<const HloInstruction*, BlockLevelParameters>>
block_level_parameters_cache_;
absl::Mutex block_level_parameters_cache_mutex_;
absl::flat_hash_map<FusionDeduplicationCache::FusionId,
TiledRunTimeDataOrError>
tiled_run_time_data_cache_;
absl::Mutex tiled_run_time_data_cache_mutex_;
GpuPerformanceModelCache gpu_performance_model_cache_;
FusionInfoCache fusion_info_cache_;
bool triton_softmax_priority_fusion_enabled_;
bool dump_fusion_visualization_;
};
}  // namespace
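// Array-shaped constants with at most one element are cheap enough to
// duplicate into every user.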
bool IsSmallConstant(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kConstant && instr->shape().IsArray() &&
ShapeUtil::ElementsIn(instr->shape()) <= 1;
}
bool PriorityFusion::ConsumeFuel(HloInstruction* producer,
HloInstruction* consumer) {
return xla::ConsumeFuel(name(), [&] {
return absl::StrFormat("Not fusing producer %s with consumer %s",
producer->name(), consumer->name());
});
}
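// Runs priority fusion over every fusible computation: repeatedly dequeues
// the highest-priority producer, fuses it into its consumers, updates
// priorities, and finally fuses small constants into their users.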
absl::StatusOr<bool> PriorityFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool dump_enabled =
DumpingEnabledForHloPass(name(), module->config().debug_options());
if (dump_enabled) {
fusion_process_dump_ = std::make_unique<FusionProcessDumpProto>();
*fusion_process_dump_->mutable_gpu_device_info() =
device_info_.ToGpuProto();
}
auto fusible_computations =
GetFusibleComputations(*module, execution_threads);
for (auto* computation : fusible_computations) {
for (auto* instruction : computation->instructions()) {
module->SetAndUniquifyInstrName(instruction,
absl::StrCat(instruction->name(), ".0"));
}
}
if (dump_enabled) {
fusion_process_dump_->set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
}
bool triton_softmax_priority_fusion_enabled =
module->config()
.debug_options()
.xla_gpu_experimental_enable_triton_softmax_priority_fusion();
FusionDeduplicationCache fusion_deduplication_cache =
FusionDeduplicationCache::Create(*module);
  bool changed = false;
for (auto* computation : fusible_computations) {
CHECK(!computation->IsFusionComputation());
auto fusion_queue = std::make_unique<PriorityFusionQueue>(
computation, cost_analysis_options_, &device_info_,
fusion_process_dump_.get(), thread_pool_, &mlir_context_,
fusion_analysis_cache_, fusion_deduplication_cache,
triton_softmax_priority_fusion_enabled);
while (fusion_queue->DequeueNextProducer()) {
auto producer = fusion_queue->current_producer();
absl::flat_hash_map<const HloInstruction*, BlockLevelParameters>
block_level_parameters_map =
fusion_queue->GetBlockLevelParametersMap(producer);
for (auto* consumer : fusion_queue->current_consumers()) {
if (consumer->opcode() == HloOpcode::kBitcast) {
continue;
}
if (!ConsumeFuel(producer, consumer)) continue;
VLOG(5) << "next: " << consumer->name() << "(" << consumer << ") + "
<< producer->name() << "(" << producer << ")";
int64_t consumer_operand_index = consumer->operand_index(producer);
fusion_queue->PreFusion(producer, consumer);
auto fusion_instruction = Fuse(producer, consumer);
fusion_deduplication_cache.UpdateFusedInstructionId(
*fusion_instruction, *producer, *consumer, consumer_operand_index);
fusion_queue->OnFusingInstruction(fusion_instruction, producer,
consumer);
auto backend_config_it = block_level_parameters_map.find(consumer);
if (backend_config_it != block_level_parameters_map.end()) {
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(
GetTritonGpuBackendConfig(backend_config_it->second)));
}
changed = true;
}
fusion_queue->ComputeRuntimesOfRemovedConsumers();
if (producer->user_count() == 0) {
fusion_queue->InvalidateCaches(producer);
producer->DetachFromOperandsAndUsers();
fusion_queue->RemoveInstruction(producer);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(producer));
}
for (auto* consumer : fusion_queue->current_consumers()) {
fusion_queue->InvalidateCaches(consumer);
}
TF_RETURN_IF_ERROR(fusion_queue->UpdatePriorities());
}
std::vector<HloInstruction*> constants;
for (auto* instruction : computation->instructions()) {
if (IsSmallConstant(instruction)) {
constants.push_back(instruction);
}
}
for (auto* constant : constants) {
auto users = constant->users();
for (auto* user : users) {
if ((IsFusible(*user) || IsGenericTritonFusion(*user)) &&
CanEmitInputFusedScatter(*constant, *user)) {
Fuse(constant, user);
changed = true;
}
}
}
}
fusion_analysis_cache_.Clear();
if (dump_enabled) {
DumpPerModuleProtobufToFile(*module, *fusion_process_dump_,
module->config().debug_options(),
"priority_fusion_dump");
}
return changed;
}
HloInstruction::FusionKind PriorityFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
const auto& analysis = fusion_analysis_cache_.Get(*producer, *consumer);
switch (analysis.GetEmitterFusionKind()) {
case HloFusionAnalysis::EmitterFusionKind::kLoop:
return HloInstruction::FusionKind::kLoop;
case HloFusionAnalysis::EmitterFusionKind::kTriton:
case HloFusionAnalysis::EmitterFusionKind::kCustomFusion:
case HloFusionAnalysis::EmitterFusionKind::kCuDnn:
return HloInstruction::FusionKind::kCustom;
case HloFusionAnalysis::EmitterFusionKind::kConcatenate:
case HloFusionAnalysis::EmitterFusionKind::kReduction:
case HloFusionAnalysis::EmitterFusionKind::kTranspose:
case HloFusionAnalysis::EmitterFusionKind::kInputSlices:
case HloFusionAnalysis::EmitterFusionKind::kScatter:
return HloInstruction::FusionKind::kInput;
}
}
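// Fuses `producer` into `consumer`, creating a new fusion instruction when
// the consumer is not already a fusion and merging fusion computations
// otherwise.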
HloInstruction* PriorityFusion::Fuse(HloInstruction* producer,
HloInstruction* consumer) {
VLOG(2) << "Fusing " << producer->ToString() << " into "
<< consumer->ToString();
HloComputation* computation = consumer->parent();
auto kind = ChooseKind(producer, consumer);
HloInstruction* fusion_instruction = consumer;
if (fusion_instruction->opcode() != HloOpcode::kFusion) {
fusion_instruction = computation->AddInstruction(
HloInstruction::CreateFusion(consumer->shape(), kind, consumer));
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
} else if (kind != fusion_instruction->fusion_kind()) {
fusion_instruction->set_fusion_kind(kind);
}
fusion_instruction->set_called_computations_execution_thread(
computation->execution_thread(),
      /*skip_async_execution_thread_overwrite=*/false);
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (fusion_instruction != consumer) {
VLOG(2) << " created new fusion: " << fusion_instruction->ToString();
}
return fusion_instruction;
}
}  // namespace gpu
} | #include "xla/service/gpu/transforms/priority_fusion.h"
#include <stdint.h>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::IsOkAndHolds;
namespace xla {
namespace gpu {
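// Test fixture that runs PriorityFusion with an RTX A6000 device description
// and a shape size function that assumes 8-byte pointers.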
class PriorityFusionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
std::vector<HloFusionAnalysis::EmitterFusionKind> RunAndGetFusionKinds(
absl::string_view hlo) {
auto module = ParseAndReturnVerifiedModule(hlo).value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
EXPECT_THAT(module->RemoveUnusedComputations(), IsOk());
std::vector<HloFusionAnalysis::EmitterFusionKind> kinds;
for (auto computation : module->computations()) {
if (!computation->FusionInstruction()) continue;
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
*computation->FusionInstruction(), device_info);
kinds.push_back(analysis.GetEmitterFusionKind());
}
return kinds;
}
  PriorityFusion priority_fusion_{
      /*thread_pool=*/nullptr, TestGpuDeviceInfo::RTXA6000DeviceInfo(),
      GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
                                  {},
                                  {},
                                  /*count_multiple_input_accesses=*/true}};
};
class PriorityFusionWithTritonEnabledTest : public PriorityFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = PriorityFusionTest::GetDebugOptionsForTest();
debug_options
.set_xla_gpu_experimental_enable_triton_softmax_priority_fusion(true);
return debug_options;
}
};
TEST_F(PriorityFusionTest, FuseWithSharedArgument) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%subtract = f32[] subtract(%p0, %p1)
%compare = pred[] compare(%subtract, %subtract), direction=NE
%add = f32[] add(%p0, %p1)
%abs = f32[] abs(%subtract)
ROOT %select = f32[] select(%compare, %add, %abs)
})")
.value();
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion()));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop);
}
TEST_F(PriorityFusionTest, FusionFusionWithDuplication) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384]{0} parameter(0)
ROOT m = f32[16384]{0} multiply(p, p)
}
exp {
p = f32[16384]{0} parameter(0)
ROOT e = f32[16384]{0} exponential(p)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
p = f32[16384]{0} parameter(0)
s = f32[16384]{0} fusion(p), kind=kLoop, calls=square
e = f32[16384]{0} fusion(s), kind=kLoop, calls=exp
l = f32[16384]{0} fusion(s), kind=kInput, calls=log
ROOT t = (f32[16384], f32[16384]) tuple(l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[16384]{0} parameter(0)
CHECK-NEXT: %[[FUSION_0:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[FUSION_1:.*]] = f32[16384]{0} fusion(%[[PARAM]])
CHECK-NEXT: ROOT {{.*}} tuple(%[[FUSION_0]], %[[FUSION_1]])
)");
}
TEST_F(PriorityFusionTest, FuseBroadcastIntoBitcastConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
param_0 = f32[96]{0} parameter(0)
broadcast = f32[8,96,128,7]{3,2,1,0} broadcast(param_0), dimensions={1}
bitcast.6079.2 = f32[8,24,4,128,7]{4,3,2,1,0} bitcast(broadcast)
ROOT transpose.1990.2 = f32[8,24,128,7,4]{4,3,2,1,0} transpose(bitcast.6079.2), dimensions={0,1,3,4,2}
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f32[96]{0} parameter(0)
CHECK-NEXT: ROOT %{{.*}} fusion(%[[PARAM]])
)");
}
TEST_F(PriorityFusionTest, FuseWideningConvertIntoConsumers) {
absl::string_view kHlo = R"(
HloModule test_module
ENTRY main {
p = f16[512]{0} parameter(0)
a = f16[512]{0} add(p, p)
c = f32[512]{0} convert(a)
s = f32[512]{0} multiply(c, c)
bc = s32[512]{0} bitcast(c)
ROOT t = (f32[512], s32[512]) tuple(s, bc)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK-NEXT: %[[PARAM:.*]] = f16[512]{0} parameter(0)
CHECK-NEXT: %[[FUSION_F32:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[CONVERT_FUSION:.*]] = f32[512]{0} fusion(%[[PARAM]])
CHECK-NEXT: %[[BITCAST:.*]] = s32[512]{0} bitcast(%[[CONVERT_FUSION]])
CHECK-NEXT: ROOT %{{.*}} = (f32[512]{0}, s32[512]{0}) tuple(%[[FUSION_F32]], %[[BITCAST]])
)");
}
TEST_F(PriorityFusionTest, FuseConvertIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion
)");
}
TEST_F(PriorityFusionTest, ReductionEpilogueFusionRegressionTest) {
absl::string_view kHlo = R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
ENTRY main {
param_1.15162 = f32[2752]{0} parameter(1)
convert.44829 = bf16[2752]{0} convert(param_1.15162)
bitcast.24686 = bf16[1,1,2752]{2,1,0} bitcast(convert.44829)
convert.44468 = f32[1,1,2752]{2,1,0} convert(bitcast.24686)
constant_13722 = bf16[] constant(1)
convert.17451 = f32[] convert(constant_13722)
broadcast.17565 = f32[1,1,2752]{2,1,0} broadcast(convert.17451), dimensions={}
negate.167 = f32[1,1,2752]{2,1,0} negate(convert.44468)
exponential.569 = f32[1,1,2752]{2,1,0} exponential(negate.167)
add.1850 = f32[1,1,2752]{2,1,0} add(broadcast.17565, exponential.569)
divide.1376 = f32[1,1,2752]{2,1,0} divide(broadcast.17565, add.1850)
multiply.9709 = f32[1,1,2752]{2,1,0} multiply(convert.44468, divide.1376)
param_0.15005 = f32[2752]{0} parameter(0)
convert.44826 = bf16[2752]{0} convert(param_0.15005)
bitcast.24683 = bf16[1,1,2752]{2,1,0} bitcast(convert.44826)
convert.44467 = f32[1,1,2752]{2,1,0} convert(bitcast.24683)
multiply.9708 = f32[1,1,2752]{2,1,0} multiply(multiply.9709, convert.44467)
convert.16959 = bf16[1,1,2752]{2,1,0} convert(multiply.9708)
fusion.3203 = bf16[2752]{0} bitcast(convert.16959)
convert.15093 = f32[2752]{0} convert(fusion.3203)
broadcast.13841 = f32[8192,2752]{1,0} broadcast(convert.15093), dimensions={1}
param_0.15525 = bf16[8192,2752]{1,0} parameter(2)
convert.13738 = f32[8192,2752]{1,0} convert(param_0.15525)
multiply.6422 = f32[8192,2752]{1,0} multiply(broadcast.13841, convert.13738)
constant_14382 = f32[] constant(0)
fusion.339 = f32[8192]{0} reduce(multiply.6422, constant_14382), dimensions={1}, to_apply=add
convert.44633 = bf16[8192]{0} convert(fusion.339)
ROOT bitcast.24487 = bf16[1,1,8192]{2,1,0} bitcast(convert.44633)
}
)";
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(HloFusionAnalysis::EmitterFusionKind::kLoop,
HloFusionAnalysis::EmitterFusionKind::kReduction));
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: ROOT {{.*}} bitcast({{.*}}fusion{{.*}})
)");
}
TEST_F(PriorityFusionTest, DoNotChangeReductionFusionToLoopFusion) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
rhs.407 = f32[] parameter(1)
lhs.407 = f32[] parameter(0)
ROOT add.24451 = f32[] add(lhs.407, rhs.407)
}
fused_computation {
p0 = f32[16,64]{1,0} parameter(0)
zero = f32[] constant(0.0)
ROOT reduce = f32[16]{0} reduce(p0, zero), dimensions={1}, to_apply=add
}
ENTRY main {
param0 = f32[16,64]{1,0} parameter(0)
fusion = f32[16]{0} fusion(param0), kind=kLoop, calls=fused_computation
ROOT slice = f32[8]{0} slice(fusion), slice={[0:8]}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseTransposeIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.17323 = pred[2048,2048]{1,0} parameter(0)
broadcast.22829 = pred[1,12,2048,2048]{3,2,1,0} broadcast(param_0.17323), dimensions={2,3}
param_1.19761 = bf16[2048,24576]{1,0} parameter(1)
convert.29880.clone.1 = f32[2048,24576]{1,0} convert(param_1.19761)
constant_10033_clone_1 = bf16[] constant(0.02002)
convert.30056.clone.1 = f32[] convert(constant_10033_clone_1)
broadcast.18898.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30056.clone.1), dimensions={}
multiply.13451.clone.1 = f32[2048,24576]{1,0} multiply(convert.29880.clone.1, broadcast.18898.clone.1)
tanh.798.clone.1 = f32[2048,24576]{1,0} tanh(multiply.13451.clone.1)
constant_10244_clone_1 = bf16[] constant(50)
convert.30039.clone.1 = f32[] convert(constant_10244_clone_1)
broadcast.18310.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30039.clone.1), dimensions={}
multiply.12550.clone.1 = f32[2048,24576]{1,0} multiply(tanh.798.clone.1, broadcast.18310.clone.1)
convert.29370.clone.1 = bf16[2048,24576]{1,0} convert(multiply.12550.clone.1)
bitcast.1 = bf16[2048,2048,12]{2,1,0} bitcast(convert.29370.clone.1)
transpose.6582 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0}
bitcast = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6582)
convert.33705 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast)
constant_10212 = f32[] constant(-2.38197633e+38)
broadcast.22828 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10212), dimensions={}
select.589 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22829, convert.33705, broadcast.22828)
bitcast.22075 = f32[12,2048,2048]{2,1,0} bitcast(select.589)
constant_10192 = f32[] constant(-inf)
reduce.1614 = f32[12,2048]{1,0} reduce(bitcast.22075, constant_10192), dimensions={2}, to_apply=add
predarg = pred[1,1,2048,2048]{3,2,1,0} parameter(2)
bitcast.11069 = pred[2048,2048]{1,0} bitcast(predarg)
broadcast.22825 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
transpose.6580 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0}
bitcast.2 = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6580)
convert.33703 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast.2)
constant_10213 = f32[] constant(-2.38197633e+38)
broadcast.22824 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10213), dimensions={}
select.587 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22825, convert.33703, broadcast.22824)
broadcast.22819 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1129 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.587, broadcast.22819)
exponential.418 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1129)
bitcast.22074 = f32[12,2048,2048]{2,1,0} bitcast(exponential.418)
constant_10490 = f32[] constant(0)
reduce.1613 = f32[12,2048]{1,0} reduce(bitcast.22074, constant_10490), dimensions={2}, to_apply=add
constant_468 = f32[] constant(-2.38197633e+38)
broadcast.22833 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3}
transpose.6584 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0}
bitcast.3 = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6584)
convert.33707 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast.3)
broadcast.22832 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_468), dimensions={}
select.591 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22833, convert.33707, broadcast.22832)
broadcast.22821 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2}
subtract.1131 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.591, broadcast.22821)
exponential.420 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1131)
broadcast.18351 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1613), dimensions={1,2}
divide.340 = f32[1,12,2048,2048]{3,2,1,0} divide(exponential.420, broadcast.18351)
ROOT convert.29418 = bf16[1,12,2048,2048]{3,2,1,0} convert(divide.340)
})";
using Kind = HloFusionAnalysis::EmitterFusionKind;
EXPECT_THAT(
RunAndGetFusionKinds(kHlo),
UnorderedElementsAre(Kind::kLoop, Kind::kLoop, Kind::kLoop,
Kind::kReduction, Kind::kReduction, Kind::kTranspose,
Kind::kTranspose, Kind::kTranspose));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, ConvertFusedIntoReduce) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
ENTRY main {
param_0_0.79 = bf16[1024,8192]{1,0} parameter(0)
param_1_0.79 = bf16[1024,8192]{1,0} parameter(1)
param_2.483 = f32[8192]{0} parameter(2)
param_4.2892 = bf16[1024,8192]{1,0} parameter(3)
convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79)
convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79)
constant_7773 = f32[] constant(0)
broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1}
multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854)
reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add
convert.13970 = bf16[1024]{0} convert(reduce.4813)
convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892)
multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534)
reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1)
multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855)
reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add
convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1)
ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK-COUNT-3: ROOT {{.*}} convert(
CHECK: ENTRY %main
CHECK-COUNT-3: fusion(
CHECK-NOT: fusion(
)");
}
TEST_F(PriorityFusionTest, DoNotFuseDynamicUpdateSliceIntoReduce) {
GTEST_SKIP() << "b/294198633";
absl::string_view kHlo = R"(
HloModule test_module
add {
Arg_1.1046 = f32[] parameter(1)
Arg_0.1045 = f32[] parameter(0)
ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046)
}
ENTRY main {
param_0.10549 = f32[4,2112]{1,0} parameter(0)
param_5.2561 = pred[] parameter(5)
broadcast.19725 = pred[4,1]{1,0} broadcast(param_5.2561), dimensions={}
param_1.11587 = pred[4]{0} parameter(1)
constant_5837 = f32[] constant(1)
broadcast.19723 = f32[4]{0} broadcast(constant_5837), dimensions={}
param_2.5952 = f32[4,8000]{1,0} parameter(2)
param_3.4004 = f32[4]{0} parameter(3)
broadcast.19718 = f32[4,8000]{1,0} broadcast(param_3.4004), dimensions={0}
subtract.1112 = f32[4,8000]{1,0} subtract(param_2.5952, broadcast.19718)
exponential.418 = f32[4,8000]{1,0} exponential(subtract.1112)
constant_6254 = f32[] constant(0)
reduce.1154 = f32[4]{0} reduce(exponential.418, constant_6254), dimensions={1}, to_apply=add
log.38 = f32[4]{0} log(reduce.1154)
broadcast.19717 = f32[4,8000]{1,0} broadcast(log.38), dimensions={0}
subtract.1111 = f32[4,8000]{1,0} subtract(subtract.1112, broadcast.19717)
iota.170 = s32[4,1]{1,0} iota(), iota_dimension=0
constant_6281 = s32[] constant(0)
broadcast.19735 = s32[4]{0} broadcast(constant_6281), dimensions={}
param_4.3400 = s32[4,8000]{1,0} parameter(4)
slice.3186 = s32[4,40]{1,0} slice(param_4.3400), slice={[0:4], [0:40]}
iota.168 = s32[4,1]{1,0} iota(), iota_dimension=0
param_7.1596 = s32[4]{0} parameter(7)
compare.341 = pred[4]{0} compare(param_7.1596, broadcast.19735), direction=LT
constant_5833 = s32[] constant(40)
broadcast.19731 = s32[4]{0} broadcast(constant_5833), dimensions={}
add.8348 = s32[4]{0} add(param_7.1596, broadcast.19731)
select.418 = s32[4]{0} select(compare.341, add.8348, param_7.1596)
bitcast.20942 = s32[4,1]{1,0} bitcast(select.418)
concatenate.1337 = s32[4,2]{1,0} concatenate(iota.168, bitcast.20942), dimensions={1}
gather.43 = s32[4,1,1]{2,1,0} gather(slice.3186, concatenate.1337), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20941 = s32[4]{0} bitcast(gather.43)
select.398 = s32[4]{0} select(param_1.11587, broadcast.19735, bitcast.20941)
compare.334 = pred[4]{0} compare(select.398, broadcast.19735), direction=LT
constant_6260 = s32[] constant(8000)
broadcast.19720 = s32[4]{0} broadcast(constant_6260), dimensions={}
add.8336 = s32[4]{0} add(select.398, broadcast.19720)
select.396 = s32[4]{0} select(compare.334, add.8336, select.398)
bitcast.20830 = s32[4,1]{1,0} bitcast(select.396)
concatenate.1308 = s32[4,2]{1,0} concatenate(iota.170, bitcast.20830), dimensions={1}
gather.41 = f32[4,1,1]{2,1,0} gather(subtract.1111, concatenate.1308), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1}
bitcast.20824 = f32[4]{0} bitcast(gather.41)
select.389 = f32[4]{0} select(param_1.11587, broadcast.19723, bitcast.20824)
bitcast.20823 = f32[4,1]{1,0} bitcast(select.389)
param_6.1719 = s32[] parameter(6)
constant_6323 = s32[] constant(2048)
add.8549 = s32[] add(param_6.1719, constant_6323)
compare.388 = pred[] compare(add.8549, constant_6281), direction=LT
constant_5436 = s32[] constant(4160)
add.8339 = s32[] add(param_6.1719, constant_5436)
select.409 = s32[] select(compare.388, add.8339, add.8549)
dynamic-slice.36 = f32[4,1]{1,0} dynamic-slice(param_0.10549, constant_6281, select.409), dynamic_slice_sizes={4,1}
select.388 = f32[4,1]{1,0} select(broadcast.19725, bitcast.20823, dynamic-slice.36)
ROOT dynamic-update-slice.307 = f32[4,2112]{1,0} dynamic-update-slice(param_0.10549, select.388, constant_6281, select.409)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} dynamic-update-slice(
CHECK: %[[REDUCE:.*]] = {{.*}} reduce(
CHECK: ROOT {{.*}} log(%[[REDUCE]])
CHECK: ENTRY
CHECK-COUNT-2: fusion(
)");
}
TEST_F(PriorityFusionTest, DontFuseIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
p0 = s32[3,3] parameter(0)
operand = s32[3,3] add(p0, p0)
p1 = s32[2] parameter(1)
indices = s32[2] add(p1, p1)
p2 = s32[2,3] parameter(2)
updates = s32[2,3] add(p2, p2)
scatter = s32[3,3] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
ROOT add = s32[3,3] add(scatter, scatter)
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion())));
EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add())));
}
TEST_F(PriorityFusionTest, DontFuseConstantIntoFirstOperandOfScatter) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY FuseIntoScatter {
operand = s32[1] constant({0})
indices = s32[24,1] parameter(0)
constant = s32[] constant(1)
updates = s32[24,1] broadcast(constant)
ROOT scatter = s32[1] scatter(operand, indices, updates),
to_apply=add,
update_window_dims={1},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter())));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput);
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Scatter(m::Parameter(), m::Parameter(),
m::Broadcast(m::Constant()))));
}
TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduceEvenIfOccupancyIsHigh) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
p0 = f32[4,3584,128,168]{3,2,1,0} parameter(0)
c = f32[] constant(0)
r1 = f32[4,3584,128]{2,1,0} reduce(p0, c), dimensions={3}, to_apply=add
ROOT r2 = f32[4,3584]{1,0} reduce(r1, c), dimensions={2}, to_apply=add
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} reduce(
CHECK: ROOT {{.*}} reduce(
)");
}
TEST_F(PriorityFusionTest, FuseReductionEpilogueWithMultipleUsers) {
constexpr absl::string_view kHlo = R"(
HloModule test_module
add {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
fused_computation {
p0 = f32[64,16384]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT reduce.858 = f32[64]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
ENTRY main {
p0 = f32[64,16384]{1,0} parameter(0)
fusion = f32[64]{0} fusion(p0), kind=kInput, calls=fused_computation
log = f32[64]{0} log(fusion)
negate = f32[64]{0} custom-call(log), custom_call_target="negate"
ROOT add = f32[64]{0} add(negate, log)
}
)";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ENTRY
CHECK: %[[PARAM:.*]] = {{.*}} parameter(0)
CHECK: %[[FUSION:.*]] = {{.*}} fusion(%[[PARAM]])
CHECK: custom-call(%[[FUSION]])
)");
}
TEST_F(PriorityFusionTest, EpilogueFusion) {
absl::string_view kHlo = R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add
}
fused_computation.2 {
p0 = f32[8,4,128]{2,1,0} parameter(0)
r1 = f32[8,4,128]{2,1,0} log(p0)
ROOT r2 = f32[8,4,128]{2,1,0} log(r1)
}
ENTRY main {
p0 = f32[8,4,128,226]{3,2,1,0} parameter(0)
f1 = f32[8,4,128]{2,1,0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[8,4,128]{2,1,0} fusion(f1), kind=kLoop, calls=%fused_computation.2
})";
RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"(
CHECK: ROOT {{.*}} = f32[8,4,128]{2,1,0} fusion(%p{{.*}}), kind=kInput, calls=%fused_computation)");
}
TEST_F(PriorityFusionTest, EpilogueFusionFails) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add.13235 = f32[] add(p0, p1)
}
fused_computation.1 {
p0 = f32[28672,4096]{1,0} parameter(0)
c0 = f32[] constant(0)
ROOT r = f32[28672]{0} reduce(p0, c0), dimensions={1}, to_apply=add
}
fused_computation.2 {
p0 = f32[28672]{0} parameter(0)
p1 = f32[28672]{0} parameter(1)
ROOT a = f32[28672]{0} add(p0, p1)
}
ENTRY main {
p0 = f32[28672,4096]{1,0} parameter(0)
p1 = f32[28672]{0} parameter(1)
f = f32[28672]{0} fusion(p0), kind=kInput, calls=%fused_computation.1
ROOT fusion = f32[28672]{0} fusion(f,p1), kind=kLoop, calls=%fused_computation.2
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DoNotFuseIntoRoot) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY %main (p.0: u32[2], p.1: u32[]) -> u32[2] {
%p.0 = u32[2]{0} parameter(0)
%p.1 = u32[] parameter(1)
ROOT %broadcast = u32[2]{0} broadcast(u32[] %p.1), dimensions={}, sharding={replicated}
%add = u32[2]{0} add(u32[2]{0} %p.0, u32[2]{0} %broadcast)
%tuple.1 = (u32[2]{0}) tuple(u32[2]{0} %add)
%token.0 = token[] after-all()
%outfeed.6 = token[] outfeed((u32[2]{0}) %tuple.1, token[] %token.0), outfeed_shape=(u32[2]{0}), sharding={maximal device=0}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, DontFuseConcat) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
%maximum (param_0: f32[], param_1: f32[]) -> f32[] {
%param_0 = f32[] parameter(0)
%param_1 = f32[] parameter(1)
ROOT %maximum = f32[] maximum(f32[] %param_0, f32[] %param_1)
}
%fused_concat (param_0: f32[1,4,401,8,8], param_1: f32[1,1,4,1023,8], param_2: bf16[1,4,1023,8,8]) -> f32[1,4,1424,8,8] {
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%convert = f32[1,4,1023,8,8]{4,3,2,1,0} convert(bf16[1,4,1023,8,8]{4,3,2,1,0} %param_2)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1023,8]{2,1,0} bitcast(f32[1,1,4,1023,8]{4,3,2,1,0} %param_1)
%broadcast = f32[1,4,1023,8,8]{4,3,2,1,0} broadcast(f32[4,1023,8]{2,1,0} %bitcast), dimensions={1,2,4}
%add = f32[1,4,1023,8,8]{4,3,2,1,0} add(f32[1,4,1023,8,8]{4,3,2,1,0} %convert, f32[1,4,1023,8,8]{4,3,2,1,0} %broadcast)
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
ROOT %concatenate = f32[1,4,1424,8,8]{4,3,2,1,0} concatenate(f32[1,4,1023,8,8]{4,3,2,1,0} %add, f32[1,4,401,8,8]{4,3,2,1,0} %param_0), dimensions={2}
}
%fused_reduce (param_0: f32[], param_1: f32[1,4,1424,8,8]) -> f32[4,8,8] {
%param_1 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(1)
%bitcast = f32[4,1424,8,8]{3,2,1,0} bitcast(f32[1,4,1424,8,8]{4,3,2,1,0} %param_1)
%param_0 = f32[] parameter(0)
ROOT %reduce = f32[4,8,8]{2,1,0} reduce(f32[4,1424,8,8]{3,2,1,0} %bitcast, f32[] %param_0), dimensions={1}, to_apply=%maximum
}
%fused_broadcast (param_0: f32[1,4,1424,8,8], param_1: f32[4,8,8]) -> f32[1,4,1424,8,8] {
%param_0 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[4,8,8]{2,1,0} parameter(1)
%broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} broadcast(f32[4,8,8]{2,1,0} %param_1), dimensions={1,3,4}
ROOT %subtract = f32[1,4,1424,8,8]{4,3,2,1,0} subtract(f32[1,4,1424,8,8]{4,3,2,1,0} %param_0, f32[1,4,1424,8,8]{4,3,2,1,0} %broadcast)
}
ENTRY fusion {
%param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0)
%param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1)
%param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2)
%concat = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%param_0, %param_1, %param_2), kind=kLoop, calls=fused_concat
%param_3 = f32[] parameter(3)
%reduce = f32[4,8,8]{2,1,0} fusion(%param_3, %concat), kind=kLoop, calls=fused_reduce
%param_4 = f32[4,8,8]{2,1,0} parameter(4)
%broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%concat, %param_4), kind=kLoop, calls=fused_broadcast
ROOT tuple = (f32[4,8,8]{2,1,0}, f32[1,4,1424,8,8]{4,3,2,1,0}) tuple(%reduce, %broadcast)
}
)");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionTest, FuseOnlySmallConstant) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY main {
param_0 = f32[32,32]{1,0} parameter(0)
c_1 = f32[] constant(1)
c_2 = f32[32,32] constant({...})
broadcast = f32[32,32]{1,0} broadcast(c_1), dimensions={}
add = f32[32,32]{1,0} add(param_0, broadcast)
ROOT mul = f32[32,32]{1,0} multiply(c_2, add)
}
)");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Multiply(
m::Parameter(),
m::Add(m::Parameter(), m::Broadcast(m::Constant())))));
}
TEST_F(PriorityFusionTest, FuseSmallConstantIntoTritonFusion) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_computation {
param_0 = f32[32,64] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[32] reduce(param_0, param_1), dimensions={1}, to_apply=add
}
ENTRY main {
param_0 = f32[32,64] parameter(0)
c_0 = f32[] constant(0)
ROOT triton_softmax = f32[32] fusion(param_0, c_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1"],"num_warps":"1"}}}
})");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Reduce(m::Parameter(), m::Constant())));
}
TEST_F(PriorityFusionTest, DoNotFuseProducerConsumerMergedTooLarge) {
auto module = *ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation.1 {
iota.9.7 = s32[3,1,1]{2,1,0} iota(), iota_dimension=0
param_3.29 = s32[] parameter(2)
pad.2.7 = s32[3,1,2]{2,1,0} pad(iota.9.7, param_3.29), padding=0_0x0_0x0_1
param_2.39 = s32[] parameter(1)
broadcast.76.1 = s32[3,1,2]{2,1,0} broadcast(param_2.39), dimensions={}
compare.9.1 = pred[3,1,2]{2,1,0} compare(pad.2.7, broadcast.76.1), direction=GE
param_1.73 = s32[2]{0} parameter(0)
broadcast.78.1 = s32[3,2]{1,0} broadcast(param_1.73), dimensions={1}
bitcast.1 = s32[3,2]{1,0} bitcast(pad.2.7)
compare.10.1 = pred[3,2]{1,0} compare(bitcast.1, broadcast.78.1), direction=LE
bitcast.2 = pred[3,1,2]{2,1,0} bitcast(compare.10.1)
ROOT and.3.1 = pred[3,1,2]{2,1,0} and(compare.9.1, bitcast.2)
}
and {
x = pred[] parameter(0)
y = pred[] parameter(1)
ROOT and = pred[] and(x, y)
}
fused_computation.2 {
param0 = pred[3,1,2]{2,1,0} parameter(0)
slice = pred[1,1,2]{2,1,0} slice(param0), slice={[0:1], [0:1], [0:2]}
bitcast = pred[2]{0} bitcast(slice)
init = pred[] constant(true)
reduce = pred[2]{0} reduce(param0, init), dimensions={0,1}, to_apply=and
and = pred[2]{0} and(bitcast, reduce)
pad = pred[3]{0} pad(and, init), padding=0_1
broadcast = pred[3,2]{1,0} broadcast(pad), dimensions={0}
bitcast2 = pred[6]{0} bitcast(broadcast)
broadcast2 = pred[2,3]{1,0} broadcast(pad), dimensions={1}
bitcast3 = pred[6]{0} bitcast(broadcast2)
ROOT and2 = pred[6]{0} and(bitcast2, bitcast3)
}
ENTRY main {
p0 = s32[2]{0} parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
fusion1 = pred[3,1,2]{2,1,0} fusion(p0, p1, p2), kind=kLoop, calls=fused_computation.1
ROOT fusion2 = pred[6]{0} fusion(fusion1), kind=kInput, calls=fused_computation.2
}
)");
auto& debug_options = module->mutable_config().mutable_debug_options();
debug_options.set_xla_gpu_mlir_emitter_level(3);
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(PriorityFusionWithTritonEnabledTest,
CanMergeTritonFusionWithBothProducerAndConsumer) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
producer_computation {
parameter_0 = f32[125]{0} parameter(0)
ROOT broadcast = f32[125,127]{1,0} broadcast(parameter_0), dimensions={0}
}
consumer_computation {
parameter_0 = f32[125,127]{1,0} parameter(0)
parameter_1 = f32[125,127]{1,0} parameter(1)
ROOT multiply = f32[125,127]{1,0} multiply(parameter_1, parameter_0)
}
triton_softmax_computation {
parameter_0 = f32[125,127]{1,0} parameter(0)
multiply_0 = f32[125,127]{1,0} multiply(parameter_0, parameter_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[125]{0} parameter(0)
param_1 = f32[125,127]{1,0} parameter(1)
producer_fusion = f32[125,127]{1,0} fusion(param_0), kind=kLoop, calls=producer_computation
triton_softmax = f32[125,127]{1,0} fusion(producer_fusion), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}}
ROOT consumer_fusion = f32[125,127]{1,0} fusion(param_1, triton_softmax), kind=kLoop, calls=consumer_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
EXPECT_TRUE(priority_fusion_.Run(module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kCustom);
ASSERT_TRUE(IsGenericTritonFusion(*root));
EXPECT_TRUE(root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_block_level_fusion_config());
EXPECT_EQ(root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.block_level_fusion_config()
.output_tile_sizes_size(),
2);
}
TEST_F(PriorityFusionWithTritonEnabledTest,
FuseTritonProducerWithTwoConsumers) {
const std::string kHloText = R"(
HloModule t
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
producer_computation {
parameter_0 = f32[125]{0} parameter(0)
ROOT broadcast = f32[125,127] broadcast(parameter_0), dimensions={0}
}
consumer_computation.1 {
parameter_0 = f32[125,127] parameter(0)
ROOT log = f32[125,127] log(parameter_0)
}
consumer_computation.2 {
parameter_0 = f32[125,127] parameter(0)
ROOT exp = f32[125,127] exponential(parameter_0)
}
ENTRY main {
param_0 = f32[125]{0} parameter(0)
producer_fusion = f32[125,127] fusion(param_0), kind=kCustom, calls=producer_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}}
consumer_fusion.1 = f32[125,127] fusion(producer_fusion), kind=kLoop, calls=consumer_computation.1
consumer_fusion.2 = f32[125,127] fusion(producer_fusion), kind=kLoop, calls=consumer_computation.2
ROOT tuple = (f32[125,127], f32[125,127]) tuple(consumer_fusion.1, consumer_fusion.2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
EXPECT_TRUE(priority_fusion_.Run(module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
HloInstruction* root = module->entry_computation()->root_instruction();
HloInstruction *fusion1, *fusion2;
EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(&fusion1, m::Parameter()),
m::Fusion(&fusion2, m::Parameter()))));
EXPECT_TRUE(IsGenericTritonFusion(*fusion1));
TF_ASSERT_OK_AND_ASSIGN(auto backend_config1,
fusion1->backend_config<GpuBackendConfig>());
EXPECT_TRUE(
backend_config1.fusion_backend_config().has_block_level_fusion_config());
EXPECT_EQ(backend_config1.fusion_backend_config()
.block_level_fusion_config()
.output_tile_sizes_size(),
2);
EXPECT_TRUE(IsGenericTritonFusion(*fusion2));
TF_ASSERT_OK_AND_ASSIGN(auto backend_config2,
fusion2->backend_config<GpuBackendConfig>());
EXPECT_TRUE(
backend_config2.fusion_backend_config().has_block_level_fusion_config());
EXPECT_EQ(backend_config2.fusion_backend_config()
.block_level_fusion_config()
.output_tile_sizes_size(),
2);
}
TEST_F(PriorityFusionWithTritonEnabledTest,
TritonProducerNotSupported_DoNotFuse) {
const std::string kHloText = R"(
HloModule t
producer_computation {
parameter_0 = c64[] parameter(0)
broadcast = c64[125,127] broadcast(parameter_0), dimensions={}
ROOT real = f32[125,127] real(broadcast)
}
triton_computation {
parameter_0 = f32[125,127] parameter(0)
parameter_1 = f32[125,127] parameter(1)
ROOT add = f32[125,127] add(parameter_0, parameter_1)
}
ENTRY main {
param_0 = c64[] parameter(0)
param_1 = f32[125,127] parameter(1)
producer_fusion = f32[125,127] fusion(param_0), kind=kLoop, calls=producer_computation
ROOT triton_fusion = f32[125,127] fusion(producer_fusion, param_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
EXPECT_FALSE(priority_fusion_.Run(module.get()).value());
}
TEST_F(PriorityFusionWithTritonEnabledTest,
TritonConsumerNotSupported_DoNotFuse) {
const std::string kHloText = R"(
HloModule t
triton_computation {
parameter_0 = f32[] parameter(0)
  ROOT broadcast = f32[125,127] broadcast(parameter_0), dimensions={}
}
consumer_computation {
parameter_0 = c64[] parameter(0)
parameter_1 = f32[125,127] parameter(1)
broadcast = c64[125,127] broadcast(parameter_0), dimensions={}
real = f32[125,127] real(broadcast)
ROOT add = f32[125,127] add(real, parameter_1)
}
ENTRY main {
param_0 = f32[] parameter(1)
param_1 = c64[] parameter(0)
triton_fusion = f32[125,127] fusion(param_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}}
ROOT consumer_fusion = f32[125,127] fusion(param_1, triton_fusion), kind=kLoop, calls=consumer_computation
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText));
EXPECT_FALSE(priority_fusion_.Run(module.get()).value());
}
TEST_F(PriorityFusionTest, DoNotFuseInsideReducer) {
auto module = *ParseAndReturnVerifiedModule(R"(
%reducer {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add = f32[] add(p0, p1)
ROOT max = f32[] maximum(add, p0)
}
%fused_reduce {
p0 = f32[256] parameter(0)
p1 = f32[] parameter(1)
ROOT reduce = f32[] reduce(p0, p1), dimensions={0}, to_apply=%reducer
}
ENTRY fusion {
p0 = f32[256] parameter(0)
p1 = f32[] parameter(1)
ROOT %reduce = f32[] fusion(p0, p1), kind=kInput, calls=fused_reduce
}
)");
EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/priority_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/priority_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce1aac24-e987-4099-af49-172276ddf340 | cpp | google/arolla | timeseries | arolla/qexpr/operators/experimental/dense_array/timeseries.h | arolla/qexpr/operators/experimental/dense_array/timeseries_test.cc | #ifndef AROLLA_QEXPR_OPERATORS_EXPERIMENTAL_DENSE_ARRAY_TIMESERIES_H_
#define AROLLA_QEXPR_OPERATORS_EXPERIMENTAL_DENSE_ARRAY_TIMESERIES_H_
#include <cstdint>
#include <deque>
#include <optional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/ops/dense_group_ops.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/aggregation_ops_interface.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/util/meta.h"
#include "arolla/util/view_types.h"
namespace arolla {
namespace moving_average_operator_impl {
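// Accumulator that yields, per row, the mean of the trailing `window_size`
// present values. Any missing input clears the window, so a result is only
// emitted once `window_size` consecutive present values have been seen.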
template <typename ScalarT>
class MovingAverageAccumulator final
: public Accumulator<AccumulatorType::kPartial, OptionalValue<ScalarT>,
meta::type_list<>,
meta::type_list<OptionalValue<ScalarT>>> {
public:
explicit MovingAverageAccumulator(int window_size)
: window_size_(window_size) {}
void Reset() final {
current_window_.clear();
window_sum_ = 0;
}
void Add(OptionalValue<ScalarT> tail_value) final {
if (tail_value.present) {
current_window_.push_front(tail_value.value);
window_sum_ += tail_value.value;
} else {
Reset();
}
}
OptionalValue<ScalarT> GetResult() final {
if (current_window_.size() == window_size_) {
auto result = window_sum_ / window_size_;
window_sum_ -= current_window_.back();
current_window_.pop_back();
return result;
} else {
return std::nullopt;
}
}
private:
std::deque<ScalarT> current_window_;
int window_size_;
double window_sum_ = 0;
};
}
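// Operator backing `experimental.agg_moving_average`: computes a moving
// average of `series` independently within each group defined by `edge`.
// Rows whose trailing window is not yet full, or overlaps a missing value,
// produce a missing result.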
struct AggMovingAverageOp {
template <typename ScalarT>
absl::StatusOr<DenseArray<ScalarT>> operator()(
EvaluationContext* ctx,
const DenseArray<ScalarT>& series,
const int64_t window_size, const DenseArrayEdge& edge) const {
using MovingAvgAcc =
moving_average_operator_impl::MovingAverageAccumulator<ScalarT>;
DenseGroupOps<MovingAvgAcc> agg(&ctx->buffer_factory(),
MovingAvgAcc(window_size));
return agg.Apply(edge, series);
}
};
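// Operator backing `experimental.ewma`. With adjust=true the output is the
// normalized weighted average
//   y_t = sum_i (1-alpha)^i * x_{t-i} / sum_i (1-alpha)^i,
// while adjust=false uses the plain recurrence
//   y_t = alpha * x_t + (1 - alpha) * y_{t-1},  with y_0 = x_0.
// Missing inputs repeat the previous output; unless ignore_missing is true,
// each skipped row additionally decays the weight of older observations by
// a factor of (1 - alpha).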
struct ExponentialWeightedMovingAverageOp {
template <typename ScalarT>
DenseArray<ScalarT> AdjustedEWMA(const DenseArray<ScalarT>& series,
double alpha,
bool ignore_missing = false) const {
DenseArrayBuilder<ScalarT> builder(series.size());
int64_t previous_non_missing_id = -1;
double previous_non_missing_value = 0;
double current_ewma_numerator = 0;
double current_ewma_denominator = 0;
series.ForEach([&](int64_t current_row_id, bool present,
view_type_t<ScalarT> tail_value) {
if (!present) return;
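      // Back-fill the gap since the last present row with the last emitted
      // value; unless missing values are ignored, decay the running
      // numerator and denominator once per skipped row.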
if (previous_non_missing_id >= 0) {
for (int64_t i = previous_non_missing_id + 1; i < current_row_id; i++) {
builder.Set(i, previous_non_missing_value);
if (!ignore_missing) {
current_ewma_numerator *= (1.0 - alpha);
current_ewma_denominator *= (1.0 - alpha);
}
}
}
current_ewma_numerator =
tail_value + (1. - alpha) * current_ewma_numerator;
current_ewma_denominator = 1. + (1. - alpha) * current_ewma_denominator;
previous_non_missing_value =
current_ewma_numerator / current_ewma_denominator;
builder.Set(current_row_id, previous_non_missing_value);
previous_non_missing_id = current_row_id;
});
return std::move(builder).Build();
}
template <typename ScalarT>
DenseArray<ScalarT> UnadjustedEWMA(const DenseArray<ScalarT>& series,
double alpha,
bool ignore_missing = false) const {
DenseArrayBuilder<ScalarT> builder(series.size());
int64_t previous_non_missing_id = -1;
double previous_non_missing_value = 0;
series.ForEach([&](int64_t current_row_id, bool present,
view_type_t<ScalarT> tail_value) {
if (!present) return;
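      // Weight given to the previous output; each skipped (missing) row
      // decays it by another factor of (1 - alpha) unless missing values
      // are ignored.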
double previous_weight = (1. - alpha);
if (previous_non_missing_id >= 0) {
for (int64_t i = previous_non_missing_id + 1; i < current_row_id; i++) {
builder.Set(i, previous_non_missing_value);
if (!ignore_missing) {
previous_weight *= (1. - alpha);
}
}
} else {
previous_non_missing_value = tail_value;
}
previous_non_missing_value =
(alpha * tail_value + previous_weight * previous_non_missing_value) /
(alpha + previous_weight);
builder.Set(current_row_id, previous_non_missing_value);
previous_non_missing_id = current_row_id;
});
return std::move(builder).Build();
}
template <typename ScalarT>
absl::StatusOr<DenseArray<ScalarT>> operator()(
const DenseArray<ScalarT>& series, double alpha, bool adjust = true,
bool ignore_missing = false) const {
if (alpha <= 0 || alpha > 1) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
absl::StrFormat("alpha must be in range (0, 1], got %f", alpha));
}
if (adjust) {
return AdjustedEWMA(series, alpha, ignore_missing);
} else {
return UnadjustedEWMA(series, alpha, ignore_missing);
}
}
};
}
#endif | #include <cstdint>
#include <initializer_list>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/qexpr/operators.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
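// Builds a DenseArrayEdge from group split points, e.g. {0, 4, 8} places
// rows [0, 4) in group 0 and rows [4, 8) in group 1.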
absl::StatusOr<DenseArrayEdge> CreateEdgeFromSplitPoints(
std::initializer_list<int64_t> splits) {
return DenseArrayEdge::FromSplitPoints({CreateBuffer<int64_t>(splits)});
}
const char kAggMovingAverage[] = "experimental.agg_moving_average";
constexpr auto NA = std::nullopt;
TEST(AggMovingAverage, FullTimeSeries) {
const auto series = CreateDenseArray<float>({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{3}, edge),
IsOkAndHolds(ElementsAre(NA, NA, 2, 3, 4, 5, 6, 7)));
}
TEST(AggMovingAverage, TimeSeriesWithMissingValue) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{3}, edge),
IsOkAndHolds(ElementsAre(NA, NA, 2, NA, NA, NA, 6, 7)));
}
TEST(AggMovingAverage, ZeroWindow) {
const auto series = CreateDenseArray<float>({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{0}, edge),
IsOkAndHolds(ElementsAre(NA, NA, NA, NA, NA, NA, NA, NA)));
}
TEST(AggMovingAverage, WindowSizeEqualToInputArraySize) {
const auto series = CreateDenseArray<float>({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{8}, edge),
IsOkAndHolds(ElementsAre(NA, NA, NA, NA, NA, NA, NA, 4.5)));
}
TEST(AggMovingAverage, WindowSizeLargerThanInputArraySize) {
const auto series = CreateDenseArray<float>({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{9}, edge),
IsOkAndHolds(ElementsAre(NA, NA, NA, NA, NA, NA, NA, NA)));
}
TEST(AggMovingAverage, EmptyTimeSeries) {
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(
kAggMovingAverage, DenseArray<float>(), int64_t{3}, edge),
IsOkAndHolds(ElementsAre()));
}
TEST(AggMovingAverage, FullTimeSeriesWithTwoGroups) {
const auto series = CreateDenseArray<float>({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_OK_AND_ASSIGN(auto edge, CreateEdgeFromSplitPoints({0, 4, 8}));
EXPECT_THAT(InvokeOperator<DenseArray<float>>(kAggMovingAverage, series,
int64_t{3}, edge),
IsOkAndHolds(ElementsAre(NA, NA, 2, 3, NA, NA, 6, 7)));
}
TEST(ExponentialWeightedMovingAverageOpTest, MissingValue_Adjust) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5, 6, 7, 8});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(1., 1.71428571, 2.53846154, 2.53846154,
4.50832266, 5.50288031, 6.43861754,
7.39069488)));
}
TEST(ExponentialWeightedMovingAverageOpTest,
MissingValue_Adjust_IgnoreMissing) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5, 6, 7, 8});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/true),
IsOkAndHolds(ElementsAre(1., 1.71428571, 2.53846154, 2.53846154,
4.05418719, 5.23375364, 6.29786003,
7.32082003)));
}
TEST(ExponentialWeightedMovingAverageOpTest, FirstMissing_Adjust) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, 2., 2.71428571)));
}
TEST(ExponentialWeightedMovingAverageOpTest, FirstTwoMissing_Adjust) {
const auto series = CreateDenseArray<float>({NA, NA, 3, NA, 5});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, NA, 3., 3., 4.72413793)));
}
TEST(ExponentialWeightedMovingAverageOpTest,
FirstTwoMissing_Adjust_IgnoreMissing) {
const auto series = CreateDenseArray<float>({NA, NA, 3, NA, 5});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/true),
IsOkAndHolds(ElementsAre(NA, NA, 3., 3., 4.42857143)));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaLessThanZero_Adjust) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series,
                                                -1.2, /*adjust=*/true,
                                                /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsZero_Adjust) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.,
                                                /*adjust=*/true,
                                                /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaGreaterThanOne_Adjust) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.2,
                                        /*adjust=*/true,
                                        /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsOne_Adjust) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/true,
                                                /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(1, 2, 3, 3, 5)));
}
TEST(ExponentialWeightedMovingAverageOpTest,
AlphaEqualsOne_Adjust_IgnoreMissing) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/true,
                                                /*ignore_missing=*/true),
IsOkAndHolds(ElementsAre(1, 2, 3, 3, 5)));
}
TEST(ExponentialWeightedMovingAverageOpTest,
AlphaEqualsOne_Adjust_FirstMissing) {
const auto series = CreateDenseArray<float>({NA, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/true,
                                                /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, 2, 3, 3, 5)));
}
TEST(ExponentialWeightedMovingAverageOpTest, EmptyTimeSeries) {
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma",
DenseArray<float>(), 1.,
                                                /*adjust=*/false,
                                                /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre()));
}
TEST(ExponentialWeightedMovingAverageOpTest, MissingValue) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5, 6, 7, 8});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(1., 1.6, 2.44, 2.44, 4.46105263, 5.38442105,
6.35376842, 7.34150737)));
}
TEST(ExponentialWeightedMovingAverageOpTest, MissingValue_IgnoreMissing) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5, 6, 7, 8});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/true),
IsOkAndHolds(
ElementsAre(1., 1.6, 2.44, 2.44, 3.976, 5.1904, 6.27616, 7.310464)));
}
TEST(ExponentialWeightedMovingAverageOpTest, FirstMissing) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, 2., 2.6)));
}
TEST(ExponentialWeightedMovingAverageOpTest, FirstTwoMissing) {
const auto series = CreateDenseArray<float>({NA, NA, 3, NA, 5});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, NA, 3., 3., 4.57894737)));
}
TEST(ExponentialWeightedMovingAverageOpTest, FirstTwoMissing_IgnoreMissing) {
const auto series = CreateDenseArray<float>({NA, NA, 3, NA, 5});
EXPECT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.6,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/true),
IsOkAndHolds(ElementsAre(NA, NA, 3, 3, 4.2)));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaLessThanZero) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series,
                                                -1.2, /*adjust=*/false,
                                                /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsZero) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 0.,
                                                /*adjust=*/false,
                                                /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaGreaterThanOne) {
const auto series = CreateDenseArray<float>({NA, 2, 3});
ASSERT_THAT(
InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.2,
                                        /*adjust=*/false,
                                        /*ignore_missing=*/false),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsOne) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/false,
                                                /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(1, 2, 3, 3, 5)));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsOne_IgnoreMissing) {
const auto series = CreateDenseArray<float>({1, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/false,
                                                /*ignore_missing=*/true),
IsOkAndHolds(ElementsAre(1, 2, 3, 3, 5)));
}
TEST(ExponentialWeightedMovingAverageOpTest, AlphaEqualsOne_FirstMissing) {
const auto series = CreateDenseArray<float>({NA, 2, 3, NA, 5});
EXPECT_THAT(InvokeOperator<DenseArray<float>>("experimental.ewma", series, 1.,
                                                /*adjust=*/false,
                                                /*ignore_missing=*/false),
IsOkAndHolds(ElementsAre(NA, 2, 3, 3, 5)));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/experimental/dense_array/timeseries.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/experimental/dense_array/timeseries_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
df1fb9f3-b27e-4aba-9bd1-117f8935c674 | cpp | google/cel-cpp | evaluator_core | eval/eval/evaluator_core.cc | eval/eval/evaluator_core_test.cc | #include "eval/eval/evaluator_core.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "absl/utility/utility.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "runtime/activation_interface.h"
#include "runtime/managed_value_factory.h"
namespace google::api::expr::runtime {
FlatExpressionEvaluatorState::FlatExpressionEvaluatorState(
size_t value_stack_size, size_t comprehension_slot_count,
const cel::TypeProvider& type_provider,
cel::MemoryManagerRef memory_manager)
: value_stack_(value_stack_size),
comprehension_slots_(comprehension_slot_count),
managed_value_factory_(absl::in_place, type_provider, memory_manager),
value_factory_(&managed_value_factory_->get()) {}
FlatExpressionEvaluatorState::FlatExpressionEvaluatorState(
size_t value_stack_size, size_t comprehension_slot_count,
cel::ValueManager& value_factory)
: value_stack_(value_stack_size),
comprehension_slots_(comprehension_slot_count),
managed_value_factory_(absl::nullopt),
value_factory_(&value_factory) {}
void FlatExpressionEvaluatorState::Reset() {
value_stack_.Clear();
comprehension_slots_.Reset();
}
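// Returns the next step to execute, or nullptr once the program is done.
// When the current subexpression runs out of steps, the top call-stack frame
// is popped: execution resumes at the caller's saved program counter, and
// the subexpression's result (top of the value stack) is published to its
// comprehension slot.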
const ExpressionStep* ExecutionFrame::Next() {
while (true) {
const size_t end_pos = execution_path_.size();
if (ABSL_PREDICT_TRUE(pc_ < end_pos)) {
const auto* step = execution_path_[pc_++].get();
ABSL_ASSUME(step != nullptr);
return step;
}
if (ABSL_PREDICT_TRUE(pc_ == end_pos)) {
if (!call_stack_.empty()) {
SubFrame& subframe = call_stack_.back();
pc_ = subframe.return_pc;
execution_path_ = subframe.return_expression;
ABSL_DCHECK_EQ(value_stack().size(), subframe.expected_stack_size);
comprehension_slots().Set(subframe.slot_index, value_stack().Peek(),
value_stack().PeekAttribute());
call_stack_.pop_back();
continue;
}
} else {
ABSL_LOG(ERROR) << "Attempting to step beyond the end of execution path.";
}
return nullptr;
}
}
namespace {
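// Holds an absl::Status in raw aligned storage and deliberately defines no
// destructor: the status is only moved out (and eventually destroyed) when
// an error must be propagated, so the hot success path pays no destructor
// overhead. This is safe because an OK status owns no heap state.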
class EvaluationStatus final {
public:
explicit EvaluationStatus(absl::Status&& status) {
::new (static_cast<void*>(&status_[0])) absl::Status(std::move(status));
}
EvaluationStatus() = delete;
EvaluationStatus(const EvaluationStatus&) = delete;
EvaluationStatus(EvaluationStatus&&) = delete;
EvaluationStatus& operator=(const EvaluationStatus&) = delete;
EvaluationStatus& operator=(EvaluationStatus&&) = delete;
absl::Status Consume() && {
return std::move(*reinterpret_cast<absl::Status*>(&status_[0]));
}
bool ok() const {
return ABSL_PREDICT_TRUE(
reinterpret_cast<const absl::Status*>(&status_[0])->ok());
}
private:
alignas(absl::Status) char status_[sizeof(absl::Status)];
};
}
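// Runs the instruction stream to completion and returns the single value it
// leaves on the stack. The listener-free loop stays minimal; with a listener
// attached, the top of the stack is reported after each step that originated
// from an AST node. A stack-size invariant (exactly one new value) is
// verified before returning.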
absl::StatusOr<cel::Value> ExecutionFrame::Evaluate(
EvaluationListener& listener) {
const size_t initial_stack_size = value_stack().size();
if (!listener) {
for (const ExpressionStep* expr = Next();
ABSL_PREDICT_TRUE(expr != nullptr); expr = Next()) {
if (EvaluationStatus status(expr->Evaluate(this)); !status.ok()) {
return std::move(status).Consume();
}
}
} else {
for (const ExpressionStep* expr = Next();
ABSL_PREDICT_TRUE(expr != nullptr); expr = Next()) {
if (EvaluationStatus status(expr->Evaluate(this)); !status.ok()) {
return std::move(status).Consume();
}
if (pc_ == 0 || !expr->comes_from_ast()) {
continue;
}
if (ABSL_PREDICT_FALSE(value_stack().empty())) {
ABSL_LOG(ERROR) << "Stack is empty after a ExpressionStep.Evaluate. "
"Try to disable short-circuiting.";
continue;
}
if (EvaluationStatus status(
listener(expr->id(), value_stack().Peek(), value_factory()));
!status.ok()) {
return std::move(status).Consume();
}
}
}
const size_t final_stack_size = value_stack().size();
if (ABSL_PREDICT_FALSE(final_stack_size != initial_stack_size + 1 ||
final_stack_size == 0)) {
return absl::InternalError(absl::StrCat(
"Stack error during evaluation: expected=", initial_stack_size + 1,
", actual=", final_stack_size));
}
cel::Value value = std::move(value_stack().Peek());
value_stack().Pop(1);
return value;
}
FlatExpressionEvaluatorState FlatExpression::MakeEvaluatorState(
cel::MemoryManagerRef manager) const {
return FlatExpressionEvaluatorState(path_.size(), comprehension_slots_size_,
type_provider_, manager);
}
FlatExpressionEvaluatorState FlatExpression::MakeEvaluatorState(
cel::ValueManager& value_factory) const {
return FlatExpressionEvaluatorState(path_.size(), comprehension_slots_size_,
value_factory);
}
absl::StatusOr<cel::Value> FlatExpression::EvaluateWithCallback(
const cel::ActivationInterface& activation, EvaluationListener listener,
FlatExpressionEvaluatorState& state) const {
state.Reset();
ExecutionFrame frame(subexpressions_, activation, options_, state,
std::move(listener));
return frame.Evaluate(frame.callback());
}
cel::ManagedValueFactory FlatExpression::MakeValueFactory(
cel::MemoryManagerRef memory_manager) const {
return cel::ManagedValueFactory(type_provider_, memory_manager);
}
} | #include "eval/eval/evaluator_core.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "base/type_provider.h"
#include "eval/compiler/cel_expression_builder_flat_impl.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/internal/interop.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_value.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
using ::cel::IntValue;
using ::cel::TypeProvider;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::interop_internal::CreateIntValue;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::runtime::RegisterBuiltinFunctions;
using ::testing::_;
using ::testing::Eq;
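// Test-only step that pushes the constant int 0 onto the value stack.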
class FakeConstExpressionStep : public ExpressionStep {
public:
FakeConstExpressionStep() : ExpressionStep(0, true) {}
absl::Status Evaluate(ExecutionFrame* frame) const override {
frame->value_stack().Push(CreateIntValue(0));
return absl::OkStatus();
}
};
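// Test-only step that pops the top int value and pushes it incremented by 1.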
class FakeIncrementExpressionStep : public ExpressionStep {
public:
FakeIncrementExpressionStep() : ExpressionStep(0, true) {}
absl::Status Evaluate(ExecutionFrame* frame) const override {
auto value = frame->value_stack().Peek();
frame->value_stack().Pop(1);
EXPECT_TRUE(value->Is<IntValue>());
int64_t val = value.GetInt().NativeValue();
frame->value_stack().Push(CreateIntValue(val + 1));
return absl::OkStatus();
}
};
TEST(EvaluatorCoreTest, ExecutionFrameNext) {
ExecutionPath path;
google::protobuf::Arena arena;
auto manager = ProtoMemoryManagerRef(&arena);
auto const_step = std::make_unique<const FakeConstExpressionStep>();
auto incr_step1 = std::make_unique<const FakeIncrementExpressionStep>();
auto incr_step2 = std::make_unique<const FakeIncrementExpressionStep>();
path.push_back(std::move(const_step));
path.push_back(std::move(incr_step1));
path.push_back(std::move(incr_step2));
auto dummy_expr = std::make_unique<Expr>();
cel::RuntimeOptions options;
options.unknown_processing = cel::UnknownProcessingOptions::kDisabled;
cel::Activation activation;
FlatExpressionEvaluatorState state(path.size(),
                                     /*comprehension_slot_count=*/0,
TypeProvider::Builtin(), manager);
ExecutionFrame frame(path, activation, options, state);
EXPECT_THAT(frame.Next(), Eq(path[0].get()));
EXPECT_THAT(frame.Next(), Eq(path[1].get()));
EXPECT_THAT(frame.Next(), Eq(path[2].get()));
EXPECT_THAT(frame.Next(), Eq(nullptr));
}
TEST(EvaluatorCoreTest, SimpleEvaluatorTest) {
ExecutionPath path;
auto const_step = std::make_unique<FakeConstExpressionStep>();
auto incr_step1 = std::make_unique<FakeIncrementExpressionStep>();
auto incr_step2 = std::make_unique<FakeIncrementExpressionStep>();
path.push_back(std::move(const_step));
path.push_back(std::move(incr_step1));
path.push_back(std::move(incr_step2));
CelExpressionFlatImpl impl(FlatExpression(
      std::move(path), /*comprehension_slots_size=*/0,
      cel::TypeProvider::Builtin(), cel::RuntimeOptions{}));
Activation activation;
google::protobuf::Arena arena;
auto status = impl.Evaluate(activation, &arena);
EXPECT_OK(status);
auto value = status.value();
EXPECT_TRUE(value.IsInt64());
EXPECT_THAT(value.Int64OrDie(), Eq(2));
}
class MockTraceCallback {
public:
MOCK_METHOD(void, Call,
(int64_t expr_id, const CelValue& value, google::protobuf::Arena*));
};
TEST(EvaluatorCoreTest, TraceTest) {
Expr expr;
google::api::expr::v1alpha1::SourceInfo source_info;
expr.set_id(1);
auto and_call = expr.mutable_call_expr();
and_call->set_function("_&&_");
auto true_expr = and_call->add_args();
true_expr->set_id(2);
true_expr->mutable_const_expr()->set_int64_value(1);
auto comp_expr = and_call->add_args();
comp_expr->set_id(3);
auto comp = comp_expr->mutable_comprehension_expr();
comp->set_iter_var("x");
comp->set_accu_var("accu");
auto list_expr = comp->mutable_iter_range();
list_expr->set_id(4);
auto el1_expr = list_expr->mutable_list_expr()->add_elements();
el1_expr->set_id(11);
el1_expr->mutable_const_expr()->set_int64_value(1);
auto el2_expr = list_expr->mutable_list_expr()->add_elements();
el2_expr->set_id(12);
el2_expr->mutable_const_expr()->set_int64_value(2);
auto el3_expr = list_expr->mutable_list_expr()->add_elements();
el3_expr->set_id(13);
el3_expr->mutable_const_expr()->set_int64_value(3);
auto accu_init_expr = comp->mutable_accu_init();
accu_init_expr->set_id(20);
accu_init_expr->mutable_const_expr()->set_bool_value(true);
auto loop_cond_expr = comp->mutable_loop_condition();
loop_cond_expr->set_id(21);
loop_cond_expr->mutable_const_expr()->set_bool_value(true);
auto loop_step_expr = comp->mutable_loop_step();
loop_step_expr->set_id(22);
auto condition = loop_step_expr->mutable_call_expr();
condition->set_function("_>_");
auto iter_expr = condition->add_args();
iter_expr->set_id(23);
iter_expr->mutable_ident_expr()->set_name("x");
auto zero_expr = condition->add_args();
zero_expr->set_id(24);
zero_expr->mutable_const_expr()->set_int64_value(0);
auto result_expr = comp->mutable_result();
result_expr->set_id(25);
result_expr->mutable_const_expr()->set_bool_value(true);
cel::RuntimeOptions options;
options.short_circuiting = false;
CelExpressionBuilderFlatImpl builder(options);
ASSERT_OK(RegisterBuiltinFunctions(builder.GetRegistry()));
ASSERT_OK_AND_ASSIGN(auto cel_expr,
builder.CreateExpression(&expr, &source_info));
Activation activation;
google::protobuf::Arena arena;
MockTraceCallback callback;
EXPECT_CALL(callback, Call(accu_init_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(el1_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(el2_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(el3_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(list_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(loop_cond_expr->id(), _, &arena)).Times(3);
EXPECT_CALL(callback, Call(iter_expr->id(), _, &arena)).Times(3);
EXPECT_CALL(callback, Call(zero_expr->id(), _, &arena)).Times(3);
EXPECT_CALL(callback, Call(loop_step_expr->id(), _, &arena)).Times(3);
EXPECT_CALL(callback, Call(result_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(comp_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(true_expr->id(), _, &arena));
EXPECT_CALL(callback, Call(expr.id(), _, &arena));
auto eval_status = cel_expr->Trace(
activation, &arena,
[&](int64_t expr_id, const CelValue& value, google::protobuf::Arena* arena) {
callback.Call(expr_id, value, arena);
return absl::OkStatus();
});
ASSERT_OK(eval_status);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/evaluator_core.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/evaluator_core_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
9d6bf915-a5e4-4331-9539-03281654d5cd | cpp | tensorflow/tensorflow | set_device | tensorflow/tools/graph_transforms/set_device.cc | tensorflow/tools/graph_transforms/set_device_test.cc | #include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
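// Graph transform that assigns `device` to nodes of the input graph. By
// default every node's device is overwritten; with if_default=true, only
// nodes that currently have no device assignment are updated.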
Status SetDevice(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
string new_device;
TF_RETURN_IF_ERROR(context.GetOneStringParameter("device", "", &new_device));
bool if_default;
TF_RETURN_IF_ERROR(
context.GetOneBoolParameter("if_default", false, &if_default));
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
if (!if_default || (node.device().empty())) {
new_node->set_device(new_device);
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("set_device", SetDevice);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status SetDevice(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
namespace {
GraphDef CreateDeviceGraph() {
GraphDef graph_def;
NodeDef* mul_node1 = graph_def.add_node();
mul_node1->set_name("mul_node1");
mul_node1->set_op("Mul");
mul_node1->set_device("/device:CPU:0");
mul_node1->add_input("add_node2");
mul_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
add_node2->set_device("/device:GPU:1");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
return graph_def;
}
}
TEST(SetDeviceTest, TestSetDevice) {
GraphDef graph_def = CreateDeviceGraph();
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"mul_node1"};
context.params.insert(std::pair<string, std::vector<string>>(
{"device", {string("/device:CPU:0")}}));
TF_ASSERT_OK(SetDevice(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ("/device:CPU:0", node_lookup.at("mul_node1")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node2")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node3")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node1")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node2")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("const_node3")->device());
EXPECT_EQ("/device:CPU:0", node_lookup.at("add_node4")->device());
}
TEST(SetDeviceTest, TestSetDeviceIfDefault) {
GraphDef graph_def = CreateDeviceGraph();
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"mul_node1"};
context.params.insert(std::pair<string, std::vector<string>>(
{"device", {string("/device:GPU:0")}}));
context.params.insert(
std::pair<string, std::vector<string>>({"if_default", {string("true")}}));
TF_ASSERT_OK(SetDevice(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ("/device:CPU:0", node_lookup.at("mul_node1")->device());
EXPECT_EQ("/device:GPU:1", node_lookup.at("add_node2")->device());
EXPECT_EQ("/device:GPU:0", node_lookup.at("add_node3")->device());
EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node1")->device());
EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node2")->device());
EXPECT_EQ("/device:GPU:0", node_lookup.at("const_node3")->device());
EXPECT_EQ("/device:GPU:0", node_lookup.at("add_node4")->device());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/set_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/set_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
babaf6da-6250-4244-8550-c1c937e9a5b0 | cpp | google/tensorstore | poly | tensorstore/internal/poly/poly.h | tensorstore/internal/poly/poly_test.cc | #ifndef TENSORSTORE_INTERNAL_POLY_POLY_H_
#define TENSORSTORE_INTERNAL_POLY_POLY_H_
#include <cstddef>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/meta/type_traits.h"
#include "tensorstore/internal/poly/poly_impl.h"
namespace tensorstore {
namespace poly {
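// Evaluates to std::true_type iff `T` supports every call signature in
// `Signature...`, either via a matching operator() overload or via an
// unqualified PolyApply function found by argument-dependent lookup.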
template <typename T, typename... Signature>
using SupportsPolySignatures =
std::conjunction<typename internal_poly::SignatureTraits<
Signature>::template IsSupportedBy<T>...>;
template <size_t InlineSize, bool Copyable, typename... Signature>
class Poly;
template <typename T>
struct IsPoly : public std::false_type {};
template <size_t InlineSize, bool Copyable, typename... Signature>
struct IsPoly<Poly<InlineSize, Copyable, Signature...>>
: public std::true_type {};
template <typename T, bool Copyable, typename... Signature>
struct IsCompatibleWithPoly : public SupportsPolySignatures<T, Signature...> {};
template <typename T, typename... Signature>
struct IsCompatibleWithPoly<T, true, Signature...>
: public std::integral_constant<
bool, (std::is_copy_constructible<T>::value &&
SupportsPolySignatures<T, Signature...>::value)> {};
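// Type-erased wrapper that can hold any object supporting the given call
// signatures, in the spirit of std::function but with an arbitrary set of
// overloads. Objects no larger than `InlineSize_` (rounded up to the storage
// granularity) are stored inline; larger objects are heap-allocated.
// `Copyable` selects between copyable and move-only semantics.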
template <size_t InlineSize_, bool Copyable, typename... Signature>
class Poly
: private internal_poly::PolyImpl<Poly<InlineSize_, Copyable, Signature...>,
Signature...> {
template <typename, typename...>
friend class internal_poly::PolyImpl;
template <size_t, bool, typename...>
friend class Poly;
static constexpr size_t InlineSize =
internal_poly_storage::ActualInlineSize(InlineSize_);
using Storage = internal_poly_storage::Storage<InlineSize, Copyable>;
using Base = internal_poly::PolyImpl<Poly, Signature...>;
using VTable = internal_poly::VTableType<Signature...>;
template <typename Self>
using VTInstance =
internal_poly::VTableInstance<typename Storage::template Ops<Self>,
Copyable, Signature...>;
template <typename... S>
using HasConvertibleVTable =
std::is_convertible<internal_poly::VTableType<S...>, VTable>;
public:
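  // True if `T` is exactly this Poly type, or supports all of the required
  // signatures (and, when `Copyable` is true, is copy-constructible).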
template <typename T>
using IsCompatible =
std::disjunction<std::is_same<Poly, T>,
IsCompatibleWithPoly<T, Copyable, Signature...>>;
template <typename T>
using IsCompatibleAndConstructible =
std::disjunction<
std::is_same<Poly, absl::remove_cvref_t<T>>,
std::conjunction<
IsCompatibleWithPoly<absl::remove_cvref_t<T>, Copyable,
Signature...>,
std::is_constructible<absl::remove_cvref_t<T>, T&&>>>;
Poly() = default;
Poly(std::nullptr_t) noexcept {}
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
Poly(T&& obj) {
Construct(std::in_place_type_t<absl::remove_cvref_t<T>>{},
std::forward<T>(obj));
}
template <typename T, typename... U,
std::enable_if_t<(IsCompatible<T>::value &&
std::is_constructible_v<T, U&&...>)>* = nullptr>
Poly(std::in_place_type_t<T> in_place, U&&... arg) {
Construct(in_place, std::forward<U>(arg)...);
}
Poly(const Poly&) = default;
Poly(Poly&&) = default;
Poly& operator=(const Poly&) = default;
Poly& operator=(Poly&&) noexcept = default;
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
Poly& operator=(T&& obj) {
emplace(std::forward<T>(obj));
return *this;
}
Poly& operator=(std::nullptr_t) noexcept {
storage_.Destroy();
return *this;
}
template <typename T, typename... U,
std::enable_if_t<(IsCompatible<T>::value &&
std::is_constructible_v<T, U&&...>)>* = nullptr>
void emplace(U&&... arg) {
storage_.Destroy();
Construct(std::in_place_type_t<T>{}, std::forward<U>(arg)...);
}
template <typename T,
std::enable_if_t<IsCompatibleAndConstructible<T>::value>* = nullptr>
void emplace(T&& obj) {
storage_.Destroy();
Construct(std::in_place_type_t<absl::remove_cvref_t<T>>{},
std::forward<T>(obj));
}
using Base::operator();
explicit operator bool() const { return !storage_.null(); }
template <typename T>
T* target() {
return storage_.template get_if<T>();
}
template <typename T>
const T* target() const {
return storage_.template get_if<T>();
}
friend bool operator==(std::nullptr_t, const Poly& poly) {
return static_cast<bool>(poly) == false;
}
friend bool operator!=(std::nullptr_t, const Poly& poly) {
return static_cast<bool>(poly) == true;
}
friend bool operator==(const Poly& poly, std::nullptr_t) {
return static_cast<bool>(poly) == false;
}
friend bool operator!=(const Poly& poly, std::nullptr_t) {
return static_cast<bool>(poly) == true;
}
private:
template <typename T, typename... U>
std::enable_if_t<!IsPoly<T>::value> Construct(std::in_place_type_t<T>,
U&&... arg) {
return storage_.template ConstructT<T>(&VTInstance<T>::vtable,
static_cast<U&&>(arg)...);
}
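  // Construction from another Poly: if the source's inline size fits within
  // ours and its vtable converts to ours, adopt its storage directly rather
  // than wrapping one Poly inside another; otherwise treat the source Poly
  // as an ordinary contained object.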
template <size_t ISize, bool C, typename... S, typename T>
void Construct(std::in_place_type_t<Poly<ISize, C, S...>>, T&& poly) {
if constexpr (internal_poly_storage::ActualInlineSize(ISize) <=
InlineSize &&
HasConvertibleVTable<S...>::value) {
if constexpr (std::is_lvalue_reference_v<decltype(poly)>) {
storage_.CopyConstruct(std::forward<T>(poly).storage_);
} else {
storage_.Construct(std::forward<T>(poly).storage_);
}
} else {
storage_.template ConstructT<Poly<ISize, C, S...>>(
&VTInstance<Poly<ISize, C, S...>>::vtable, std::forward<T>(poly));
}
}
Storage storage_;
};
}
}
#endif | #include "tensorstore/internal/poly/poly.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
#include "absl/functional/function_ref.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::internal_poly::CallPolyApplyResult;
using ::tensorstore::internal_poly::HasPolyApply;
using ::tensorstore::internal_poly::IsCallPolyApplyResultConvertible;
using ::tensorstore::poly::Poly;
struct GetWidth {};
struct GetHeight {};
struct Scale {};
using PolyRectangle = Poly<sizeof(double), true, double(GetWidth) const,
double(GetHeight) const, void(Scale, double scalar)>;
struct Rectangle {
double width;
double height;
double operator()(GetWidth) const { return width; }
double operator()(GetHeight) const { return height; }
void operator()(Scale, double scalar) {
width *= scalar;
height *= scalar;
}
};
struct Square {
double size;
double operator()(GetWidth) const { return size; }
double operator()(GetHeight) const { return size; }
};
void PolyApply(Square& self, Scale, double scalar) { self.size *= scalar; }
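// Returns true when the object of type T held by `p` lives within the Poly's
// own footprint, i.e. inline storage was used rather than the heap.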
template <typename T, typename P>
bool IsStoredInline(P& p) {
auto min = reinterpret_cast<uintptr_t>(&p);
auto t = reinterpret_cast<uintptr_t>(p.template target<T>());
return t >= min && t <= (min + sizeof(p));
}
TEST(PolyTest, Example) {
PolyRectangle square = Square{5};
EXPECT_EQ(5, square(GetWidth{}));
EXPECT_EQ(5, square(GetHeight{}));
square(Scale{}, 2);
EXPECT_EQ(10, square(GetWidth{}));
EXPECT_EQ(10, square(GetHeight{}));
PolyRectangle rect = Rectangle{6, 7};
EXPECT_EQ(6, rect(GetWidth{}));
EXPECT_EQ(7, rect(GetHeight{}));
rect(Scale{}, 2);
EXPECT_EQ(12, rect(GetWidth{}));
EXPECT_EQ(14, rect(GetHeight{}));
}
TEST(PolyTest, Interface) {
class RectangleInterface {
public:
RectangleInterface(PolyRectangle poly) : poly(std::move(poly)) {}
operator PolyRectangle() { return poly; }
double GetHeight() const { return poly(::GetHeight{}); }
double GetWidth() const { return poly(::GetWidth{}); }
double GetArea() const { return GetHeight() * GetWidth(); }
void Scale(double scalar) { poly(::Scale{}, scalar); }
private:
PolyRectangle poly;
};
{
RectangleInterface rect(Square{5});
EXPECT_EQ(5, rect.GetWidth());
EXPECT_EQ(5, rect.GetHeight());
EXPECT_EQ(25, rect.GetArea());
rect.Scale(2);
EXPECT_EQ(10, rect.GetWidth());
EXPECT_EQ(10, rect.GetHeight());
}
{
RectangleInterface rect(Rectangle{6, 7});
EXPECT_EQ(6, rect.GetWidth());
EXPECT_EQ(7, rect.GetHeight());
EXPECT_EQ(42, rect.GetArea());
rect.Scale(2);
EXPECT_EQ(12, rect.GetWidth());
EXPECT_EQ(14, rect.GetHeight());
}
}
std::string Foo(Poly<0, true, std::string()> poly) { return "Text: " + poly(); }
int Foo(Poly<0, true, int()> poly) { return 3 + poly(); }
TEST(PolyTest, ConstructorOverloadResolution) {
EXPECT_EQ(6, Foo([] { return 3; }));
EXPECT_EQ("Text: Message", Foo([] { return "Message"; }));
}
struct Add {
std::shared_ptr<int> value;
Add(std::shared_ptr<int> value) : value(value) {}
template <typename T>
T operator()(T x) const {
return x + *value;
}
};
TEST(PolyTest, DefaultConstruct) {
Poly<1, true, int(int)&, float(float)&> poly;
EXPECT_FALSE(poly);
EXPECT_EQ(nullptr, poly.target<Add>());
const auto& const_poly = poly;
EXPECT_EQ(nullptr, const_poly.target<Add>());
}
TEST(PolyTest, NullptrConstruct) {
Poly<1, true, int(int)&, float(float)&> poly(nullptr);
EXPECT_FALSE(poly);
}
TEST(PolyTest, NullCopy) {
Poly<1, true, int(int)&, float(float)&> poly;
EXPECT_FALSE(poly);
auto poly2 = poly;
EXPECT_FALSE(poly2);
}
TEST(PolyTest, InlineConstruct) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_TRUE(IsStoredInline<Add>(poly));
auto* contained_obj = poly.target<Add>();
ASSERT_NE(nullptr, contained_obj);
EXPECT_EQ(amount, contained_obj->value);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, ConstructInplace) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(
std::in_place_type_t<Add>{}, amount);
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, Emplace) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly;
poly.emplace(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly.emplace(Add{amount2});
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, EmplaceInplace) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly;
poly.emplace<Add>(amount);
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly.emplace<Add>(amount2);
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, AssignNullptr) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
poly = nullptr;
EXPECT_EQ(1, amount.use_count());
EXPECT_FALSE(poly);
}
TEST(PolyTest, AssignObject) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
auto amount2 = std::make_shared<int>(2);
poly = Add{amount2};
EXPECT_TRUE(poly);
EXPECT_EQ(1, amount.use_count());
EXPECT_EQ(2, amount2.use_count());
EXPECT_EQ(4, poly(2));
EXPECT_EQ(4.5, poly(2.5));
}
TEST(PolyTest, CopyAssign) {
auto amount = std::make_shared<int>(1);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
auto amount2 = std::make_shared<int>(2);
Poly<sizeof(Add), true, int(int)&, double(double)&> poly2(Add{amount2});
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount2.use_count());
poly2 =
static_cast<const Poly<sizeof(Add), true, int(int)&, double(double)&>&>(
poly);
EXPECT_EQ(1, amount2.use_count());
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
TEST(PolyTest, InlineMove) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
auto poly2 = std::move(poly);
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, InlineCopy) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(Add), true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(IsStoredInline<Add>(poly));
auto poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_TRUE(IsStoredInline<Add>(poly2));
EXPECT_TRUE(poly);
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapConstruct) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_TRUE(poly.target<Add>());
EXPECT_FALSE(IsStoredInline<Add>(poly));
EXPECT_EQ(amount, poly.target<Add>()->value);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapMove) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_FALSE(IsStoredInline<Add>(poly));
auto poly2 = std::move(poly);
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, HeapCopy) {
auto amount = std::make_shared<int>(1);
{
Poly<0, true, int(int)&, double(double)&> poly(Add{amount});
EXPECT_TRUE(poly);
EXPECT_EQ(2, amount.use_count());
EXPECT_FALSE(IsStoredInline<Add>(poly));
auto poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_TRUE(poly);
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
struct AddPolyApply {
std::shared_ptr<int> value;
template <typename T>
friend T PolyApply(const AddPolyApply& self, T x) {
return x + *self.value;
}
};
static_assert(HasPolyApply<AddPolyApply, int>);
static_assert(!HasPolyApply<AddPolyApply, int, int>);
static_assert(!HasPolyApply<Add, int>);
static_assert(!HasPolyApply<Add, int, int>);
static_assert(std::is_same_v<CallPolyApplyResult<AddPolyApply, int>, int>);
static_assert(
std::is_same_v<CallPolyApplyResult<AddPolyApply, double>, double>);
static_assert(std::is_same_v<CallPolyApplyResult<Add, int>, int>);
static_assert(std::is_same_v<CallPolyApplyResult<Add, double>, double>);
static_assert(IsCallPolyApplyResultConvertible<Add, int, double>::value);
static_assert(IsCallPolyApplyResultConvertible<Add, double, double>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, int*, double>::value);
static_assert(IsCallPolyApplyResultConvertible<Add, void, double>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, void, int, int>::value);
static_assert(!IsCallPolyApplyResultConvertible<Add, int, int, int>::value);
static_assert(
IsCallPolyApplyResultConvertible<AddPolyApply, int, double>::value);
static_assert(
IsCallPolyApplyResultConvertible<AddPolyApply, double, double>::value);
static_assert(IsCallPolyApplyResultConvertible<AddPolyApply, void, int>::value);
static_assert(
!IsCallPolyApplyResultConvertible<AddPolyApply, int*, int>::value);
static_assert(
!IsCallPolyApplyResultConvertible<AddPolyApply, void, int, int>::value);
TEST(PolyTest, PolyApply) {
auto amount = std::make_shared<int>(1);
{
Poly<sizeof(AddPolyApply), true, int(int)&, double(double)&> poly(
AddPolyApply{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly);
EXPECT_EQ(3, poly(2));
EXPECT_EQ(3.5, poly(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, MoveOnly) {
struct Callable {
std::unique_ptr<int> value;
int operator()() const { return *value; }
};
using PolyT = Poly<sizeof(Callable), false, int() const>;
static_assert(!std::is_constructible_v<Poly<0, true, int() const>, Callable>);
static_assert(std::is_constructible_v<Poly<0, false, int() const>, Callable>);
PolyT poly(Callable{std::unique_ptr<int>(new int(5))});
auto poly2 = std::move(poly);
EXPECT_FALSE(poly);
EXPECT_EQ(5, poly2());
}
struct IntGetterSetter {
int operator()() { return value; }
void operator()(int v) { value = v; }
int value;
};
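// The next three tests cover conversion between Poly types whose signature
// lists differ only in order. The vtables are incompatible, so the source
// Poly itself becomes the stored target of the destination Poly -- hence
// target<Add>() is null while target<Poly1>() is not.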
TEST(PolyTest, CopyConstructFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
EXPECT_TRUE(poly.target<Add>());
Poly2 poly2 = poly;
EXPECT_TRUE(poly2);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_EQ(3, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, MoveConstructFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
Poly2 poly2 = std::move(poly);
EXPECT_FALSE(poly);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, EmplaceFromPolyWithIncompatibleVTable) {
auto amount = std::make_shared<int>(1);
{
using Poly1 = Poly<sizeof(Add), true, int(int)&, double(double)&>;
using Poly2 = Poly<sizeof(Add), true, double(double)&, int(int)&>;
Poly1 poly(Add{amount});
EXPECT_EQ(2, amount.use_count());
Poly2 poly2;
poly2.emplace(std::move(poly));
EXPECT_FALSE(poly);
EXPECT_FALSE(poly2.target<Add>());
EXPECT_TRUE(poly2.target<Poly1>());
EXPECT_TRUE(poly2);
EXPECT_EQ(2, amount.use_count());
EXPECT_EQ(3, poly2(2));
EXPECT_EQ(3.5, poly2(2.5));
}
EXPECT_EQ(1, amount.use_count());
}
TEST(PolyTest, CopyConstructFromPolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2{poly1};
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
}
TEST(PolyTest, MoveConstructFromPolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2{std::move(poly1)};
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
EXPECT_FALSE(poly1);
}
TEST(PolyTest, EmplacePolyWithCompatibleVTable) {
Poly<0, true, void(int), int()> poly1 = IntGetterSetter{5};
EXPECT_EQ(5, poly1());
poly1(6);
EXPECT_EQ(6, poly1());
Poly<0, true, int()> poly2;
poly2.emplace(std::move(poly1));
EXPECT_TRUE(poly2.target<IntGetterSetter>());
EXPECT_EQ(6, poly2());
EXPECT_FALSE(poly1);
}
template <typename T>
using SinglePoly = Poly<0, false, T>;
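// Regression check: constructing a Poly-like wrapper whose value type is an
// optional/Result of another Poly must not recursively re-enter the
// converting-constructor SFINAE checks, which would otherwise trigger an
// infinite template instantiation chain. Compiling and running this helper
// is the test.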
template <template <typename> class OptionalLike,
template <typename> class FunctionLike>
void TestAvoidsSfinaeLoop() {
using Poly1 = FunctionLike<void()>;
using Poly2 = FunctionLike<OptionalLike<Poly1>()>;
struct X {
void operator()() const {}
};
struct Y {
OptionalLike<Poly1> operator()() const { return X{}; }
};
auto use_poly2 = [](Poly2) { };
use_poly2(Poly2{Y{}});
}
TEST(PolyTest, AvoidsSfinaeLoop) {
TestAvoidsSfinaeLoop<tensorstore::Result, absl::FunctionRef>();
TestAvoidsSfinaeLoop<tensorstore::Result, std::function>();
TestAvoidsSfinaeLoop<std::optional, SinglePoly>();
TestAvoidsSfinaeLoop<tensorstore::Result, SinglePoly>();
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/poly.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/poly/poly_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
ad4656c8-136d-4a47-88e4-36e8e523ceea | cpp | tensorflow/tensorflow | convert_asset_args | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "absl/algorithm/container.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::mlir::tf_saved_model::AssetOp;
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::LookupBoundInputOfType;
using ::tensorflow::AssetFileDef;
SmallVector<NamedAttribute> ReplaceBoundInputAttrWithIndexPathAttr(
const ArrayRef<NamedAttribute> arg_attrs, const StringRef index_path,
Builder& builder) {
SmallVector<NamedAttribute> new_arg_attrs;
for (auto arg_attr : arg_attrs) {
if (arg_attr.getName() == "tf_saved_model.bound_input") continue;
new_arg_attrs.emplace_back(arg_attr);
}
const NamedAttribute index_path_attr(
builder.getStringAttr(kTfSavedModelIndexPathAttr),
builder.getStrArrayAttr({index_path}));
new_arg_attrs.emplace_back(index_path_attr);
return new_arg_attrs;
}
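// Note: 7 == strlen("assets/"); only a leading "assets/" directory prefix is
// stripped, so a filename like "assets/foo/bar.txt" becomes "foo/bar.txt".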
StringRef MaybeStripAssetDirectoryPrefix(const StringRef filename) {
if (filename.find("assets/") == 0) {
return filename.drop_front(7);
} else {
return filename;
}
}
AssetFileDef CreateAssetFileDef(const StringRef filename,
const StringRef tensor_name) {
AssetFileDef asset_file_def{};
asset_file_def.set_filename(MaybeStripAssetDirectoryPrefix(filename).str());
tensorflow::TensorInfo tensor_info{};
tensor_info.set_name(tensor_name.str());
*asset_file_def.mutable_tensor_info() = tensor_info;
return asset_file_def;
}
SmallVector<StringRef> GetEntryFunctionInputs(func::FuncOp func_op) {
auto entry_function_attr =
func_op->getAttrOfType<DictionaryAttr>("tf.entry_function");
SmallVector<StringRef> inputs;
mlir::dyn_cast_or_null<StringAttr>(entry_function_attr.get("inputs"))
.strref()
.split(inputs, ",");
return inputs;
}
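// Replaces the argument's `tf_saved_model.bound_input` attribute with a
// `tf_saved_model.index_path` attribute holding the entry-function input
// name (e.g. "arg_0:0"), marking the asset as an ordinary graph input.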
void ConvertMainArgAttrs(func::FuncOp main_func_op, const int arg_idx,
const StringRef index_path) {
const ArrayRef<NamedAttribute> arg_attrs =
main_func_op.getArgAttrDict(arg_idx).getValue();
Builder builder(main_func_op.getContext());
SmallVector<NamedAttribute> new_arg_attrs =
ReplaceBoundInputAttrWithIndexPathAttr(arg_attrs, index_path, builder);
main_func_op.setArgAttrs(arg_idx, new_arg_attrs);
}
}
FailureOr<SmallVector<AssetFileDef>> ConvertAssetArgs(ModuleOp module_op) {
func::FuncOp main_func_op = FindMainFuncOp(module_op);
if (!main_func_op) return failure();
SmallVector<StringRef> input_names = GetEntryFunctionInputs(main_func_op);
SymbolTable symbol_table(module_op);
SmallVector<AssetFileDef> asset_file_defs;
for (BlockArgument argument : main_func_op.getArguments()) {
const int arg_idx = argument.getArgNumber();
auto asset_op =
LookupBoundInputOfType<AssetOp>(main_func_op, arg_idx, symbol_table);
if (!asset_op) continue;
const StringRef input_name = input_names[arg_idx];
ConvertMainArgAttrs(main_func_op, arg_idx, input_name);
asset_file_defs.emplace_back(CreateAssetFileDef(
asset_op.getFilenameAttr(), input_name));
}
return asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::tensorflow::AssetFileDef;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
class ConvertAssetArgsTest : public ::testing::Test {
protected:
ConvertAssetArgsTest() {
ctx_.loadDialect<func::FuncDialect, TF::TensorFlowDialect,
tf_saved_model::TensorFlowSavedModelDialect>();
}
OwningOpRef<ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref = parseSourceString<ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
mlir::MLIRContext ctx_{};
};
func::FuncOp GetMainFuncOp(ModuleOp module_op) {
for (auto func_op : module_op.getOps<func::FuncOp>()) {
if (func_op.getSymName() == "main") {
return func_op;
}
}
return {};
}
TEST_F(ConvertAssetArgsTest, ConvertsSingleAssetArg) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.asset"() {filename = "assets/file_0.txt", sym_name = "__tf_saved_model_asset0"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.bound_input = @__tf_saved_model_asset0}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, SizeIs(1));
const AssetFileDef& asset_file_def = *asset_file_defs->begin();
EXPECT_THAT(asset_file_def.filename(), Eq("file_0.txt"));
EXPECT_THAT(asset_file_def.tensor_info().name(), Eq("arg_0:0"));
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, NonBoundedArgsNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, ArgsBoundedToGlobalTensorNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.global_tensor"() {type = tensor<2xi32>, value = dense<2> : tensor<2xi32>, sym_name = "__tf_saved_model_x"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.resource<tensor<2xi32>>> {tf_saved_model.bound_input = @__tf_saved_model_x}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), NotNull());
}
TEST_F(ConvertAssetArgsTest, FailsWhenNoMain) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(module {})mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(failed(asset_file_defs));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e235071c-7a31-4583-bb84-dc3a97a962d3 | cpp | google/tensorstore | function | tensorstore/serialization/function.cc | tensorstore/serialization/function_test.cc | #include "tensorstore/serialization/function.h"
#include <string_view>
#include <typeinfo>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace serialization {
namespace internal_serialization {
bool NonSerializableFunctionBase::Encode(EncodeSink& sink) const {
sink.Fail(internal_serialization::NonSerializableError());
return false;
}
void NonSerializableFunctionBase::GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const {
}
using SerializableFunctionRegistry =
internal::HeterogeneousHashSet<const RegisteredSerializableFunction*,
RegisteredSerializableFunction::Key,
&RegisteredSerializableFunction::key>;
SerializableFunctionRegistry& GetSerializableFunctionRegistry() {
static absl::NoDestructor<SerializableFunctionRegistry> registry;
return *registry;
}
void RegisterSerializableFunction(const RegisteredSerializableFunction& r) {
if (!GetSerializableFunctionRegistry().insert(&r).second) {
ABSL_LOG(FATAL) << "Duplicate SerializableFunction registration: id="
<< r.id << ", signature=" << r.signature->name();
}
}
SerializableFunctionBase::~SerializableFunctionBase() = default;
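// Decoding reads the serialized id and looks it up in the process-wide
// registry keyed by the (call-signature typeid, id) pair; an id that was
// never registered in this process produces a DataLoss error rather than a
// crash.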
bool DecodeSerializableFunction(DecodeSource& source,
SerializableFunctionBase::Ptr& value,
const std::type_info& signature) {
std::string_view id;
if (!serialization::Decode(source, id)) return false;
auto& registry = GetSerializableFunctionRegistry();
auto it = registry.find(RegisteredSerializableFunction::Key(signature, id));
if (it == registry.end()) {
source.Fail(absl::DataLossError(
tensorstore::StrCat("SerializableFunction not registered: ", id)));
return false;
}
return (*it)->decode(source, value);
}
}
}
} | #include "tensorstore/serialization/function.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::serialization::BindFront;
using ::tensorstore::serialization::NonSerializable;
using ::tensorstore::serialization::SerializableFunction;
using ::tensorstore::serialization::SerializationRoundTrip;
TEST(SerializationTest, Function) {
SerializableFunction<int()> func([] { return 3; });
EXPECT_EQ(3, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(3, func_decoded());
}
TEST(SerializationTest, BindFront) {
SerializableFunction<int()> func =
BindFront([](int a, int b) { return a + b; }, 2, 5);
EXPECT_EQ(7, func());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func_decoded,
SerializationRoundTrip(func));
EXPECT_EQ(7, func_decoded());
}
TEST(SerializationTest, NonSerializable) {
SerializableFunction<int()> func = NonSerializable{[y = 5] { return y; }};
EXPECT_EQ(5, func());
EXPECT_THAT(SerializationRoundTrip(func),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Serialization not supported.*"));
}
struct FunctionWithId1 {
constexpr static const char id[] = "my_test_function1";
int operator()() const { return 1; }
};
struct FunctionWithId2 {
constexpr static const char id[] = "my_test_function2";
int operator()() const { return 2; }
};
TEST(SerializationTest, Id) {
SerializableFunction<int()> func1 = FunctionWithId1{};
SerializableFunction<int()> func2 = FunctionWithId2{};
EXPECT_EQ(1, func1());
EXPECT_EQ(2, func2());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func1_copy,
SerializationRoundTrip(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto func2_copy,
SerializationRoundTrip(func2));
EXPECT_EQ(1, func1_copy());
EXPECT_EQ(2, func2_copy());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto func1_encoded, tensorstore::serialization::EncodeBatch(func1));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_func1_encoded,
tensorstore::serialization::EncodeBatch(
std::string_view(FunctionWithId1::id)));
EXPECT_EQ(expected_func1_encoded, func1_encoded);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/function.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/serialization/function_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
cd683c63-0679-46b1-95e4-b2624b9ce00a | cpp | google/tensorstore | uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <algorithm>
#include "absl/base/optimization.h"
#include "absl/strings/str_format.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/murmurhash3.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
namespace jb = tensorstore::internal_json_binding;
constexpr auto HashFunctionBinder = [](auto is_loading, const auto& options,
auto* obj, auto* j) {
using HashFunction = ShardingSpec::HashFunction;
return jb::Enum<HashFunction, const char*>({
{HashFunction::identity, "identity"},
{HashFunction::murmurhash3_x86_128, "murmurhash3_x86_128"},
})(is_loading, options, obj, j);
};
constexpr auto DefaultableDataEncodingJsonBinder =
[](auto is_loading, const auto& options, auto* obj, auto* j) {
using DataEncoding = ShardingSpec::DataEncoding;
return jb::DefaultValue<jb::kAlwaysIncludeDefaults>(
[](auto* v) { *v = DataEncoding::raw; }, DataEncodingJsonBinder)(
is_loading, options, obj, j);
};
}
TENSORSTORE_DEFINE_JSON_BINDER(
DataEncodingJsonBinder, jb::Enum<ShardingSpec::DataEncoding, const char*>({
{ShardingSpec::DataEncoding::raw, "raw"},
{ShardingSpec::DataEncoding::gzip, "gzip"},
}))
std::ostream& operator<<(std::ostream& os, ShardingSpec::HashFunction x) {
return os << jb::ToJson(x, HashFunctionBinder).value();
}
void to_json(::nlohmann::json& out,
ShardingSpec::HashFunction x) {
out = jb::ToJson(x, HashFunctionBinder).value();
}
std::ostream& operator<<(std::ostream& os, ShardingSpec::DataEncoding x) {
return os << jb::ToJson(x, DataEncodingJsonBinder).value();
}
std::ostream& operator<<(std::ostream& os, const ShardingSpec& x) {
return os << jb::ToJson(x).value();
}
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ShardingSpec, [](auto is_loading,
const auto& options,
auto* obj, auto* j) {
return jb::Object(
jb::Member("@type",
jb::Constant([] { return "neuroglancer_uint64_sharded_v1"; })),
jb::Member("preshift_bits", jb::Projection(&ShardingSpec::preshift_bits,
jb::Integer<int>(0, 64))),
jb::Member("minishard_bits", jb::Projection(&ShardingSpec::minishard_bits,
jb::Integer<int>(0, 32))),
jb::Member("shard_bits",
jb::Dependent([](auto is_loading, const auto& options,
auto* obj, auto* j) {
return jb::Projection(
&ShardingSpec::shard_bits,
jb::Integer<int>(0, 64 - obj->minishard_bits));
})),
jb::Member("hash", jb::Projection(&ShardingSpec::hash_function,
HashFunctionBinder)),
jb::Member("data_encoding",
jb::Projection(&ShardingSpec::data_encoding,
DefaultableDataEncodingJsonBinder)),
jb::Member("minishard_index_encoding",
jb::Projection(&ShardingSpec::minishard_index_encoding,
DefaultableDataEncodingJsonBinder)))(
is_loading, options, obj, j);
})
bool operator==(const ShardingSpec& a, const ShardingSpec& b) {
return a.hash_function == b.hash_function &&
a.preshift_bits == b.preshift_bits &&
a.minishard_bits == b.minishard_bits && a.shard_bits == b.shard_bits &&
a.data_encoding == b.data_encoding &&
a.minishard_index_encoding == b.minishard_index_encoding;
}
std::string GetShardKey(const ShardingSpec& sharding_spec,
std::string_view prefix, uint64_t shard_number) {
return internal::JoinPath(
prefix,
absl::StrFormat("%0*x.shard", CeilOfRatio(sharding_spec.shard_bits, 4),
shard_number));
}
namespace {
constexpr uint64_t ShiftRightUpTo64(uint64_t x, int amount) {
if (amount == 64) return 0;
return x >> amount;
}
uint64_t GetLowBitMask(int num_bits) {
if (num_bits == 64) return ~uint64_t(0);
return (uint64_t(1) << num_bits) - 1;
}
}
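// For murmurhash3_x86_128, the 128-bit digest arrives as four 32-bit words;
// only the low 64 bits (out[1]:out[0]) are kept as the chunk hash.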
uint64_t HashChunkId(ShardingSpec::HashFunction h, uint64_t key) {
switch (h) {
case ShardingSpec::HashFunction::identity:
return key;
case ShardingSpec::HashFunction::murmurhash3_x86_128: {
      uint32_t out[4] = {0, 0, 0, 0};
MurmurHash3_x86_128Hash64Bits(key, out);
return (static_cast<uint64_t>(out[1]) << 32) | out[0];
}
}
ABSL_UNREACHABLE();
}
ChunkCombinedShardInfo GetChunkShardInfo(const ShardingSpec& sharding_spec,
ChunkId chunk_id) {
ChunkCombinedShardInfo result;
const uint64_t hash_input =
ShiftRightUpTo64(chunk_id.value, sharding_spec.preshift_bits);
const uint64_t hash_output =
HashChunkId(sharding_spec.hash_function, hash_input);
result.shard_and_minishard =
hash_output &
GetLowBitMask(sharding_spec.minishard_bits + sharding_spec.shard_bits);
return result;
}
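// Bit layout of `shard_and_minishard`: the minishard number occupies the low
// `minishard_bits` and the shard number sits directly above it. For example
// (illustrative values), with minishard_bits=2 and shard_bits=3, shard 0b101
// and minishard 0b11 combine to 0b10111.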
ChunkSplitShardInfo GetSplitShardInfo(const ShardingSpec& sharding_spec,
ChunkCombinedShardInfo combined_info) {
ChunkSplitShardInfo result;
result.minishard = combined_info.shard_and_minishard &
GetLowBitMask(sharding_spec.minishard_bits);
result.shard = ShiftRightUpTo64(combined_info.shard_and_minishard,
sharding_spec.minishard_bits) &
GetLowBitMask(sharding_spec.shard_bits);
return result;
}
ChunkCombinedShardInfo GetCombinedShardInfo(const ShardingSpec& sharding_spec,
ChunkSplitShardInfo split_info) {
ChunkCombinedShardInfo result;
result.shard_and_minishard = split_info.minishard;
if (sharding_spec.minishard_bits != 64) {
result.shard_and_minishard |=
(split_info.shard << sharding_spec.minishard_bits);
}
return result;
}
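// Each of the 2^minishard_bits minishards contributes one 16-byte shard
// index entry (a pair of uint64 byte offsets), hence 16 << minishard_bits.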
int64_t ShardIndexSize(const ShardingSpec& sharding_spec) {
return static_cast<int64_t>(16) << sharding_spec.minishard_bits;
}
Result<ByteRange> GetAbsoluteShardByteRange(ByteRange relative_range,
const ShardingSpec& sharding_spec) {
const int64_t offset = ShardIndexSize(sharding_spec);
ByteRange result;
if (internal::AddOverflow(relative_range.inclusive_min, offset,
&result.inclusive_min) ||
internal::AddOverflow(relative_range.exclusive_max, offset,
&result.exclusive_max)) {
return absl::FailedPreconditionError(tensorstore::StrCat(
"Byte range ", relative_range,
" relative to the end of the shard index (", offset, ") is not valid"));
}
return result;
}
const EncodedChunk* FindChunk(span<const EncodedChunk> chunks,
MinishardAndChunkId minishard_and_chunk_id) {
const auto chunk_it = std::lower_bound(
chunks.begin(), chunks.end(), minishard_and_chunk_id,
[](const auto& chunk, const auto& minishard_and_chunk_id) {
return chunk.minishard_and_chunk_id < minishard_and_chunk_id;
});
if (chunk_it == chunks.end() ||
chunk_it->minishard_and_chunk_id != minishard_and_chunk_id) {
return nullptr;
}
return &*chunk_it;
}
}
} | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::MatchesStatus;
using ::tensorstore::neuroglancer_uint64_sharded::MinishardIndexEntry;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
TEST(ShardingSpecTest, Comparison) {
ShardingSpec a{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec b{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec c{
ShardingSpec::HashFunction::identity,
2,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec d{
ShardingSpec::HashFunction::identity,
1,
5,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec e{
ShardingSpec::HashFunction::identity,
1,
2,
9,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec f{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::gzip,
ShardingSpec::DataEncoding::gzip,
};
ShardingSpec g{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_EQ(e, e);
EXPECT_EQ(f, f);
EXPECT_EQ(g, g);
EXPECT_NE(a, b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(a, e);
EXPECT_NE(a, f);
EXPECT_NE(a, g);
}
TEST(ShardingSpecTest, ToJson) {
ShardingSpec a{
ShardingSpec::HashFunction::identity,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
};
EXPECT_EQ(::nlohmann::json({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "raw"},
{"minishard_index_encoding", "gzip"}}),
::nlohmann::json(a));
}
TEST(ShardingSpecTest, Parse) {
for (auto h : {ShardingSpec::HashFunction::identity,
ShardingSpec::HashFunction::murmurhash3_x86_128}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", ::nlohmann::json(h)},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "raw"},
{"minishard_index_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
h,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
}));
}
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::gzip,
}));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"data_encoding", "gzip"}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::murmurhash3_x86_128,
1,
2,
3,
ShardingSpec::DataEncoding::gzip,
ShardingSpec::DataEncoding::raw,
}));
for (const char* k :
{"@type", "hash", "preshift_bits", "minishard_bits", "shard_bits"}) {
::nlohmann::json j{{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}};
j.erase(k);
EXPECT_THAT(ShardingSpec::FromJson(j),
MatchesStatus(absl::StatusCode::kInvalidArgument));
j[k] = nullptr;
EXPECT_THAT(ShardingSpec::FromJson(j),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v2"},
{"hash", "murmurhash3_x86_128"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"neuroglancer_uint64_sharded_v2\".*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "invalid_hash"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "gzip"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"invalid_hash\".*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", 1234}}),
MatchesStatus(absl::StatusCode::kInvalidArgument, ".*1234.*"));
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 2},
{"shard_bits", 3},
{"minishard_index_encoding", "raw"},
{"data_encoding", "invalid_encoding"}}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*\"invalid_encoding\".*"));
for (int i : {0, 1, 63, 64}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", i},
{"minishard_bits", 2},
{"shard_bits", 3}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
i,
2,
3,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 65, 66}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", i},
{"minishard_bits", 2},
{"shard_bits", 3}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (int i : {0, 1, 31, 32}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", i},
{"shard_bits", 0}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
1,
i,
0,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 33, 34, 35}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", i},
{"shard_bits", 0}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
for (int i : {0, 1, 64 - 8, 64 - 7}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 7},
{"shard_bits", i}}),
::testing::Optional(ShardingSpec{
ShardingSpec::HashFunction::identity,
1,
7,
i,
ShardingSpec::DataEncoding::raw,
ShardingSpec::DataEncoding::raw,
}));
}
for (int i : {-1, -2, 64 - 6, 64 - 5, 65, 66}) {
EXPECT_THAT(
ShardingSpec::FromJson({{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 1},
{"minishard_bits", 7},
{"shard_bits", i}}),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
EXPECT_THAT(ShardingSpec::FromJson("invalid"),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST(MinishardIndexEntryTest, Comparison) {
MinishardIndexEntry a{{1}, {2, 3}};
MinishardIndexEntry b{{1}, {3, 4}};
MinishardIndexEntry c{{2}, {2, 3}};
MinishardIndexEntry d{{2}, {3, 4}};
EXPECT_EQ(a, a);
EXPECT_EQ(b, b);
EXPECT_EQ(c, c);
EXPECT_EQ(d, d);
EXPECT_FALSE(a != a);
EXPECT_FALSE(a == b);
EXPECT_NE(a, c);
EXPECT_NE(a, d);
EXPECT_NE(b, c);
EXPECT_NE(b, d);
EXPECT_NE(c, d);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
078c6641-034b-4980-8a9d-61b5f28ff7fc | cpp | tensorflow/tensorflow | trt_testutils | tensorflow/compiler/tf2tensorrt/utils/trt_testutils.cc | tensorflow/compiler/tf2tensorrt/utils/trt_testutils_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <map>
#include <string>
#include <vector>
#include <gmock/gmock.h>
namespace tensorflow {
namespace tensorrt {
namespace convert {
::testing::Matcher<std::vector<float>> ArrayFloatNear(
const std::vector<float>& values, float max_abs_error, bool nan_sensitive) {
std::vector<::testing::Matcher<float>> matchers;
matchers.reserve(values.size());
for (const float& v : values) {
if (nan_sensitive) {
matchers.emplace_back(::testing::NanSensitiveFloatNear(v, max_abs_error));
} else if (max_abs_error == 0) {
matchers.emplace_back(::testing::FloatEq(v));
} else {
EXPECT_GE(max_abs_error, 0);
matchers.emplace_back(::testing::FloatNear(v, max_abs_error));
}
}
return ::testing::ElementsAreArray(matchers);
}
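// Note: callers are expected to pass at most nvinfer1::Dims::MAX_DIMS
// entries; this helper performs no bounds check on d.size().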
nvinfer1::Dims CreateDims(const std::vector<int>& d) {
nvinfer1::Dims dims;
dims.nbDims = d.size();
for (int i = 0; i < d.size(); ++i) {
dims.d[i] = d[i];
}
return dims;
}
NodeDef MakeNodeDef(const std::string& name, const std::string& op,
const std::vector<std::string>& inputs,
const std::map<std::string, AttrValue> attrs) {
NodeDef node_def;
node_def.set_name(name);
node_def.set_op(op);
for (const auto& input : inputs) {
node_def.add_input(input);
}
for (const auto& attr : attrs) {
(*node_def.mutable_attr())[attr.first] = attr.second;
}
return node_def;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/utils/trt_testutils.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::testing::AllOf;
using ::testing::AnyOf;
using ::testing::Eq;
using ::testing::Not;
TEST(TrtDimsMatcher, ParameterizedMatchers) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), DimsAreArray({1, 2, 3, 4}));
EXPECT_THAT(nvinfer1::Dims{}, Not(DimsAreArray({1, 2})));
std::vector<int> empty_dims;
EXPECT_THAT(nvinfer1::Dims{}, DimsAreArray(empty_dims));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 3, 5})));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(DimsAreArray({1, 2, 5})));
}
TEST(TrtDimsMatcher, EqualityMatcher) {
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Eq(nvinfer1::Dims4(1, 2, 3, 4)));
EXPECT_THAT(nvinfer1::Dims{}, Eq(nvinfer1::Dims()));
EXPECT_THAT(nvinfer1::Dims{}, Not(Eq(nvinfer1::DimsHW())));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4),
Not(Eq(nvinfer1::Dims4(1, 2, 3, 3))));
EXPECT_THAT(nvinfer1::Dims4(1, 2, 3, 4), Not(Eq(nvinfer1::Dims2(1, 2))));
}
TEST(INetworkDefinitionMatchers, CorrectlyMatch) {
Logger& logger = *Logger::GetLogger();
TrtUniquePtrType<nvinfer1::IBuilder> builder(
nvinfer1::createInferBuilder(logger));
TrtUniquePtrType<nvinfer1::INetworkDefinition> network(
builder->createNetworkV2(0L));
EXPECT_THAT(network.get(), AllOf(Not(LayerNamesAreArray({"some layer"})),
LayerNamesNonEmpty()));
nvinfer1::Weights weights;
weights.type = nvinfer1::DataType::kFLOAT;
std::array<float, 1> vals;
weights.values = vals.data();
weights.count = 1;
auto input = network->addInput("input-tensor", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims3{1, 1, 1});
ASSERT_NE(input, nullptr);
const char* fc_layer_name = "my-fc-layer";
auto layer = network->addFullyConnected(*input, 1, weights, weights);
ASSERT_NE(layer, nullptr);
layer->setName(fc_layer_name);
EXPECT_THAT(network.get(),
AllOf(LayerNamesNonEmpty(), LayerNamesAreArray({fc_layer_name})));
layer = network->addFullyConnected(*input, 1, weights, weights);
EXPECT_THAT(network.get(), AllOf(LayerNamesNonEmpty(),
Not(LayerNamesAreArray({fc_layer_name}))));
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_testutils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_testutils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd8bc1a3-cd69-4c3e-b828-b3739fd2b8e0 | cpp | google/quiche | encapsulated_web_transport | quiche/web_transport/encapsulated/encapsulated_web_transport.cc | quiche/web_transport/encapsulated/encapsulated_web_transport_test.cc | #include "quiche/web_transport/encapsulated/encapsulated_web_transport.h"
#include <stdbool.h>
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "quiche/common/capsule.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/quiche_circular_deque.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/web_transport/web_transport.h"
namespace webtransport {
namespace {
using ::quiche::Capsule;
using ::quiche::CapsuleType;
using ::quiche::CloseWebTransportSessionCapsule;
constexpr uint64_t kEncapsulatedMaxDatagramSize = 9000;
constexpr StreamPriority kDefaultPriority = StreamPriority{0, 0};
}
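// Stream IDs follow the QUIC numbering convention: the two low bits encode
// initiator and directionality (client-initiated bidirectional streams start
// at 0, server-initiated at 1; client unidirectional at 2, server at 3), and
// each counter advances by 4 per opened stream.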
EncapsulatedSession::EncapsulatedSession(
Perspective perspective, FatalErrorCallback fatal_error_callback)
: perspective_(perspective),
fatal_error_callback_(std::move(fatal_error_callback)),
capsule_parser_(this),
next_outgoing_bidi_stream_(perspective == Perspective::kClient ? 0 : 1),
next_outgoing_unidi_stream_(perspective == Perspective::kClient ? 2 : 3) {
QUICHE_DCHECK(IsIdOpenedBy(next_outgoing_bidi_stream_, perspective));
QUICHE_DCHECK(IsIdOpenedBy(next_outgoing_unidi_stream_, perspective));
}
void EncapsulatedSession::InitializeClient(
std::unique_ptr<SessionVisitor> visitor,
quiche::HttpHeaderBlock& , quiche::WriteStream* writer,
quiche::ReadStream* reader) {
if (state_ != kUninitialized) {
OnFatalError("Called InitializeClient() in an invalid state");
return;
}
if (perspective_ != Perspective::kClient) {
OnFatalError("Called InitializeClient() on a server session");
return;
}
visitor_ = std::move(visitor);
writer_ = writer;
reader_ = reader;
state_ = kWaitingForHeaders;
}
void EncapsulatedSession::InitializeServer(
std::unique_ptr<SessionVisitor> visitor,
const quiche::HttpHeaderBlock& ,
quiche::HttpHeaderBlock& , quiche::WriteStream* writer,
quiche::ReadStream* reader) {
if (state_ != kUninitialized) {
OnFatalError("Called InitializeServer() in an invalid state");
return;
}
if (perspective_ != Perspective::kServer) {
OnFatalError("Called InitializeServer() on a client session");
return;
}
visitor_ = std::move(visitor);
writer_ = writer;
reader_ = reader;
OpenSession();
}
void EncapsulatedSession::ProcessIncomingServerHeaders(
const quiche::HttpHeaderBlock& ) {
if (state_ != kWaitingForHeaders) {
OnFatalError("Called ProcessIncomingServerHeaders() in an invalid state");
return;
}
OpenSession();
}
void EncapsulatedSession::CloseSession(SessionErrorCode error_code,
absl::string_view error_message) {
switch (state_) {
case kUninitialized:
case kWaitingForHeaders:
OnFatalError(absl::StrCat(
"Attempted to close a session before it opened with error 0x",
absl::Hex(error_code), ": ", error_message));
return;
case kSessionClosing:
case kSessionClosed:
OnFatalError(absl::StrCat(
"Attempted to close a session that is already closed with error 0x",
absl::Hex(error_code), ": ", error_message));
return;
case kSessionOpen:
break;
}
state_ = kSessionClosing;
buffered_session_close_ =
BufferedClose{error_code, std::string(error_message)};
OnCanWrite();
}
Stream* EncapsulatedSession::AcceptIncomingStream(
quiche::QuicheCircularDeque<StreamId>& queue) {
while (!queue.empty()) {
StreamId id = queue.front();
queue.pop_front();
Stream* stream = GetStreamById(id);
if (stream == nullptr) {
continue;
}
return stream;
}
return nullptr;
}
Stream* EncapsulatedSession::AcceptIncomingBidirectionalStream() {
return AcceptIncomingStream(incoming_bidirectional_streams_);
}
Stream* EncapsulatedSession::AcceptIncomingUnidirectionalStream() {
return AcceptIncomingStream(incoming_unidirectional_streams_);
}
bool EncapsulatedSession::CanOpenNextOutgoingBidirectionalStream() {
return true;
}
bool EncapsulatedSession::CanOpenNextOutgoingUnidirectionalStream() {
return true;
}
Stream* EncapsulatedSession::OpenOutgoingStream(StreamId& counter) {
StreamId stream_id = counter;
counter += 4;
auto [it, inserted] = streams_.emplace(
std::piecewise_construct, std::forward_as_tuple(stream_id),
std::forward_as_tuple(this, stream_id));
QUICHE_DCHECK(inserted);
return &it->second;
}
Stream* EncapsulatedSession::OpenOutgoingBidirectionalStream() {
if (!CanOpenNextOutgoingBidirectionalStream()) {
return nullptr;
}
return OpenOutgoingStream(next_outgoing_bidi_stream_);
}
Stream* EncapsulatedSession::OpenOutgoingUnidirectionalStream() {
if (!CanOpenNextOutgoingUnidirectionalStream()) {
return nullptr;
}
return OpenOutgoingStream(next_outgoing_unidi_stream_);
}
Stream* EncapsulatedSession::GetStreamById(StreamId id) {
auto it = streams_.find(id);
if (it == streams_.end()) {
return nullptr;
}
return &it->second;
}
DatagramStats EncapsulatedSession::GetDatagramStats() {
DatagramStats stats;
stats.expired_outgoing = 0;
stats.lost_outgoing = 0;
return stats;
}
SessionStats EncapsulatedSession::GetSessionStats() {
return SessionStats();
}
void EncapsulatedSession::NotifySessionDraining() {
SendControlCapsule(quiche::DrainWebTransportSessionCapsule());
OnCanWrite();
}
void EncapsulatedSession::SetOnDraining(
quiche::SingleUseCallback<void()> callback) {
draining_callback_ = std::move(callback);
}
DatagramStatus EncapsulatedSession::SendOrQueueDatagram(
absl::string_view datagram) {
if (datagram.size() > GetMaxDatagramSize()) {
return DatagramStatus{
DatagramStatusCode::kTooBig,
absl::StrCat("Datagram is ", datagram.size(),
" bytes long, while the specified maximum size is ",
GetMaxDatagramSize())};
}
bool write_blocked;
switch (state_) {
case kUninitialized:
write_blocked = true;
break;
case kWaitingForHeaders:
case kSessionOpen:
write_blocked = !writer_->CanWrite();
break;
case kSessionClosing:
case kSessionClosed:
return DatagramStatus{DatagramStatusCode::kInternalError,
"Writing into an already closed session"};
}
if (write_blocked) {
control_capsule_queue_.push_back(
quiche::SerializeCapsule(Capsule::Datagram(datagram), allocator_));
return DatagramStatus{DatagramStatusCode::kSuccess, ""};
}
quiche::QuicheBuffer buffer =
quiche::SerializeDatagramCapsuleHeader(datagram.size(), allocator_);
std::array spans = {buffer.AsStringView(), datagram};
absl::Status write_status =
writer_->Writev(absl::MakeConstSpan(spans), quiche::StreamWriteOptions());
if (!write_status.ok()) {
OnWriteError(write_status);
return DatagramStatus{
DatagramStatusCode::kInternalError,
absl::StrCat("Write error for datagram: ", write_status.ToString())};
}
return DatagramStatus{DatagramStatusCode::kSuccess, ""};
}
uint64_t EncapsulatedSession::GetMaxDatagramSize() const {
return kEncapsulatedMaxDatagramSize;
}
void EncapsulatedSession::SetDatagramMaxTimeInQueue(
absl::Duration ) {
}
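// Flush order when the underlying stream becomes writable: a buffered
// session close is sent first (and terminates the session), then any queued
// control capsules, and only then are write-blocked data streams drained in
// the order chosen by the scheduler.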
void EncapsulatedSession::OnCanWrite() {
if (state_ == kUninitialized || !writer_) {
OnFatalError("Trying to write before the session is initialized");
return;
}
if (state_ == kSessionClosed) {
OnFatalError("Trying to write before the session is closed");
return;
}
if (state_ == kSessionClosing) {
if (writer_->CanWrite()) {
CloseWebTransportSessionCapsule capsule{
buffered_session_close_.error_code,
buffered_session_close_.error_message};
quiche::QuicheBuffer buffer =
quiche::SerializeCapsule(Capsule(std::move(capsule)), allocator_);
absl::Status write_status = SendFin(buffer.AsStringView());
if (!write_status.ok()) {
OnWriteError(quiche::AppendToStatus(write_status,
" while writing WT_CLOSE_SESSION"));
return;
}
OnSessionClosed(buffered_session_close_.error_code,
buffered_session_close_.error_message);
}
return;
}
while (writer_->CanWrite() && !control_capsule_queue_.empty()) {
absl::Status write_status = quiche::WriteIntoStream(
*writer_, control_capsule_queue_.front().AsStringView());
if (!write_status.ok()) {
OnWriteError(write_status);
return;
}
control_capsule_queue_.pop_front();
}
while (writer_->CanWrite()) {
absl::StatusOr<StreamId> next_id = scheduler_.PopFront();
if (!next_id.ok()) {
QUICHE_DCHECK_EQ(next_id.status().code(), absl::StatusCode::kNotFound);
return;
}
auto it = streams_.find(*next_id);
if (it == streams_.end()) {
QUICHE_BUG(WT_H2_NextStreamNotInTheMap);
OnFatalError("Next scheduled stream is not in the map");
return;
}
QUICHE_DCHECK(it->second.HasPendingWrite());
it->second.FlushPendingWrite();
}
}
void EncapsulatedSession::OnCanRead() {
if (state_ == kSessionClosed || state_ == kSessionClosing) {
return;
}
bool has_fin = quiche::ProcessAllReadableRegions(
*reader_, [&](absl::string_view fragment) {
capsule_parser_.IngestCapsuleFragment(fragment);
});
if (has_fin) {
capsule_parser_.ErrorIfThereIsRemainingBufferedData();
OnSessionClosed(0, "");
}
if (state_ == kSessionOpen) {
GarbageCollectStreams();
}
}
bool EncapsulatedSession::OnCapsule(const quiche::Capsule& capsule) {
switch (capsule.capsule_type()) {
case CapsuleType::DATAGRAM:
visitor_->OnDatagramReceived(
capsule.datagram_capsule().http_datagram_payload);
break;
case CapsuleType::DRAIN_WEBTRANSPORT_SESSION:
if (draining_callback_) {
std::move(draining_callback_)();
}
break;
case CapsuleType::CLOSE_WEBTRANSPORT_SESSION:
OnSessionClosed(
capsule.close_web_transport_session_capsule().error_code,
std::string(
capsule.close_web_transport_session_capsule().error_message));
break;
case CapsuleType::WT_STREAM:
case CapsuleType::WT_STREAM_WITH_FIN:
ProcessStreamCapsule(capsule,
capsule.web_transport_stream_data().stream_id);
break;
case CapsuleType::WT_RESET_STREAM:
ProcessStreamCapsule(capsule,
capsule.web_transport_reset_stream().stream_id);
break;
case CapsuleType::WT_STOP_SENDING:
ProcessStreamCapsule(capsule,
capsule.web_transport_stop_sending().stream_id);
break;
default:
break;
}
return state_ != kSessionClosed;
}
void EncapsulatedSession::OnCapsuleParseFailure(
absl::string_view error_message) {
if (state_ == kSessionClosed) {
return;
}
OnFatalError(absl::StrCat("Stream parse error: ", error_message));
}
void EncapsulatedSession::ProcessStreamCapsule(const quiche::Capsule& capsule,
StreamId stream_id) {
bool new_stream_created = false;
auto it = streams_.find(stream_id);
if (it == streams_.end()) {
if (IsOutgoing(stream_id)) {
return;
}
it = streams_.emplace_hint(it, std::piecewise_construct,
std::forward_as_tuple(stream_id),
std::forward_as_tuple(this, stream_id));
new_stream_created = true;
}
InnerStream& stream = it->second;
stream.ProcessCapsule(capsule);
if (new_stream_created) {
if (IsBidirectionalId(stream_id)) {
incoming_bidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingBidirectionalStreamAvailable();
} else {
incoming_unidirectional_streams_.push_back(stream_id);
visitor_->OnIncomingUnidirectionalStreamAvailable();
}
}
}
void EncapsulatedSession::InnerStream::ProcessCapsule(
const quiche::Capsule& capsule) {
switch (capsule.capsule_type()) {
case CapsuleType::WT_STREAM:
case CapsuleType::WT_STREAM_WITH_FIN: {
if (fin_received_) {
session_->OnFatalError(
"Received stream data for a stream that has already received a "
"FIN");
return;
}
if (read_side_closed_) {
return;
}
fin_received_ = capsule.capsule_type() == CapsuleType::WT_STREAM_WITH_FIN;
const quiche::WebTransportStreamDataCapsule& data =
capsule.web_transport_stream_data();
if (!data.data.empty()) {
incoming_reads_.push_back(IncomingRead{data.data, std::string()});
}
if (visitor_ != nullptr) {
visitor_->OnCanRead();
}
for (IncomingRead& read : incoming_reads_) {
QUICHE_DCHECK(!read.data.empty());
if (read.storage.empty()) {
read.storage = std::string(read.data);
read.data = read.storage;
}
}
return;
}
case CapsuleType::WT_RESET_STREAM:
CloseReadSide(capsule.web_transport_reset_stream().error_code);
return;
case CapsuleType::WT_STOP_SENDING:
CloseWriteSide(capsule.web_transport_stop_sending().error_code);
return;
default:
QUICHE_BUG(WT_H2_ProcessStreamCapsule_Unknown)
<< "Unexpected capsule dispatched to InnerStream: " << capsule;
session_->OnFatalError(
"Internal error: Unexpected capsule dispatched to InnerStream");
return;
}
}
void EncapsulatedSession::OpenSession() {
state_ = kSessionOpen;
visitor_->OnSessionReady();
OnCanWrite();
OnCanRead();
}
absl::Status EncapsulatedSession::SendFin(absl::string_view data) {
QUICHE_DCHECK(!fin_sent_);
fin_sent_ = true;
quiche::StreamWriteOptions options;
options.set_send_fin(true);
return quiche::WriteIntoStream(*writer_, data, options);
}
void EncapsulatedSession::OnSessionClosed(SessionErrorCode error_code,
const std::string& error_message) {
if (!fin_sent_) {
absl::Status status = SendFin("");
if (!status.ok()) {
OnWriteError(status);
return;
}
}
if (session_close_notified_) {
QUICHE_DCHECK_EQ(state_, kSessionClosed);
return;
}
state_ = kSessionClosed;
session_close_notified_ = true;
if (visitor_ != nullptr) {
visitor_->OnSessionClosed(error_code, error_message);
}
}
void EncapsulatedSession::OnFatalError(absl::string_view error_message) {
QUICHE_DLOG(ERROR) << "Fatal error in encapsulated WebTransport: "
<< error_message;
state_ = kSessionClosed;
if (fatal_error_callback_) {
std::move(fatal_error_callback_)(error_message);
fatal_error_callback_ = nullptr;
}
}
void EncapsulatedSession::OnWriteError(absl::Status error) {
OnFatalError(absl::StrCat(
error, " while trying to write encapsulated WebTransport data"));
}
EncapsulatedSession::InnerStream::InnerStream(EncapsulatedSession* session,
StreamId id)
: session_(session),
id_(id),
read_side_closed_(IsUnidirectionalId(id) &&
IsIdOpenedBy(id, session->perspective_)),
write_side_closed_(IsUnidirectionalId(id) &&
!IsIdOpenedBy(id, session->perspective_)) {
if (!write_side_closed_) {
absl::Status status = session_->scheduler_.Register(id_, kDefaultPriority);
if (!status.ok()) {
QUICHE_BUG(WT_H2_FailedToRegisterNewStream) << status;
session_->OnFatalError(
"Failed to register new stream with the scheduler");
return;
}
}
}
quiche::ReadStream::ReadResult EncapsulatedSession::InnerStream::Read(
absl::Span<char> output) {
const size_t total_size = output.size();
for (const IncomingRead& read : incoming_reads_) {
size_t size_to_read = std::min(read.size(), output.size());
if (size_to_read == 0) {
break;
}
memcpy(output.data(), read.data.data(), size_to_read);
output = output.subspan(size_to_read);
}
bool fin_consumed = SkipBytes(total_size);
return ReadResult{total_size, fin_consumed};
}
quiche::ReadStream::ReadResult EncapsulatedSession::InnerStream::Read(
std::string* output) {
const size_t total_size = ReadableBytes();
const size_t initial_offset = output->size();
output->resize(initial_offset + total_size);
return Read(absl::Span<char>(&((*output)[initial_offset]), total_size));
}
size_t EncapsulatedSession::InnerStream::ReadableBytes() const {
size_t total_size = 0;
for (const IncomingRead& read : incoming_reads_) {
total_size += read.size();
}
return total_size;
}
quiche::ReadStream::PeekResult
EncapsulatedSession::InnerStream::PeekNextReadableRegion() const {
if (incoming_reads_.empty()) {
return PeekResult{absl::string_view(), fin_received_, fin_received_};
}
return PeekResult{incoming_reads_.front().data,
fin_received_ && incoming_reads_.size() == 1,
fin_received_};
}
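// Returns true only when the skip drains the buffer and thereby consumes a
// received FIN; partial skips, or skips on a stream with no pending FIN,
// return false.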
bool EncapsulatedSession::InnerStream::SkipBytes(size_t bytes) {
size_t remaining = bytes;
while (remaining > 0) {
if (incoming_reads_.empty()) {
QUICHE_BUG(WT_H2_SkipBytes_toomuch)
<< "Requested to skip " << remaining
<< " bytes that are not present in the read buffer.";
return false;
}
IncomingRead& current = incoming_reads_.front();
if (remaining < current.size()) {
current.data = current.data.substr(remaining);
return false;
}
remaining -= current.size();
incoming_reads_.pop_front();
}
if (incoming_reads_.empty() && fin_received_) {
fin_consumed_ = true;
CloseReadSide(std::nullopt);
return true;
}
return false;
}
absl::Status EncapsulatedSession::InnerStream::Writev(
const absl::Span<const absl::string_view> data,
const quiche::StreamWriteOptions& options) {
if (write_side_closed_) {
return absl::FailedPreconditionError(
"Trying to write into an already-closed stream");
}
if (fin_buffered_) {
return absl::FailedPreconditionError("FIN already buffered");
}
if (!CanWrite()) {
return absl::FailedPreconditionError(
"Trying to write into a stream when CanWrite() = false");
}
const absl::StatusOr<bool> should_yield =
session_->scheduler_.ShouldYield(id_);
if (!should_yield.ok()) {
QUICHE_BUG(WT_H2_Writev_NotRegistered) << should_yield.status();
session_->OnFatalError("Stream not registered with the scheduler");
return absl::InternalError("Stream not registered with the scheduler");
}
const bool write_blocked = !session_->writer_->CanWrite() || *should_yield ||
!pending_write_.empty();
if (write_blocked) {
fin_buffered_ = options.send_fin();
for (absl::string_view chunk : data) {
absl::StrAppend(&pending_write_, chunk);
}
absl::Status status = session_->scheduler_.Schedule(id_);
if (!status.ok()) {
QUICHE_BUG(WT_H2_Writev_CantSchedule) << status;
session_->OnFatalError("Could not schedule a write-blocked stream");
return absl::InternalError("Could not schedule a write-blocked stream");
}
return absl::OkStatus();
}
size_t bytes_written = WriteInner(data, options.send_fin());
QUICHE_DCHECK(bytes_written == 0 ||
bytes_written == quiche::TotalStringViewSpanSize(data));
if (bytes_written == 0) {
for (absl::string_view chunk : data) {
absl::StrAppend(&pending_write_, chunk);
}
}
if (options.send_fin()) {
CloseWriteSide(std::nullopt);
}
return absl::OkStatus();
}
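// Backpressure: a stream stops being writable once its buffered data exceeds
// the session-wide per-stream limit, or once its write side (or the whole
// session) has been closed.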
bool EncapsulatedSession::InnerStream::CanWrite() const {
return session_->state_ != EncapsulatedSession::kSessionClosed &&
!write_side_closed_ &&
(pending_write_.size() <= session_->max_stream_data_buffered_);
}
void EncapsulatedSession::InnerStream::FlushPendingWrite() {
QUICHE_DCHECK(!write_side_closed_);
QUICHE_DCHECK(session_->writer_->CanWrite());
QUICHE_DCHECK(!pending_write_.empty());
absl::string_view to_write = pending_write_;
size_t bytes_written =
WriteInner(absl::MakeSpan(&to_write, 1), fin_buffered_);
if (bytes_written < to_write.size()) {
pending_write_ = pending_write_.substr(bytes_written);
return;
}
pending_write_.clear();
if (fin_buffered_) {
CloseWriteSide(std::nullopt);
}
if (!write_side_closed_ && visitor_ != nullptr) {
visitor_->OnCanWrite();
}
}
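// Serializes a stream-data capsule header for |total_size| payload bytes and
// hands the header plus the payload chunks to the session writer as a single
// vector of string views, so the payload itself is never copied here.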
size_t EncapsulatedSession::InnerStream::WriteInner(
absl::Span<const absl::string_view> data, bool fin) {
size_t total_size = quiche::TotalStringViewSpanSize(data);
if (total_size == 0 && !fin) {
session_->OnFatalError("Attempted to make an empty write with fin=false");
return 0;
}
quiche::QuicheBuffer header =
quiche::SerializeWebTransportStreamCapsuleHeader(id_, fin, total_size,
session_->allocator_);
std::vector<absl::string_view> views_to_write;
views_to_write.reserve(data.size() + 1);
views_to_write.push_back(header.AsStringView());
absl::c_copy(data, std::back_inserter(views_to_write));
absl::Status write_status = session_->writer_->Writev(
views_to_write, quiche::kDefaultStreamWriteOptions);
if (!write_status.ok()) {
session_->OnWriteError(write_status);
return 0;
}
return total_size;
}
void EncapsulatedSession::InnerStream::AbruptlyTerminate(absl::Status error) {
QUICHE_DLOG(INFO) << "Abruptly terminating the stream due to error: "
<< error;
ResetDueToInternalError();
}
void EncapsulatedSession::InnerStream::ResetWithUserCode(
StreamErrorCode error) {
if (reset_frame_sent_) {
return;
}
reset_frame_sent_ = true;
session_->SendControlCapsule(
quiche::WebTransportResetStreamCapsule{id_, error});
CloseWriteSide(std::nullopt);
}
void EncapsulatedSession::InnerStream::SendStopSending(StreamErrorCode error) {
if (stop_sending_sent_) {
return;
}
stop_sending_sent_ = true;
session_->SendControlCapsule(
quiche::WebTransportStopSendingCapsule{id_, error});
CloseReadSide(std::nullopt);
}
void EncapsulatedSession::InnerStream::CloseReadSide(
std::optional<StreamErrorCode> error) {
if (read_side_closed_) {
return;
}
read_side_closed_ = true;
incoming_reads_.clear();
if (error.has_value() && visitor_ != nullptr) {
visitor_->OnResetStreamReceived(*error);
}
if (CanBeGarbageCollected()) {
session_->streams_to_garbage_collect_.push_back(id_);
}
}
void EncapsulatedSession::InnerStream::CloseWriteSide(
std::optional<StreamErrorCode> error) {
if (write_side_closed_) {
return;
}
write_side_closed_ = true;
pending_write_.clear();
absl::Status status = session_->scheduler_.Unregister(id_);
if (!status.ok()) {
session_->OnFatalError("Failed to unregister closed stream");
return;
}
if (error.has_value() && visitor_ != nullptr) {
visitor_->OnStopSendingReceived(*error);
}
if (CanBeGarbageCollected()) {
session_->streams_to_garbage_collect_.push_back(id_);
}
}
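// Streams are not destroyed inline when both sides close; their IDs are
// queued in |streams_to_garbage_collect_| and erased here, presumably so a
// stream is never deleted while one of its own methods is still on the stack.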
void EncapsulatedSession::GarbageCollectStreams() {
for (StreamId id : streams_to_garbage_collect_) {
streams_.erase(id);
}
streams_to_garbage_collect_.clear();
}
void EncapsulatedSession::InnerStream::SetPriority(
const StreamPriority& priority) {
absl::Status status;
status = session_->scheduler_.UpdateSendGroup(id_, priority.send_group_id);
QUICHE_BUG_IF(EncapsulatedWebTransport_SetPriority_group, !status.ok())
<< status;
status = session_->scheduler_.UpdateSendOrder(id_, priority.send_order);
QUICHE_BUG_IF(EncapsulatedWebTransport_SetPriority_order, !status.ok())
<< status;
}
} | #include "quiche/web_transport/encapsulated/encapsulated_web_transport.h"
#include <array>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/common/capsule.h"
#include "quiche/common/http/http_header_block.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/mock_streams.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
#include "quiche/web_transport/test_tools/mock_web_transport.h"
#include "quiche/web_transport/web_transport.h"
namespace webtransport::test {
namespace {
using ::quiche::Capsule;
using ::quiche::CapsuleType;
using ::quiche::test::StatusIs;
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Return;
using ::testing::StrEq;
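// The fixture below loops outgoing data back into a CapsuleParser: the mock
// writer's default action feeds every written fragment to the parser, so each
// test can assert on the capsules the session emits via OnCapsule().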
class EncapsulatedWebTransportTest : public quiche::test::QuicheTest,
public quiche::CapsuleParser::Visitor {
public:
EncapsulatedWebTransportTest() : parser_(this), reader_(&read_buffer_) {
ON_CALL(fatal_error_callback_, Call(_))
.WillByDefault([](absl::string_view error) {
ADD_FAILURE() << "Fatal session error: " << error;
});
ON_CALL(writer_, Writev(_, _))
.WillByDefault([&](absl::Span<const absl::string_view> data,
const quiche::StreamWriteOptions& options) {
for (absl::string_view fragment : data) {
parser_.IngestCapsuleFragment(fragment);
}
writer_.ProcessOptions(options);
return absl::OkStatus();
});
}
std::unique_ptr<EncapsulatedSession> CreateTransport(
Perspective perspective) {
auto transport = std::make_unique<EncapsulatedSession>(
perspective, fatal_error_callback_.AsStdFunction());
session_ = transport.get();
return transport;
}
std::unique_ptr<SessionVisitor> CreateAndStoreVisitor() {
auto visitor = std::make_unique<testing::StrictMock<MockSessionVisitor>>();
visitor_ = visitor.get();
return visitor;
}
MOCK_METHOD(bool, OnCapsule, (const Capsule&), (override));
void OnCapsuleParseFailure(absl::string_view error_message) override {
ADD_FAILURE() << "Written an invalid capsule: " << error_message;
}
void ProcessIncomingCapsule(const Capsule& capsule) {
quiche::QuicheBuffer buffer =
quiche::SerializeCapsule(capsule, quiche::SimpleBufferAllocator::Get());
read_buffer_.append(buffer.data(), buffer.size());
session_->OnCanRead();
}
template <typename CapsuleType>
void ProcessIncomingCapsule(const CapsuleType& capsule) {
quiche::QuicheBuffer buffer = quiche::SerializeCapsule(
quiche::Capsule(capsule), quiche::SimpleBufferAllocator::Get());
read_buffer_.append(buffer.data(), buffer.size());
session_->OnCanRead();
}
void DefaultHandshakeForClient(EncapsulatedSession& session) {
quiche::HttpHeaderBlock outgoing_headers, incoming_headers;
session.InitializeClient(CreateAndStoreVisitor(), outgoing_headers,
&writer_, &reader_);
EXPECT_CALL(*visitor_, OnSessionReady());
session.ProcessIncomingServerHeaders(incoming_headers);
}
protected:
quiche::CapsuleParser parser_;
quiche::test::MockWriteStream writer_;
std::string read_buffer_;
quiche::test::ReadStreamFromString reader_;
MockSessionVisitor* visitor_ = nullptr;
EncapsulatedSession* session_ = nullptr;
testing::MockFunction<void(absl::string_view)> fatal_error_callback_;
};
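// Stream IDs follow the QUIC numbering scheme: the low bit encodes the
// initiator (0 = client, 1 = server) and the 0x2 bit marks the stream as
// unidirectional, so e.g. ID 3 is a server-initiated unidirectional stream.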
TEST_F(EncapsulatedWebTransportTest, IsOpenedBy) {
EXPECT_EQ(IsIdOpenedBy(0x00, Perspective::kClient), true);
EXPECT_EQ(IsIdOpenedBy(0x01, Perspective::kClient), false);
EXPECT_EQ(IsIdOpenedBy(0x02, Perspective::kClient), true);
EXPECT_EQ(IsIdOpenedBy(0x03, Perspective::kClient), false);
EXPECT_EQ(IsIdOpenedBy(0x00, Perspective::kServer), false);
EXPECT_EQ(IsIdOpenedBy(0x01, Perspective::kServer), true);
EXPECT_EQ(IsIdOpenedBy(0x02, Perspective::kServer), false);
EXPECT_EQ(IsIdOpenedBy(0x03, Perspective::kServer), true);
}
TEST_F(EncapsulatedWebTransportTest, SetupClientSession) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
quiche::HttpHeaderBlock outgoing_headers, incoming_headers;
EXPECT_EQ(session->state(), EncapsulatedSession::kUninitialized);
session->InitializeClient(CreateAndStoreVisitor(), outgoing_headers, &writer_,
&reader_);
EXPECT_EQ(session->state(), EncapsulatedSession::kWaitingForHeaders);
EXPECT_CALL(*visitor_, OnSessionReady());
session->ProcessIncomingServerHeaders(incoming_headers);
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionOpen);
}
TEST_F(EncapsulatedWebTransportTest, SetupServerSession) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kServer);
quiche::HttpHeaderBlock outgoing_headers, incoming_headers;
EXPECT_EQ(session->state(), EncapsulatedSession::kUninitialized);
std::unique_ptr<SessionVisitor> visitor = CreateAndStoreVisitor();
EXPECT_CALL(*visitor_, OnSessionReady());
session->InitializeServer(std::move(visitor), outgoing_headers,
incoming_headers, &writer_, &reader_);
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionOpen);
}
TEST_F(EncapsulatedWebTransportTest, CloseSession) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::CLOSE_WEBTRANSPORT_SESSION);
EXPECT_EQ(capsule.close_web_transport_session_capsule().error_code, 0x1234);
EXPECT_EQ(capsule.close_web_transport_session_capsule().error_message,
"test close");
return true;
});
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionOpen);
EXPECT_CALL(*visitor_, OnSessionClosed(0x1234, StrEq("test close")));
session->CloseSession(0x1234, "test close");
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionClosed);
EXPECT_TRUE(writer_.fin_written());
EXPECT_CALL(fatal_error_callback_, Call(_))
.WillOnce([](absl::string_view error) {
EXPECT_THAT(error, HasSubstr("close a session that is already closed"));
});
session->CloseSession(0x1234, "test close");
}
TEST_F(EncapsulatedWebTransportTest, CloseSessionWriteBlocked) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(writer_, CanWrite()).WillOnce(Return(false));
EXPECT_CALL(*this, OnCapsule(_)).Times(0);
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionOpen);
session->CloseSession(0x1234, "test close");
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionClosing);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::CLOSE_WEBTRANSPORT_SESSION);
EXPECT_EQ(capsule.close_web_transport_session_capsule().error_code, 0x1234);
EXPECT_EQ(capsule.close_web_transport_session_capsule().error_message,
"test close");
return true;
});
EXPECT_CALL(writer_, CanWrite()).WillOnce(Return(true));
EXPECT_CALL(*visitor_, OnSessionClosed(0x1234, StrEq("test close")));
session->OnCanWrite();
EXPECT_EQ(session->state(), EncapsulatedSession::kSessionClosed);
EXPECT_TRUE(writer_.fin_written());
}
TEST_F(EncapsulatedWebTransportTest, ReceiveFin) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnSessionClosed(0, IsEmpty()));
reader_.set_fin();
session->OnCanRead();
EXPECT_TRUE(writer_.fin_written());
}
TEST_F(EncapsulatedWebTransportTest, ReceiveCloseSession) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnSessionClosed(0x1234, StrEq("test")));
ProcessIncomingCapsule(Capsule::CloseWebTransportSession(0x1234, "test"));
EXPECT_TRUE(writer_.fin_written());
reader_.set_fin();
session->OnCanRead();
}
TEST_F(EncapsulatedWebTransportTest, ReceiveMalformedData) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(fatal_error_callback_, Call(HasSubstr("too much capsule data")))
.WillOnce([] {});
read_buffer_ = std::string(2 * 1024 * 1024, '\xff');
session->OnCanRead();
}
TEST_F(EncapsulatedWebTransportTest, SendDatagrams) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), quiche::CapsuleType::DATAGRAM);
EXPECT_EQ(capsule.datagram_capsule().http_datagram_payload, "test");
return true;
});
DatagramStatus status = session->SendOrQueueDatagram("test");
EXPECT_EQ(status.code, DatagramStatusCode::kSuccess);
}
TEST_F(EncapsulatedWebTransportTest, SendDatagramsEarly) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
quiche::HttpHeaderBlock outgoing_headers;
session->InitializeClient(CreateAndStoreVisitor(), outgoing_headers, &writer_,
&reader_);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), quiche::CapsuleType::DATAGRAM);
EXPECT_EQ(capsule.datagram_capsule().http_datagram_payload, "test");
return true;
});
ASSERT_EQ(session->state(), EncapsulatedSession::kWaitingForHeaders);
session->SendOrQueueDatagram("test");
}
TEST_F(EncapsulatedWebTransportTest, SendDatagramsBeforeInitialization) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
quiche::HttpHeaderBlock outgoing_headers;
EXPECT_CALL(*this, OnCapsule(_)).Times(0);
ASSERT_EQ(session->state(), EncapsulatedSession::kUninitialized);
session->SendOrQueueDatagram("test");
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::DATAGRAM);
EXPECT_EQ(capsule.datagram_capsule().http_datagram_payload, "test");
return true;
});
DefaultHandshakeForClient(*session);
}
TEST_F(EncapsulatedWebTransportTest, SendDatagramsTooBig) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*this, OnCapsule(_)).Times(0);
std::string long_string(16 * 1024, 'a');
DatagramStatus status = session->SendOrQueueDatagram(long_string);
EXPECT_EQ(status.code, DatagramStatusCode::kTooBig);
}
TEST_F(EncapsulatedWebTransportTest, ReceiveDatagrams) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnDatagramReceived(_))
.WillOnce([](absl::string_view data) { EXPECT_EQ(data, "test"); });
ProcessIncomingCapsule(Capsule::Datagram("test"));
}
TEST_F(EncapsulatedWebTransportTest, SendDraining) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::DRAIN_WEBTRANSPORT_SESSION);
return true;
});
session->NotifySessionDraining();
}
TEST_F(EncapsulatedWebTransportTest, ReceiveDraining) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
testing::MockFunction<void()> callback;
session->SetOnDraining(callback.AsStdFunction());
EXPECT_CALL(callback, Call());
ProcessIncomingCapsule(Capsule(quiche::DrainWebTransportSessionCapsule()));
}
TEST_F(EncapsulatedWebTransportTest, WriteErrorDatagram) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(writer_, Writev(_, _))
.WillOnce(Return(absl::InternalError("Test write error")));
EXPECT_CALL(fatal_error_callback_, Call(_))
.WillOnce([](absl::string_view error) {
EXPECT_THAT(error, HasSubstr("Test write error"));
});
DatagramStatus status = session->SendOrQueueDatagram("test");
EXPECT_EQ(status.code, DatagramStatusCode::kInternalError);
}
TEST_F(EncapsulatedWebTransportTest, WriteErrorControlCapsule) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(writer_, Writev(_, _))
.WillOnce(Return(absl::InternalError("Test write error")));
EXPECT_CALL(fatal_error_callback_, Call(_))
.WillOnce([](absl::string_view error) {
EXPECT_THAT(error, HasSubstr("Test write error"));
});
session->NotifySessionDraining();
}
TEST_F(EncapsulatedWebTransportTest, SimpleRead) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
bool stream_received = false;
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable())
.WillOnce([&] { stream_received = true; });
std::string data = "test";
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, data, false});
data[0] = 'q';
EXPECT_TRUE(stream_received);
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_EQ(stream->GetStreamId(), 1u);
EXPECT_EQ(stream->visitor(), nullptr);
EXPECT_EQ(stream->ReadableBytes(), 4u);
quiche::ReadStream::PeekResult peek = stream->PeekNextReadableRegion();
EXPECT_EQ(peek.peeked_data, "test");
EXPECT_FALSE(peek.fin_next);
EXPECT_FALSE(peek.all_data_received);
std::string buffer;
quiche::ReadStream::ReadResult read = stream->Read(&buffer);
EXPECT_EQ(read.bytes_read, 4);
EXPECT_FALSE(read.fin);
EXPECT_EQ(buffer, "test");
EXPECT_EQ(stream->ReadableBytes(), 0u);
}
class MockStreamVisitorWithDestructor : public MockStreamVisitor {
public:
~MockStreamVisitorWithDestructor() { OnDelete(); }
MOCK_METHOD(void, OnDelete, (), ());
};
MockStreamVisitorWithDestructor* SetupVisitor(Stream& stream) {
auto visitor = std::make_unique<MockStreamVisitorWithDestructor>();
MockStreamVisitorWithDestructor* result = visitor.get();
stream.SetVisitor(std::move(visitor));
return result;
}
TEST_F(EncapsulatedWebTransportTest, ImmediateRead) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(
quiche::WebTransportStreamDataCapsule{1, "abcd", false});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_EQ(stream->ReadableBytes(), 4u);
MockStreamVisitor* visitor = SetupVisitor(*stream);
EXPECT_CALL(*visitor, OnCanRead()).WillOnce([&] {
std::string output;
(void)stream->Read(&output);
EXPECT_EQ(output, "abcdef");
});
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "ef", false});
}
TEST_F(EncapsulatedWebTransportTest, FinPeek) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(
quiche::WebTransportStreamDataCapsule{1, "abcd", false});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_EQ(stream->ReadableBytes(), 4u);
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "ef", true});
quiche::ReadStream::PeekResult peek = stream->PeekNextReadableRegion();
EXPECT_EQ(peek.peeked_data, "abcd");
EXPECT_FALSE(peek.fin_next);
EXPECT_TRUE(peek.all_data_received);
EXPECT_FALSE(stream->SkipBytes(2));
peek = stream->PeekNextReadableRegion();
EXPECT_FALSE(peek.fin_next);
EXPECT_TRUE(peek.all_data_received);
EXPECT_FALSE(stream->SkipBytes(2));
peek = stream->PeekNextReadableRegion();
EXPECT_EQ(peek.peeked_data, "ef");
EXPECT_TRUE(peek.fin_next);
EXPECT_TRUE(peek.all_data_received);
EXPECT_TRUE(stream->SkipBytes(2));
}
TEST_F(EncapsulatedWebTransportTest, FinRead) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(
quiche::WebTransportStreamDataCapsule{1, "abcdef", true});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_EQ(stream->ReadableBytes(), 6u);
std::array<char, 3> buffer;
quiche::ReadStream::ReadResult read = stream->Read(absl::MakeSpan(buffer));
EXPECT_THAT(buffer, ElementsAre('a', 'b', 'c'));
EXPECT_EQ(read.bytes_read, 3);
EXPECT_FALSE(read.fin);
read = stream->Read(absl::MakeSpan(buffer));
EXPECT_THAT(buffer, ElementsAre('d', 'e', 'f'));
EXPECT_EQ(read.bytes_read, 3);
EXPECT_TRUE(read.fin);
}
TEST_F(EncapsulatedWebTransportTest, LargeRead) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{
1, std::string(64 * 1024, 'a'), true});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_EQ(stream->ReadableBytes(), 65536u);
for (int i = 0; i < 64; i++) {
std::array<char, 1024> buffer;
quiche::ReadStream::ReadResult read = stream->Read(absl::MakeSpan(buffer));
EXPECT_EQ(read.bytes_read, 1024);
EXPECT_EQ(read.fin, i == 63);
}
}
TEST_F(EncapsulatedWebTransportTest, DoubleFinReceived) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "abc", true});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(fatal_error_callback_, Call(_))
.WillOnce([](absl::string_view error) {
EXPECT_THAT(error, HasSubstr("has already received a FIN"));
});
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "def", true});
}
TEST_F(EncapsulatedWebTransportTest, CanWriteUnidiBidi) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
EXPECT_CALL(*visitor_, OnIncomingUnidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "abc", true});
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{3, "abc", true});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_TRUE(stream->CanWrite());
stream = session->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_FALSE(stream->CanWrite());
stream = session->OpenOutgoingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_TRUE(stream->CanWrite());
stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_TRUE(stream->CanWrite());
}
TEST_F(EncapsulatedWebTransportTest, ReadOnlyGarbageCollection) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingUnidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{3, "abc", true});
Stream* stream = session->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_TRUE(stream->SkipBytes(3));
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
bool deleted = false;
EXPECT_CALL(*visitor, OnDelete()).WillOnce([&] { deleted = true; });
session->GarbageCollectStreams();
EXPECT_TRUE(deleted);
}
TEST_F(EncapsulatedWebTransportTest, WriteOnlyGarbageCollection) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
bool deleted = false;
EXPECT_CALL(*visitor, OnDelete()).WillOnce([&] { deleted = true; });
EXPECT_CALL(*this, OnCapsule(_)).WillOnce(Return(true));
quiche::StreamWriteOptions options;
options.set_send_fin(true);
EXPECT_THAT(stream->Writev(absl::Span<const absl::string_view>(), options),
StatusIs(absl::StatusCode::kOk));
session->GarbageCollectStreams();
EXPECT_TRUE(deleted);
}
TEST_F(EncapsulatedWebTransportTest, SimpleWrite) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingBidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{1, "", true});
Stream* stream = session->AcceptIncomingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM);
EXPECT_EQ(capsule.web_transport_stream_data().stream_id, 1u);
EXPECT_EQ(capsule.web_transport_stream_data().fin, false);
EXPECT_EQ(capsule.web_transport_stream_data().data, "test");
return true;
});
absl::Status status = quiche::WriteIntoStream(*stream, "test");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
}
TEST_F(EncapsulatedWebTransportTest, WriteWithFin) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM_WITH_FIN);
EXPECT_EQ(capsule.web_transport_stream_data().stream_id, 2u);
EXPECT_EQ(capsule.web_transport_stream_data().fin, true);
EXPECT_EQ(capsule.web_transport_stream_data().data, "test");
return true;
});
quiche::StreamWriteOptions options;
options.set_send_fin(true);
EXPECT_TRUE(stream->CanWrite());
absl::Status status = quiche::WriteIntoStream(*stream, "test", options);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_FALSE(stream->CanWrite());
}
TEST_F(EncapsulatedWebTransportTest, FinOnlyWrite) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM_WITH_FIN);
EXPECT_EQ(capsule.web_transport_stream_data().stream_id, 2u);
EXPECT_EQ(capsule.web_transport_stream_data().fin, true);
EXPECT_EQ(capsule.web_transport_stream_data().data, "");
return true;
});
quiche::StreamWriteOptions options;
options.set_send_fin(true);
EXPECT_TRUE(stream->CanWrite());
absl::Status status =
stream->Writev(absl::Span<const absl::string_view>(), options);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_FALSE(stream->CanWrite());
}
TEST_F(EncapsulatedWebTransportTest, BufferedWriteThenUnbuffer) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(writer_, CanWrite()).WillOnce(Return(false));
absl::Status status = quiche::WriteIntoStream(*stream, "abc");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_TRUE(stream->CanWrite());
EXPECT_CALL(writer_, CanWrite()).WillRepeatedly(Return(true));
status = quiche::WriteIntoStream(*stream, "def");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM);
EXPECT_EQ(capsule.web_transport_stream_data().stream_id, 2u);
EXPECT_EQ(capsule.web_transport_stream_data().data, "abcdef");
return true;
});
session_->OnCanWrite();
}
TEST_F(EncapsulatedWebTransportTest, BufferedWriteThenFlush) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
EXPECT_CALL(writer_, CanWrite()).Times(2).WillRepeatedly(Return(false));
absl::Status status = quiche::WriteIntoStream(*stream, "abc");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
status = quiche::WriteIntoStream(*stream, "def");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_CALL(writer_, CanWrite()).WillRepeatedly(Return(true));
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM);
EXPECT_EQ(capsule.web_transport_stream_data().stream_id, 2u);
EXPECT_EQ(capsule.web_transport_stream_data().data, "abcdef");
return true;
});
session_->OnCanWrite();
}
TEST_F(EncapsulatedWebTransportTest, BufferedStreamBlocksAnother) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream1 = session->OpenOutgoingUnidirectionalStream();
Stream* stream2 = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream1 != nullptr);
ASSERT_TRUE(stream2 != nullptr);
EXPECT_CALL(*this, OnCapsule(_)).Times(0);
EXPECT_CALL(writer_, CanWrite()).WillOnce(Return(false));
absl::Status status = quiche::WriteIntoStream(*stream1, "abc");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
EXPECT_CALL(writer_, CanWrite()).WillRepeatedly(Return(true));
status = quiche::WriteIntoStream(*stream2, "abc");
EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
std::vector<StreamId> writes;
EXPECT_CALL(*this, OnCapsule(_)).WillRepeatedly([&](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STREAM);
writes.push_back(capsule.web_transport_stream_data().stream_id);
return true;
});
session_->OnCanWrite();
EXPECT_THAT(writes, ElementsAre(2, 6));
}
TEST_F(EncapsulatedWebTransportTest, SendReset) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([&](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_RESET_STREAM);
EXPECT_EQ(capsule.web_transport_reset_stream().stream_id, 2u);
EXPECT_EQ(capsule.web_transport_reset_stream().error_code, 1234u);
return true;
});
stream->ResetWithUserCode(1234u);
bool deleted = false;
EXPECT_CALL(*visitor, OnDelete()).WillOnce([&] { deleted = true; });
session->GarbageCollectStreams();
EXPECT_TRUE(deleted);
}
TEST_F(EncapsulatedWebTransportTest, ReceiveReset) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingUnidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{3, "", true});
Stream* stream = session->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
EXPECT_CALL(*visitor, OnResetStreamReceived(1234u));
EXPECT_TRUE(session->GetStreamById(3) != nullptr);
ProcessIncomingCapsule(quiche::WebTransportResetStreamCapsule{3u, 1234u});
EXPECT_TRUE(session->GetStreamById(3) == nullptr);
}
TEST_F(EncapsulatedWebTransportTest, SendStopSending) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
EXPECT_CALL(*visitor_, OnIncomingUnidirectionalStreamAvailable());
ProcessIncomingCapsule(quiche::WebTransportStreamDataCapsule{3, "", true});
Stream* stream = session->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
EXPECT_CALL(*this, OnCapsule(_)).WillOnce([&](const Capsule& capsule) {
EXPECT_EQ(capsule.capsule_type(), CapsuleType::WT_STOP_SENDING);
EXPECT_EQ(capsule.web_transport_stop_sending().stream_id, 3u);
EXPECT_EQ(capsule.web_transport_stop_sending().error_code, 1234u);
return true;
});
stream->SendStopSending(1234u);
bool deleted = false;
EXPECT_CALL(*visitor, OnDelete()).WillOnce([&] { deleted = true; });
session->GarbageCollectStreams();
EXPECT_TRUE(deleted);
}
TEST_F(EncapsulatedWebTransportTest, ReceiveStopSending) {
std::unique_ptr<EncapsulatedSession> session =
CreateTransport(Perspective::kClient);
DefaultHandshakeForClient(*session);
Stream* stream = session->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
MockStreamVisitorWithDestructor* visitor = SetupVisitor(*stream);
EXPECT_CALL(*visitor, OnStopSendingReceived(1234u));
EXPECT_TRUE(session->GetStreamById(2) != nullptr);
ProcessIncomingCapsule(quiche::WebTransportStopSendingCapsule{2u, 1234u});
EXPECT_TRUE(session->GetStreamById(2) == nullptr);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/encapsulated/encapsulated_web_transport.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/encapsulated/encapsulated_web_transport_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
518e59a4-3440-4e8e-8be3-93932fb4ac2c | cpp | google/quiche | hpack_block_builder | quiche/http2/test_tools/hpack_block_builder.cc | quiche/http2/test_tools/hpack_block_builder_test.cc | #include "quiche/http2/test_tools/hpack_block_builder.h"
#include "quiche/http2/hpack/varint/hpack_varint_encoder.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
void HpackBlockBuilder::AppendHighBitsAndVarint(uint8_t high_bits,
uint8_t prefix_length,
uint64_t varint) {
EXPECT_LE(3, prefix_length);
EXPECT_LE(prefix_length, 8);
HpackVarintEncoder::Encode(high_bits, prefix_length, varint, &buffer_);
}
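// The high bits and prefix lengths below follow the RFC 7541 encodings:
//   indexed header field             1xxxxxxx  (7-bit prefix)
//   literal w/ incremental indexing  01xxxxxx  (6-bit prefix)
//   dynamic table size update        001xxxxx  (5-bit prefix)
//   literal without indexing         0000xxxx  (4-bit prefix)
//   literal never indexed            0001xxxx  (4-bit prefix)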
void HpackBlockBuilder::AppendEntryTypeAndVarint(HpackEntryType entry_type,
uint64_t varint) {
uint8_t high_bits;
uint8_t prefix_length;
switch (entry_type) {
case HpackEntryType::kIndexedHeader:
high_bits = 0x80;
prefix_length = 7;
break;
case HpackEntryType::kDynamicTableSizeUpdate:
high_bits = 0x20;
prefix_length = 5;
break;
case HpackEntryType::kIndexedLiteralHeader:
high_bits = 0x40;
prefix_length = 6;
break;
case HpackEntryType::kUnindexedLiteralHeader:
high_bits = 0x00;
prefix_length = 4;
break;
case HpackEntryType::kNeverIndexedLiteralHeader:
high_bits = 0x10;
prefix_length = 4;
break;
default:
QUICHE_BUG(http2_bug_110_1) << "Unreached, entry_type=" << entry_type;
high_bits = 0;
prefix_length = 0;
break;
}
AppendHighBitsAndVarint(high_bits, prefix_length, varint);
}
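// String literals are encoded as a Huffman flag in the high bit, the string
// length as a 7-bit-prefix varint, and then the (possibly Huffman-coded)
// octets; note the caller is responsible for any Huffman encoding of |str|.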
void HpackBlockBuilder::AppendString(bool is_huffman_encoded,
absl::string_view str) {
uint8_t high_bits = is_huffman_encoded ? 0x80 : 0;
uint8_t prefix_length = 7;
AppendHighBitsAndVarint(high_bits, prefix_length, str.size());
buffer_.append(str.data(), str.size());
}
}
} | #include "quiche/http2/test_tools/hpack_block_builder.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
const bool kUncompressed = false;
const bool kCompressed = true;
const uint32_t kStaticTableMethodGET = 2;
const uint32_t kStaticTablePathSlash = 4;
const uint32_t kStaticTableSchemeHttp = 6;
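// These indices come from the HPACK static table (RFC 7541 Appendix A):
// entry 2 is ":method: GET", 4 is ":path: /", and 6 is ":scheme: http".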
TEST(HpackBlockBuilderTest, ExamplesFromSpecC2) {
{
HpackBlockBuilder b;
b.AppendLiteralNameAndValue(HpackEntryType::kIndexedLiteralHeader,
kUncompressed, "custom-key", kUncompressed,
"custom-header");
EXPECT_EQ(26u, b.size());
const char kExpected[] =
"\x40"
"\x0a"
"custom-key"
"\x0d"
"custom-header";
EXPECT_EQ(kExpected, b.buffer());
}
{
HpackBlockBuilder b;
b.AppendNameIndexAndLiteralValue(HpackEntryType::kUnindexedLiteralHeader, 4,
kUncompressed, "/sample/path");
EXPECT_EQ(14u, b.size());
const char kExpected[] =
"\x04"
"\x0c"
"/sample/path";
EXPECT_EQ(kExpected, b.buffer());
}
{
HpackBlockBuilder b;
b.AppendLiteralNameAndValue(HpackEntryType::kNeverIndexedLiteralHeader,
kUncompressed, "password", kUncompressed,
"secret");
EXPECT_EQ(17u, b.size());
const char kExpected[] =
"\x10"
"\x08"
"password"
"\x06"
"secret";
EXPECT_EQ(kExpected, b.buffer());
}
{
HpackBlockBuilder b;
b.AppendIndexedHeader(2);
EXPECT_EQ(1u, b.size());
const char kExpected[] = "\x82";
EXPECT_EQ(kExpected, b.buffer());
}
}
TEST(HpackBlockBuilderTest, ExamplesFromSpecC3) {
{
HpackBlockBuilder b;
b.AppendIndexedHeader(2);
b.AppendIndexedHeader(6);
b.AppendIndexedHeader(4);
b.AppendNameIndexAndLiteralValue(HpackEntryType::kIndexedLiteralHeader, 1,
kUncompressed, "www.example.com");
EXPECT_EQ(20u, b.size());
std::string expected;
ASSERT_TRUE(absl::HexStringToBytes(
"828684410f7777772e6578616d706c652e636f6d", &expected));
EXPECT_EQ(expected, b.buffer());
}
}
TEST(HpackBlockBuilderTest, ExamplesFromSpecC4) {
{
HpackBlockBuilder b;
b.AppendIndexedHeader(kStaticTableMethodGET);
b.AppendIndexedHeader(kStaticTableSchemeHttp);
b.AppendIndexedHeader(kStaticTablePathSlash);
const char kHuffmanWwwExampleCom[] = {'\xf1', '\xe3', '\xc2', '\xe5',
'\xf2', '\x3a', '\x6b', '\xa0',
'\xab', '\x90', '\xf4', '\xff'};
b.AppendNameIndexAndLiteralValue(
HpackEntryType::kIndexedLiteralHeader, 1, kCompressed,
absl::string_view(kHuffmanWwwExampleCom, sizeof kHuffmanWwwExampleCom));
EXPECT_EQ(17u, b.size());
std::string expected;
ASSERT_TRUE(absl::HexStringToBytes("828684418cf1e3c2e5f23a6ba0ab90f4ff",
&expected));
EXPECT_EQ(expected, b.buffer());
}
}
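// Worked example for the 4096 case below: with a 5-bit prefix, values >= 31
// spill into continuation bytes, so the first octet is 0x20 | 0x1f = 0x3f and
// the remainder 4096 - 31 = 4065 encodes in 7-bit groups, least-significant
// first, as 0xe1 0x1f.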
TEST(HpackBlockBuilderTest, DynamicTableSizeUpdate) {
{
HpackBlockBuilder b;
b.AppendDynamicTableSizeUpdate(0);
EXPECT_EQ(1u, b.size());
const char kData[] = {'\x20'};
absl::string_view expected(kData, sizeof kData);
EXPECT_EQ(expected, b.buffer());
}
{
HpackBlockBuilder b;
b.AppendDynamicTableSizeUpdate(4096);
EXPECT_EQ(3u, b.size());
const char kData[] = {'\x3f', '\xe1', '\x1f'};
absl::string_view expected(kData, sizeof kData);
EXPECT_EQ(expected, b.buffer());
}
{
HpackBlockBuilder b;
b.AppendDynamicTableSizeUpdate(1000000000000);
EXPECT_EQ(7u, b.size());
const char kData[] = {'\x3f', '\xe1', '\x9f', '\x94',
'\xa5', '\x8d', '\x1d'};
absl::string_view expected(kData, sizeof kData);
EXPECT_EQ(expected, b.buffer());
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/hpack_block_builder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/hpack_block_builder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
61d551f4-8228-487d-8b81-1ef3db72f442 | cpp | google/quiche | quic_stream_id_manager | quiche/quic/core/quic_stream_id_manager.cc | quiche/quic/core/quic_stream_id_manager_test.cc | #include "quiche/quic/core/quic_stream_id_manager.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
#define ENDPOINT \
(perspective_ == Perspective::IS_SERVER ? " Server: " : " Client: ")
QuicStreamIdManager::QuicStreamIdManager(
DelegateInterface* delegate, bool unidirectional, Perspective perspective,
ParsedQuicVersion version, QuicStreamCount max_allowed_outgoing_streams,
QuicStreamCount max_allowed_incoming_streams)
: delegate_(delegate),
unidirectional_(unidirectional),
perspective_(perspective),
version_(version),
outgoing_max_streams_(max_allowed_outgoing_streams),
next_outgoing_stream_id_(GetFirstOutgoingStreamId()),
outgoing_stream_count_(0),
incoming_actual_max_streams_(max_allowed_incoming_streams),
incoming_advertised_max_streams_(max_allowed_incoming_streams),
incoming_initial_max_open_streams_(max_allowed_incoming_streams),
incoming_stream_count_(0),
largest_peer_created_stream_id_(
QuicUtils::GetInvalidStreamId(version.transport_version)),
stop_increasing_incoming_max_streams_(false) {}
QuicStreamIdManager::~QuicStreamIdManager() {}
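// A STREAMS_BLOCKED frame carries the peer's view of our stream limit. A
// count above what we last advertised is a protocol violation; a count below
// our current actual limit means the peer missed an update, so we
// re-advertise with a fresh MAX_STREAMS if the delegate allows sending one.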
bool QuicStreamIdManager::OnStreamsBlockedFrame(
const QuicStreamsBlockedFrame& frame, std::string* error_details) {
QUICHE_DCHECK_EQ(frame.unidirectional, unidirectional_);
if (frame.stream_count > incoming_advertised_max_streams_) {
*error_details = absl::StrCat(
"StreamsBlockedFrame's stream count ", frame.stream_count,
" exceeds incoming max stream ", incoming_advertised_max_streams_);
return false;
}
QUICHE_DCHECK_LE(incoming_advertised_max_streams_,
incoming_actual_max_streams_);
if (incoming_advertised_max_streams_ == incoming_actual_max_streams_) {
return true;
}
if (frame.stream_count < incoming_actual_max_streams_ &&
delegate_->CanSendMaxStreams()) {
SendMaxStreamsFrame();
}
return true;
}
bool QuicStreamIdManager::MaybeAllowNewOutgoingStreams(
QuicStreamCount max_open_streams) {
if (max_open_streams <= outgoing_max_streams_) {
return false;
}
outgoing_max_streams_ =
std::min(max_open_streams, QuicUtils::GetMaxStreamCount());
return true;
}
void QuicStreamIdManager::SetMaxOpenIncomingStreams(
QuicStreamCount max_open_streams) {
QUIC_BUG_IF(quic_bug_12413_1, incoming_stream_count_ > 0)
<< "non-zero incoming stream count " << incoming_stream_count_
<< " when setting max incoming stream to " << max_open_streams;
QUIC_DLOG_IF(WARNING, incoming_initial_max_open_streams_ != max_open_streams)
<< absl::StrCat(unidirectional_ ? "unidirectional " : "bidirectional: ",
"incoming stream limit changed from ",
incoming_initial_max_open_streams_, " to ",
max_open_streams);
incoming_actual_max_streams_ = max_open_streams;
incoming_advertised_max_streams_ = max_open_streams;
incoming_initial_max_open_streams_ = max_open_streams;
}
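// To avoid sending MAX_STREAMS after every stream closure, an update is only
// sent once the remaining advertised credit drops to a fraction
// (1 / quic_max_streams_window_divisor) of the initial incoming stream limit.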
void QuicStreamIdManager::MaybeSendMaxStreamsFrame() {
int divisor = GetQuicFlag(quic_max_streams_window_divisor);
if (divisor > 0) {
if ((incoming_advertised_max_streams_ - incoming_stream_count_) >
(incoming_initial_max_open_streams_ / divisor)) {
return;
}
}
if (delegate_->CanSendMaxStreams() &&
incoming_advertised_max_streams_ < incoming_actual_max_streams_) {
SendMaxStreamsFrame();
}
}
void QuicStreamIdManager::SendMaxStreamsFrame() {
QUIC_BUG_IF(quic_bug_12413_2,
incoming_advertised_max_streams_ >= incoming_actual_max_streams_);
incoming_advertised_max_streams_ = incoming_actual_max_streams_;
delegate_->SendMaxStreams(incoming_advertised_max_streams_, unidirectional_);
}
void QuicStreamIdManager::OnStreamClosed(QuicStreamId stream_id) {
QUICHE_DCHECK_NE(QuicUtils::IsBidirectionalStreamId(stream_id, version_),
unidirectional_);
if (QuicUtils::IsOutgoingStreamId(version_, stream_id, perspective_)) {
return;
}
if (incoming_actual_max_streams_ == QuicUtils::GetMaxStreamCount()) {
return;
}
if (!stop_increasing_incoming_max_streams_) {
incoming_actual_max_streams_++;
MaybeSendMaxStreamsFrame();
}
}
QuicStreamId QuicStreamIdManager::GetNextOutgoingStreamId() {
QUIC_BUG_IF(quic_bug_12413_3, outgoing_stream_count_ >= outgoing_max_streams_)
<< "Attempt to allocate a new outgoing stream that would exceed the "
"limit ("
<< outgoing_max_streams_ << ")";
QuicStreamId id = next_outgoing_stream_id_;
next_outgoing_stream_id_ +=
QuicUtils::StreamIdDelta(version_.transport_version);
outgoing_stream_count_++;
return id;
}
bool QuicStreamIdManager::CanOpenNextOutgoingStream() const {
QUICHE_DCHECK(VersionHasIetfQuicFrames(version_.transport_version));
return outgoing_stream_count_ < outgoing_max_streams_;
}
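// The peer may open stream N without having used lower-numbered IDs of the
// same type; every skipped ID still counts against the stream limit and is
// recorded in |available_streams_| so it can legitimately be opened later.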
bool QuicStreamIdManager::MaybeIncreaseLargestPeerStreamId(
const QuicStreamId stream_id, std::string* error_details) {
QUICHE_DCHECK_NE(QuicUtils::IsBidirectionalStreamId(stream_id, version_),
unidirectional_);
QUICHE_DCHECK_NE(QuicUtils::IsServerInitiatedStreamId(
version_.transport_version, stream_id),
perspective_ == Perspective::IS_SERVER);
if (available_streams_.erase(stream_id) == 1) {
return true;
}
if (largest_peer_created_stream_id_ !=
QuicUtils::GetInvalidStreamId(version_.transport_version)) {
QUICHE_DCHECK_GT(stream_id, largest_peer_created_stream_id_);
}
const QuicStreamCount delta =
QuicUtils::StreamIdDelta(version_.transport_version);
const QuicStreamId least_new_stream_id =
largest_peer_created_stream_id_ ==
QuicUtils::GetInvalidStreamId(version_.transport_version)
? GetFirstIncomingStreamId()
: largest_peer_created_stream_id_ + delta;
const QuicStreamCount stream_count_increment =
(stream_id - least_new_stream_id) / delta + 1;
if (incoming_stream_count_ + stream_count_increment >
incoming_advertised_max_streams_) {
QUIC_DLOG(INFO) << ENDPOINT
<< "Failed to create a new incoming stream with id:"
<< stream_id << ", reaching MAX_STREAMS limit: "
<< incoming_advertised_max_streams_ << ".";
*error_details = absl::StrCat("Stream id ", stream_id,
" would exceed stream count limit ",
incoming_advertised_max_streams_);
return false;
}
for (QuicStreamId id = least_new_stream_id; id < stream_id; id += delta) {
available_streams_.insert(id);
}
incoming_stream_count_ += stream_count_increment;
largest_peer_created_stream_id_ = stream_id;
return true;
}
bool QuicStreamIdManager::IsAvailableStream(QuicStreamId id) const {
QUICHE_DCHECK_NE(QuicUtils::IsBidirectionalStreamId(id, version_),
unidirectional_);
if (QuicUtils::IsOutgoingStreamId(version_, id, perspective_)) {
return id >= next_outgoing_stream_id_;
}
return largest_peer_created_stream_id_ ==
QuicUtils::GetInvalidStreamId(version_.transport_version) ||
id > largest_peer_created_stream_id_ ||
available_streams_.contains(id);
}
QuicStreamId QuicStreamIdManager::GetFirstOutgoingStreamId() const {
return (unidirectional_) ? QuicUtils::GetFirstUnidirectionalStreamId(
version_.transport_version, perspective_)
: QuicUtils::GetFirstBidirectionalStreamId(
version_.transport_version, perspective_);
}
QuicStreamId QuicStreamIdManager::GetFirstIncomingStreamId() const {
return (unidirectional_) ? QuicUtils::GetFirstUnidirectionalStreamId(
version_.transport_version,
QuicUtils::InvertPerspective(perspective_))
: QuicUtils::GetFirstBidirectionalStreamId(
version_.transport_version,
QuicUtils::InvertPerspective(perspective_));
}
QuicStreamCount QuicStreamIdManager::available_incoming_streams() const {
return incoming_advertised_max_streams_ - incoming_stream_count_;
}
} | #include "quiche/quic/core/quic_stream_id_manager.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_stream_id_manager_peer.h"
using testing::_;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
class MockDelegate : public QuicStreamIdManager::DelegateInterface {
public:
MOCK_METHOD(void, SendMaxStreams,
(QuicStreamCount stream_count, bool unidirectional), (override));
MOCK_METHOD(bool, CanSendMaxStreams, (), (override));
};
struct TestParams {
TestParams(ParsedQuicVersion version, Perspective perspective,
bool is_unidirectional)
: version(version),
perspective(perspective),
is_unidirectional(is_unidirectional) {}
ParsedQuicVersion version;
Perspective perspective;
bool is_unidirectional;
};
std::string PrintToString(const TestParams& p) {
return absl::StrCat(
ParsedQuicVersionToString(p.version), "_",
(p.perspective == Perspective::IS_CLIENT ? "Client" : "Server"),
(p.is_unidirectional ? "Unidirectional" : "Bidirectional"));
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
for (const ParsedQuicVersion& version : AllSupportedVersions()) {
if (!version.HasIetfQuicFrames()) {
continue;
}
for (Perspective perspective :
{Perspective::IS_CLIENT, Perspective::IS_SERVER}) {
for (bool is_unidirectional : {true, false}) {
params.push_back(TestParams(version, perspective, is_unidirectional));
}
}
}
return params;
}
class QuicStreamIdManagerTest : public QuicTestWithParam<TestParams> {
protected:
QuicStreamIdManagerTest()
: stream_id_manager_(&delegate_, IsUnidirectional(), perspective(),
GetParam().version, 0,
kDefaultMaxStreamsPerConnection) {
QUICHE_DCHECK(VersionHasIetfQuicFrames(transport_version()));
}
QuicTransportVersion transport_version() const {
return GetParam().version.transport_version;
}
QuicStreamId GetNthIncomingStreamId(int n) {
return QuicUtils::StreamIdDelta(transport_version()) * n +
(IsUnidirectional()
? QuicUtils::GetFirstUnidirectionalStreamId(
transport_version(),
QuicUtils::InvertPerspective(perspective()))
: QuicUtils::GetFirstBidirectionalStreamId(
transport_version(),
QuicUtils::InvertPerspective(perspective())));
}
bool IsUnidirectional() { return GetParam().is_unidirectional; }
Perspective perspective() { return GetParam().perspective; }
StrictMock<MockDelegate> delegate_;
QuicStreamIdManager stream_id_manager_;
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicStreamIdManagerTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
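// The tests below run for every combination of IETF QUIC version, endpoint
// perspective, and stream directionality produced by GetTestParams().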
TEST_P(QuicStreamIdManagerTest, Initialization) {
EXPECT_EQ(0u, stream_id_manager_.outgoing_max_streams());
EXPECT_EQ(kDefaultMaxStreamsPerConnection,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(kDefaultMaxStreamsPerConnection,
stream_id_manager_.incoming_advertised_max_streams());
EXPECT_EQ(kDefaultMaxStreamsPerConnection,
stream_id_manager_.incoming_initial_max_open_streams());
}
TEST_P(QuicStreamIdManagerTest, CheckMaxStreamsWindowForSingleStream) {
stream_id_manager_.SetMaxOpenIncomingStreams(1);
EXPECT_EQ(1u, stream_id_manager_.incoming_initial_max_open_streams());
EXPECT_EQ(1u, stream_id_manager_.incoming_actual_max_streams());
}
TEST_P(QuicStreamIdManagerTest, CheckMaxStreamsBadValuesOverMaxFailsOutgoing) {
QuicStreamCount implementation_max = QuicUtils::GetMaxStreamCount();
EXPECT_LT(stream_id_manager_.outgoing_max_streams(), implementation_max);
EXPECT_TRUE(
stream_id_manager_.MaybeAllowNewOutgoingStreams(implementation_max + 1));
EXPECT_EQ(implementation_max, stream_id_manager_.outgoing_max_streams());
}
TEST_P(QuicStreamIdManagerTest, ProcessStreamsBlockedOk) {
QuicStreamCount stream_count =
stream_id_manager_.incoming_initial_max_open_streams();
QuicStreamsBlockedFrame frame(0, stream_count - 1, IsUnidirectional());
EXPECT_CALL(delegate_, SendMaxStreams(stream_count, IsUnidirectional()))
.Times(0);
std::string error_details;
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
}
TEST_P(QuicStreamIdManagerTest, ProcessStreamsBlockedNoOp) {
QuicStreamCount stream_count =
stream_id_manager_.incoming_initial_max_open_streams();
QuicStreamsBlockedFrame frame(0, stream_count, IsUnidirectional());
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
}
TEST_P(QuicStreamIdManagerTest, ProcessStreamsBlockedTooBig) {
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
QuicStreamCount stream_count =
stream_id_manager_.incoming_initial_max_open_streams() + 1;
QuicStreamsBlockedFrame frame(0, stream_count, IsUnidirectional());
std::string error_details;
EXPECT_FALSE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
EXPECT_EQ(
error_details,
"StreamsBlockedFrame's stream count 101 exceeds incoming max stream 100");
}
TEST_P(QuicStreamIdManagerTest, IsIncomingStreamIdValidBelowLimit) {
QuicStreamId stream_id = GetNthIncomingStreamId(
stream_id_manager_.incoming_actual_max_streams() - 2);
EXPECT_TRUE(
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id, nullptr));
}
TEST_P(QuicStreamIdManagerTest, IsIncomingStreamIdValidAtLimit) {
QuicStreamId stream_id = GetNthIncomingStreamId(
stream_id_manager_.incoming_actual_max_streams() - 1);
EXPECT_TRUE(
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id, nullptr));
}
TEST_P(QuicStreamIdManagerTest, IsIncomingStreamIdInValidAboveLimit) {
QuicStreamId stream_id =
GetNthIncomingStreamId(stream_id_manager_.incoming_actual_max_streams());
std::string error_details;
EXPECT_FALSE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
stream_id, &error_details));
EXPECT_EQ(error_details,
absl::StrCat("Stream id ", stream_id,
" would exceed stream count limit 100"));
}
TEST_P(QuicStreamIdManagerTest, OnStreamsBlockedFrame) {
QuicStreamCount advertised_stream_count =
stream_id_manager_.incoming_advertised_max_streams();
QuicStreamsBlockedFrame frame;
frame.unidirectional = IsUnidirectional();
frame.stream_count = advertised_stream_count;
std::string error_details;
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
frame.stream_count = advertised_stream_count + 1;
EXPECT_FALSE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
EXPECT_EQ(
error_details,
"StreamsBlockedFrame's stream count 101 exceeds incoming max stream 100");
QuicStreamCount actual_stream_count =
stream_id_manager_.incoming_actual_max_streams();
stream_id_manager_.OnStreamClosed(
QuicStreamIdManagerPeer::GetFirstIncomingStreamId(&stream_id_manager_));
EXPECT_EQ(actual_stream_count + 1u,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(stream_id_manager_.incoming_actual_max_streams(),
stream_id_manager_.incoming_advertised_max_streams() + 1u);
frame.stream_count = advertised_stream_count;
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(true));
EXPECT_CALL(delegate_,
SendMaxStreams(stream_id_manager_.incoming_actual_max_streams(),
IsUnidirectional()));
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
EXPECT_EQ(stream_id_manager_.incoming_actual_max_streams(),
stream_id_manager_.incoming_advertised_max_streams());
}
TEST_P(QuicStreamIdManagerTest, OnStreamsBlockedFrameCantSend) {
QuicStreamCount advertised_stream_count =
stream_id_manager_.incoming_advertised_max_streams();
QuicStreamsBlockedFrame frame;
frame.unidirectional = IsUnidirectional();
QuicStreamCount actual_stream_count =
stream_id_manager_.incoming_actual_max_streams();
stream_id_manager_.OnStreamClosed(
QuicStreamIdManagerPeer::GetFirstIncomingStreamId(&stream_id_manager_));
EXPECT_EQ(actual_stream_count + 1u,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(stream_id_manager_.incoming_actual_max_streams(),
stream_id_manager_.incoming_advertised_max_streams() + 1u);
frame.stream_count = advertised_stream_count;
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(false));
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
const QuicStreamCount advertised_max_streams =
stream_id_manager_.incoming_advertised_max_streams();
std::string error_details;
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
EXPECT_EQ(advertised_max_streams,
stream_id_manager_.incoming_advertised_max_streams());
}
TEST_P(QuicStreamIdManagerTest, GetNextOutgoingStream) {
size_t number_of_streams = kDefaultMaxStreamsPerConnection;
EXPECT_TRUE(
stream_id_manager_.MaybeAllowNewOutgoingStreams(number_of_streams));
QuicStreamId stream_id = IsUnidirectional()
? QuicUtils::GetFirstUnidirectionalStreamId(
transport_version(), perspective())
: QuicUtils::GetFirstBidirectionalStreamId(
transport_version(), perspective());
EXPECT_EQ(number_of_streams, stream_id_manager_.outgoing_max_streams());
while (number_of_streams) {
EXPECT_TRUE(stream_id_manager_.CanOpenNextOutgoingStream());
EXPECT_EQ(stream_id, stream_id_manager_.GetNextOutgoingStreamId());
stream_id += QuicUtils::StreamIdDelta(transport_version());
number_of_streams--;
}
EXPECT_FALSE(stream_id_manager_.CanOpenNextOutgoingStream());
EXPECT_QUIC_BUG(
stream_id_manager_.GetNextOutgoingStreamId(),
"Attempt to allocate a new outgoing stream that would exceed the limit");
}
TEST_P(QuicStreamIdManagerTest, MaybeIncreaseLargestPeerStreamId) {
QuicStreamId max_stream_id = GetNthIncomingStreamId(
stream_id_manager_.incoming_actual_max_streams() - 1);
EXPECT_TRUE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(max_stream_id,
nullptr));
QuicStreamId first_stream_id = GetNthIncomingStreamId(0);
EXPECT_TRUE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
first_stream_id, nullptr));
std::string error_details;
EXPECT_FALSE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
max_stream_id + QuicUtils::StreamIdDelta(transport_version()),
&error_details));
EXPECT_EQ(error_details,
absl::StrCat(
"Stream id ",
max_stream_id + QuicUtils::StreamIdDelta(transport_version()),
" would exceed stream count limit 100"));
}
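// Closing incoming streams bumps the actual max immediately, but a
// MAX_STREAMS frame is only sent once a full window
// (initial_max / quic_max_streams_window_divisor) of streams has closed.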
TEST_P(QuicStreamIdManagerTest, MaxStreamsWindow) {
int stream_count = stream_id_manager_.incoming_initial_max_open_streams() /
GetQuicFlag(quic_max_streams_window_divisor) -
1;
EXPECT_CALL(delegate_, CanSendMaxStreams()).Times(0);
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
QuicStreamId stream_id = GetNthIncomingStreamId(0);
size_t old_available_incoming_streams =
stream_id_manager_.available_incoming_streams();
auto i = stream_count;
while (i) {
EXPECT_TRUE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id,
nullptr));
old_available_incoming_streams--;
EXPECT_EQ(old_available_incoming_streams,
stream_id_manager_.available_incoming_streams());
i--;
stream_id += QuicUtils::StreamIdDelta(transport_version());
}
stream_id = GetNthIncomingStreamId(0);
QuicStreamCount expected_actual_max =
stream_id_manager_.incoming_actual_max_streams();
QuicStreamCount expected_advertised_max_streams =
stream_id_manager_.incoming_advertised_max_streams();
while (stream_count) {
stream_id_manager_.OnStreamClosed(stream_id);
stream_count--;
stream_id += QuicUtils::StreamIdDelta(transport_version());
expected_actual_max++;
EXPECT_EQ(expected_actual_max,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(expected_advertised_max_streams,
stream_id_manager_.incoming_advertised_max_streams());
}
EXPECT_EQ(old_available_incoming_streams,
stream_id_manager_.available_incoming_streams());
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(true));
EXPECT_CALL(delegate_, SendMaxStreams(_, IsUnidirectional()));
EXPECT_TRUE(
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id, nullptr));
stream_id_manager_.OnStreamClosed(stream_id);
}
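// Same windowed scenario, except the delegate cannot send: the advertised
// limit must remain unchanged even after a full window of closed streams.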
TEST_P(QuicStreamIdManagerTest, MaxStreamsWindowCantSend) {
int stream_count = stream_id_manager_.incoming_initial_max_open_streams() /
GetQuicFlag(quic_max_streams_window_divisor) -
1;
EXPECT_CALL(delegate_, CanSendMaxStreams()).Times(0);
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
QuicStreamId stream_id = GetNthIncomingStreamId(0);
size_t old_available_incoming_streams =
stream_id_manager_.available_incoming_streams();
auto i = stream_count;
while (i) {
EXPECT_TRUE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id,
nullptr));
old_available_incoming_streams--;
EXPECT_EQ(old_available_incoming_streams,
stream_id_manager_.available_incoming_streams());
i--;
stream_id += QuicUtils::StreamIdDelta(transport_version());
}
stream_id = GetNthIncomingStreamId(0);
QuicStreamCount expected_actual_max =
stream_id_manager_.incoming_actual_max_streams();
QuicStreamCount expected_advertised_max_streams =
stream_id_manager_.incoming_advertised_max_streams();
while (stream_count) {
stream_id_manager_.OnStreamClosed(stream_id);
stream_count--;
stream_id += QuicUtils::StreamIdDelta(transport_version());
expected_actual_max++;
EXPECT_EQ(expected_actual_max,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(expected_advertised_max_streams,
stream_id_manager_.incoming_advertised_max_streams());
}
EXPECT_EQ(old_available_incoming_streams,
stream_id_manager_.available_incoming_streams());
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(false));
EXPECT_CALL(delegate_, SendMaxStreams(_, IsUnidirectional())).Times(0);
EXPECT_TRUE(
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id, nullptr));
stream_id_manager_.OnStreamClosed(stream_id);
EXPECT_EQ(expected_advertised_max_streams,
stream_id_manager_.incoming_advertised_max_streams());
}
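// After StopIncreasingIncomingMaxStreams(), closing streams advances
// neither the actual nor the advertised limit, and nothing is sent.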
TEST_P(QuicStreamIdManagerTest, MaxStreamsWindowStopsIncreasing) {
QuicStreamId stream_count =
stream_id_manager_.incoming_initial_max_open_streams();
QuicStreamId stream_id = GetNthIncomingStreamId(0);
for (QuicStreamCount i = 0; i < stream_count; ++i) {
EXPECT_TRUE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(stream_id,
nullptr));
stream_id += QuicUtils::StreamIdDelta(transport_version());
}
stream_id_manager_.StopIncreasingIncomingMaxStreams();
EXPECT_CALL(delegate_, CanSendMaxStreams()).Times(0);
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
stream_id = GetNthIncomingStreamId(0);
QuicStreamCount expected_actual_max =
stream_id_manager_.incoming_actual_max_streams();
QuicStreamCount expected_advertised_max_streams =
stream_id_manager_.incoming_advertised_max_streams();
for (QuicStreamCount i = 0; i < stream_count; ++i) {
stream_id_manager_.OnStreamClosed(stream_id);
stream_id += QuicUtils::StreamIdDelta(transport_version());
EXPECT_EQ(expected_actual_max,
stream_id_manager_.incoming_actual_max_streams());
EXPECT_EQ(expected_advertised_max_streams,
stream_id_manager_.incoming_advertised_max_streams());
}
}
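// STREAMS_BLOCKED with a stream count of zero: ignored while the incoming
// limit is zero; once the actual limit is raised, the same frame elicits a
// MAX_STREAMS advertising it.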
TEST_P(QuicStreamIdManagerTest, StreamsBlockedEdgeConditions) {
QuicStreamsBlockedFrame frame;
frame.unidirectional = IsUnidirectional();
EXPECT_CALL(delegate_, CanSendMaxStreams()).Times(0);
EXPECT_CALL(delegate_, SendMaxStreams(_, _)).Times(0);
stream_id_manager_.SetMaxOpenIncomingStreams(0);
frame.stream_count = 0;
std::string error_details;
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(true));
EXPECT_CALL(delegate_, SendMaxStreams(123u, IsUnidirectional()));
QuicStreamIdManagerPeer::set_incoming_actual_max_streams(&stream_id_manager_,
123);
frame.stream_count = 0;
EXPECT_TRUE(stream_id_manager_.OnStreamsBlockedFrame(frame, &error_details));
}
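// Open and close one window's worth of streams one at a time; exactly one
// MAX_STREAMS advertising first_advert + window is expected.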
TEST_P(QuicStreamIdManagerTest, MaxStreamsSlidingWindow) {
QuicStreamCount first_advert =
stream_id_manager_.incoming_advertised_max_streams();
int i =
static_cast<int>(stream_id_manager_.incoming_initial_max_open_streams() /
GetQuicFlag(quic_max_streams_window_divisor));
QuicStreamId id =
QuicStreamIdManagerPeer::GetFirstIncomingStreamId(&stream_id_manager_);
EXPECT_CALL(delegate_, CanSendMaxStreams()).WillOnce(testing::Return(true));
EXPECT_CALL(delegate_, SendMaxStreams(first_advert + i, IsUnidirectional()));
while (i) {
EXPECT_TRUE(
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(id, nullptr));
stream_id_manager_.OnStreamClosed(id);
i--;
id += QuicUtils::StreamIdDelta(transport_version());
}
}
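// Consume the entire outgoing allowance and verify that the stream count
// caps out at outgoing_max_streams().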
TEST_P(QuicStreamIdManagerTest, NewStreamDoesNotExceedLimit) {
EXPECT_TRUE(stream_id_manager_.MaybeAllowNewOutgoingStreams(100));
size_t stream_count = stream_id_manager_.outgoing_max_streams();
EXPECT_NE(0u, stream_count);
while (stream_count) {
EXPECT_TRUE(stream_id_manager_.CanOpenNextOutgoingStream());
stream_id_manager_.GetNextOutgoingStreamId();
stream_count--;
}
EXPECT_EQ(stream_id_manager_.outgoing_stream_count(),
stream_id_manager_.outgoing_max_streams());
EXPECT_FALSE(stream_id_manager_.CanOpenNextOutgoingStream());
}
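// After stream 3 arrives, the skipped streams below it count as available,
// stream 3 itself does not (it is open), and higher ids remain available.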
TEST_P(QuicStreamIdManagerTest, AvailableStreams) {
stream_id_manager_.MaybeIncreaseLargestPeerStreamId(GetNthIncomingStreamId(3),
nullptr);
EXPECT_TRUE(stream_id_manager_.IsAvailableStream(GetNthIncomingStreamId(1)));
EXPECT_TRUE(stream_id_manager_.IsAvailableStream(GetNthIncomingStreamId(2)));
EXPECT_FALSE(stream_id_manager_.IsAvailableStream(GetNthIncomingStreamId(3)));
EXPECT_TRUE(stream_id_manager_.IsAvailableStream(GetNthIncomingStreamId(4)));
}
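// A peer stream id far beyond the limit is rejected with a descriptive
// error rather than crashing.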
TEST_P(QuicStreamIdManagerTest, ExtremeMaybeIncreaseLargestPeerStreamId) {
QuicStreamId too_big_stream_id = GetNthIncomingStreamId(
stream_id_manager_.incoming_actual_max_streams() + 20);
std::string error_details;
EXPECT_FALSE(stream_id_manager_.MaybeIncreaseLargestPeerStreamId(
too_big_stream_id, &error_details));
EXPECT_EQ(error_details,
absl::StrCat("Stream id ", too_big_stream_id,
" would exceed stream count limit 100"));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_id_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_stream_id_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1845ed00-285e-4dc8-82c8-8c2389d5ebe9 | cpp | google/quiche | data_payload_decoder | quiche/http2/decoder/payload_decoders/data_payload_decoder.cc | quiche/http2/decoder/payload_decoders/data_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/data_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::ostream& operator<<(std::ostream& out,
DataPayloadDecoder::PayloadState v) {
switch (v) {
case DataPayloadDecoder::PayloadState::kReadPadLength:
return out << "kReadPadLength";
case DataPayloadDecoder::PayloadState::kReadPayload:
return out << "kReadPayload";
case DataPayloadDecoder::PayloadState::kSkipPadding:
return out << "kSkipPadding";
}
int unknown = static_cast<int>(v);
QUICHE_BUG(http2_bug_174_1)
<< "Invalid DataPayloadDecoder::PayloadState: " << unknown;
return out << "DataPayloadDecoder::PayloadState(" << unknown << ")";
}
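// Fast path: an unpadded DATA frame whose complete payload is already in
// the decode buffer is delivered to the listener in a single pass.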
DecodeStatus DataPayloadDecoder::StartDecodingPayload(FrameDecoderState* state,
DecodeBuffer* db) {
const Http2FrameHeader& frame_header = state->frame_header();
const uint32_t total_length = frame_header.payload_length;
QUICHE_DVLOG(2) << "DataPayloadDecoder::StartDecodingPayload: "
<< frame_header;
QUICHE_DCHECK_EQ(Http2FrameType::DATA, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), total_length);
QUICHE_DCHECK_EQ(0, frame_header.flags & ~(Http2FrameFlag::END_STREAM |
Http2FrameFlag::PADDED));
QUICHE_DVLOG(2) << "StartDecodingPayload total_length=" << total_length;
if (!frame_header.IsPadded()) {
QUICHE_DVLOG(2) << "StartDecodingPayload !IsPadded";
if (db->Remaining() == total_length) {
QUICHE_DVLOG(2) << "StartDecodingPayload all present";
state->listener()->OnDataStart(frame_header);
if (total_length > 0) {
state->listener()->OnDataPayload(db->cursor(), total_length);
db->AdvanceCursor(total_length);
}
state->listener()->OnDataEnd();
return DecodeStatus::kDecodeDone;
}
payload_state_ = PayloadState::kReadPayload;
} else {
payload_state_ = PayloadState::kReadPadLength;
}
state->InitializeRemainders();
state->listener()->OnDataStart(frame_header);
return ResumeDecodingPayload(state, db);
}
DecodeStatus DataPayloadDecoder::ResumeDecodingPayload(FrameDecoderState* state,
DecodeBuffer* db) {
QUICHE_DVLOG(2) << "DataPayloadDecoder::ResumeDecodingPayload payload_state_="
<< payload_state_;
const Http2FrameHeader& frame_header = state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::DATA, frame_header.type);
QUICHE_DCHECK_LE(state->remaining_payload_and_padding(),
frame_header.payload_length);
QUICHE_DCHECK_LE(db->Remaining(), state->remaining_payload_and_padding());
DecodeStatus status;
size_t avail;
switch (payload_state_) {
case PayloadState::kReadPadLength:
      status = state->ReadPadLength(db, /*report_pad_length=*/true);
if (status != DecodeStatus::kDecodeDone) {
return status;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kReadPayload:
avail = state->AvailablePayload(db);
if (avail > 0) {
state->listener()->OnDataPayload(db->cursor(), avail);
db->AdvanceCursor(avail);
state->ConsumePayload(avail);
}
if (state->remaining_payload() > 0) {
payload_state_ = PayloadState::kReadPayload;
return DecodeStatus::kDecodeInProgress;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kSkipPadding:
if (state->SkipPadding(db)) {
state->listener()->OnDataEnd();
return DecodeStatus::kDecodeDone;
}
payload_state_ = PayloadState::kSkipPadding;
return DecodeStatus::kDecodeInProgress;
}
QUICHE_BUG(http2_bug_174_2) << "PayloadState: " << payload_state_;
return DecodeStatus::kDecodeError;
}
} | #include "quiche/http2/decoder/payload_decoders/data_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class DataPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() { return Http2FrameType::DATA; }
static constexpr uint8_t FlagsAffectingPayloadDecoding() {
return Http2FrameFlag::PADDED;
}
};
namespace {
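// Forwards each decoder callback into the FramePartsCollector so the test
// can compare the collected parts against the expected FrameParts.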
struct Listener : public FramePartsCollector {
void OnDataStart(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnDataStart: " << header;
StartFrame(header)->OnDataStart(header);
}
void OnDataPayload(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnDataPayload: len=" << len;
CurrentFrame()->OnDataPayload(data, len);
}
void OnDataEnd() override {
QUICHE_VLOG(1) << "OnDataEnd";
EndFrame()->OnDataEnd();
}
void OnPadLength(size_t pad_length) override {
QUICHE_VLOG(1) << "OnPadLength: " << pad_length;
CurrentFrame()->OnPadLength(pad_length);
}
void OnPadding(const char* padding, size_t skipped_length) override {
QUICHE_VLOG(1) << "OnPadding: " << skipped_length;
CurrentFrame()->OnPadding(padding, skipped_length);
}
void OnPaddingTooLong(const Http2FrameHeader& header,
size_t missing_length) override {
QUICHE_VLOG(1) << "OnPaddingTooLong: " << header
<< " missing_length: " << missing_length;
EndFrame()->OnPaddingTooLong(header, missing_length);
}
};
class DataPayloadDecoderTest
: public AbstractPaddablePayloadDecoderTest<
DataPayloadDecoder, DataPayloadDecoderPeer, Listener> {
protected:
AssertionResult CreateAndDecodeDataOfSize(size_t data_size) {
Reset();
uint8_t flags = RandFlags();
std::string data_payload = Random().RandString(data_size);
frame_builder_.Append(data_payload);
MaybeAppendTrailingPadding();
Http2FrameHeader frame_header(frame_builder_.size(), Http2FrameType::DATA,
flags, RandStreamId());
set_frame_header(frame_header);
ScrubFlagsOfHeader(&frame_header);
FrameParts expected(frame_header, data_payload, total_pad_length_);
return DecodePayloadAndValidateSeveralWays(frame_builder_.buffer(),
expected);
}
};
INSTANTIATE_TEST_SUITE_P(VariousPadLengths, DataPayloadDecoderTest,
::testing::Values(0, 1, 2, 3, 4, 254, 255, 256));
TEST_P(DataPayloadDecoderTest, VariousDataPayloadSizes) {
for (size_t data_size : {0, 1, 2, 3, 255, 256, 1024}) {
EXPECT_TRUE(CreateAndDecodeDataOfSize(data_size));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/data_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/data_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
307db5e8-a1cf-4674-a56e-967b9a33e171 | cpp | tensorflow/tensorflow | benchmark_utils | tensorflow/lite/tools/benchmark/benchmark_utils.cc | tensorflow/lite/tools/benchmark/benchmark_utils_test.cc | #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace util {
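// Sleeping for a non-positive duration is a no-op.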
void SleepForSeconds(double sleep_seconds) {
if (sleep_seconds <= 0.0) {
return;
}
tflite::profiling::time::SleepForMicros(
static_cast<uint64_t>(sleep_seconds * 1e6));
}
}
}
} | #include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace benchmark {
namespace {
TEST(BenchmarkHelpersTest, SleepForNegativeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(-5.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_LT(end_ts - start_ts, 1000000);
}
TEST(BenchmarkHelpersTest, SleepForSomeSeconds) {
const auto start_ts = tflite::profiling::time::NowMicros();
util::SleepForSeconds(2.0);
const auto end_ts = tflite::profiling::time::NowMicros();
EXPECT_GT(end_ts - start_ts, 1900000);
}
TEST(BenchmarkHelpersTest, SplitAndParseFailed) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("hello;world", ';', &results);
EXPECT_FALSE(splitted);
}
TEST(BenchmarkHelpersTest, SplitAndParseString) {
std::vector<std::string> results;
const bool splitted = util::SplitAndParse("hello,world", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ("hello", results[0]);
EXPECT_EQ("world", results[1]);
}
TEST(BenchmarkHelpersTest, SplitAndParseInts) {
std::vector<int> results;
const bool splitted = util::SplitAndParse("1,2", ',', &results);
EXPECT_TRUE(splitted);
EXPECT_EQ(2, results.size());
EXPECT_EQ(1, results[0]);
EXPECT_EQ(2, results[1]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
25ce80e0-cfc7-41c8-8494-1706993cefd2 | cpp | google/tensorstore | neuroglancer_uint64_sharded | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.cc | tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded_test.cc | #include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/internal/endian.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/async_cache.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/cache_pool_resource.h"
#include "tensorstore/internal/cache/kvs_backed_cache.h"
#include "tensorstore/internal/data_copy_concurrency_resource.h"
#include "tensorstore/internal/estimate_heap_usage/estimate_heap_usage.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/driver.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_decoder.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded_encoder.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_modify_write.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/transaction.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/execution/any_receiver.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/garbage_collection/garbage_collection.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
#include "tensorstore/internal/estimate_heap_usage/std_vector.h"
#include "tensorstore/util/execution/result_sender.h"
namespace tensorstore {
namespace neuroglancer_uint64_sharded {
namespace {
using ::tensorstore::internal::ConvertInvalidArgumentToFailedPrecondition;
using ::tensorstore::internal::IntrusivePtr;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::SupportedFeatures;
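// Read-only kvstore adapter that maps a binary ChunkCombinedShardInfo key
// to the corresponding minishard index, resolved via a two-level lookup:
// the 16-byte shard index entry, then the minishard index it points to.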
class MinishardIndexKeyValueStore : public kvstore::Driver {
public:
explicit MinishardIndexKeyValueStore(kvstore::DriverPtr base,
Executor executor,
std::string key_prefix,
const ShardingSpec& sharding_spec)
: base_(std::move(base)),
executor_(std::move(executor)),
key_prefix_(key_prefix),
sharding_spec_(sharding_spec) {}
Future<ReadResult> Read(Key key, ReadOptions options) override;
std::string DescribeKey(std::string_view key) override {
ChunkCombinedShardInfo combined_info;
if (key.size() != sizeof(combined_info)) {
return tensorstore::StrCat("invalid key ", tensorstore::QuoteString(key));
}
std::memcpy(&combined_info, key.data(), sizeof(combined_info));
auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
return tensorstore::StrCat(
"minishard ", split_info.minishard, " in ",
base_->DescribeKey(
GetShardKey(sharding_spec_, key_prefix_, split_info.shard)));
}
void GarbageCollectionVisit(
garbage_collection::GarbageCollectionVisitor& visitor) const final {
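    // No-op.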
}
kvstore::Driver* base() { return base_.get(); }
const ShardingSpec& sharding_spec() { return sharding_spec_; }
const std::string& key_prefix() const { return key_prefix_; }
const Executor& executor() const { return executor_; }
kvstore::DriverPtr base_;
Executor executor_;
std::string key_prefix_;
ShardingSpec sharding_spec_;
};
namespace {
using ShardIndex = uint64_t;
using MinishardIndex = uint64_t;
class MinishardIndexReadOperationState;
using MinishardIndexReadOperationStateBase =
internal_kvstore_batch::BatchReadEntry<
MinishardIndexKeyValueStore,
internal_kvstore_batch::ReadRequest<MinishardIndex>,
ShardIndex, kvstore::ReadGenerationConditions>;
class MinishardIndexReadOperationState
: public MinishardIndexReadOperationStateBase,
public internal::AtomicReferenceCount<MinishardIndexReadOperationState> {
public:
explicit MinishardIndexReadOperationState(BatchEntryKey&& batch_entry_key_)
: MinishardIndexReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<MinishardIndexReadOperationState>(1) {}
private:
Batch retry_batch_{no_batch};
void Submit(Batch::View batch) override {
const auto& executor = driver().executor();
executor(
[this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
}
void ProcessBatch(Batch batch) {
internal::IntrusivePtr<MinishardIndexReadOperationState> self(
this, internal::adopt_object_ref);
retry_batch_ = Batch::New();
auto minishard_fetch_batch = Batch::New();
for (auto& request : request_batch.requests) {
ProcessMinishard(batch, request, minishard_fetch_batch);
}
}
std::string ShardKey() {
const auto& sharding_spec = driver().sharding_spec();
return GetShardKey(sharding_spec, driver().key_prefix(),
std::get<ShardIndex>(batch_entry_key));
}
void ProcessMinishard(Batch::View batch, Request& request,
Batch minishard_fetch_batch) {
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions =
std::get<kvstore::ReadGenerationConditions>(this->batch_entry_key);
kvstore_read_options.staleness_bound = this->request_batch.staleness_bound;
auto key = std::get<MinishardIndex>(request);
kvstore_read_options.byte_range = OptionalByteRangeRequest{
static_cast<int64_t>(key * 16), static_cast<int64_t>((key + 1) * 16)};
kvstore_read_options.batch = batch;
auto shard_index_read_future = this->driver().base()->Read(
this->ShardKey(), std::move(kvstore_read_options));
shard_index_read_future.Force();
shard_index_read_future.ExecuteWhenReady(
[self = internal::IntrusivePtr<MinishardIndexReadOperationState>(this),
minishard_fetch_batch = std::move(minishard_fetch_batch),
&request](ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), &request,
minishard_fetch_batch = std::move(minishard_fetch_batch),
future = std::move(future)] {
OnShardIndexReady(std::move(self), request,
std::move(minishard_fetch_batch),
std::move(future.result()));
});
});
}
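  // Decodes the 16-byte shard index entry into an absolute byte range and
  // issues a read of the referenced minishard index, conditional on the
  // shard generation remaining unchanged.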
static void OnShardIndexReady(
internal::IntrusivePtr<MinishardIndexReadOperationState> self,
Request& request, Batch minishard_fetch_batch,
Result<kvstore::ReadResult>&& result) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
const auto set_error = [&](absl::Status status) {
byte_range_request.promise.SetResult(MaybeAnnotateStatus(
ConvertInvalidArgumentToFailedPrecondition(std::move(status)),
"Error retrieving shard index entry"));
};
TENSORSTORE_ASSIGN_OR_RETURN(auto&& read_result, result,
set_error(std::move(_)));
    if (read_result.aborted() || read_result.not_found()) {
byte_range_request.promise.SetResult(std::move(read_result));
return;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range, DecodeShardIndexEntry(read_result.value.Flatten()),
set_error(std::move(_)));
TENSORSTORE_ASSIGN_OR_RETURN(
byte_range,
GetAbsoluteShardByteRange(byte_range, self->driver().sharding_spec()),
set_error(std::move(_)));
if (byte_range.size() == 0) {
read_result.value.Clear();
read_result.state = kvstore::ReadResult::kMissing;
byte_range_request.promise.SetResult(std::move(read_result));
return;
}
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions.if_equal =
std::move(read_result.stamp.generation);
kvstore_read_options.staleness_bound = self->request_batch.staleness_bound;
kvstore_read_options.byte_range = byte_range;
kvstore_read_options.batch = std::move(minishard_fetch_batch);
auto read_future = self->driver().base()->Read(
self->ShardKey(), std::move(kvstore_read_options));
read_future.Force();
read_future.ExecuteWhenReady(
[self = std::move(self),
&request](ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), &request,
future = std::move(future)]() mutable {
self->OnMinishardIndexReadReady(request,
std::move(future.result()));
});
});
}
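  // If the conditional minishard index read aborted (the shard changed in
  // between the two reads), requeue the request via retry_batch_; otherwise
  // complete it.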
void OnMinishardIndexReadReady(Request& request,
Result<kvstore::ReadResult>&& result) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
TENSORSTORE_ASSIGN_OR_RETURN(
auto&& read_result, result,
static_cast<void>(byte_range_request.promise.SetResult(
internal::ConvertInvalidArgumentToFailedPrecondition(
std::move(_)))));
if (read_result.aborted()) {
MakeRequest<MinishardIndexReadOperationState>(
driver(), std::get<ShardIndex>(batch_entry_key),
kvstore::ReadGenerationConditions(
std::get<kvstore::ReadGenerationConditions>(batch_entry_key)),
retry_batch_, read_result.stamp.time, std::move(request));
return;
}
byte_range_request.promise.SetResult(std::move(read_result));
}
};
}
Future<kvstore::ReadResult> MinishardIndexKeyValueStore::Read(
Key key, ReadOptions options) {
ChunkCombinedShardInfo combined_info;
if (key.size() != sizeof(combined_info)) {
return absl::InvalidArgumentError("Key does not specify a minishard");
}
std::memcpy(&combined_info, key.data(), sizeof(combined_info));
auto split_info = GetSplitShardInfo(sharding_spec_, combined_info);
if (options.byte_range != OptionalByteRangeRequest()) {
return absl::InvalidArgumentError("Byte ranges not supported");
}
auto [promise, future] = PromiseFuturePair<ReadResult>::Make();
MinishardIndexReadOperationState::MakeRequest<
MinishardIndexReadOperationState>(
*this, split_info.shard, std::move(options.generation_conditions),
options.batch, options.staleness_bound,
MinishardIndexReadOperationState::Request{{std::move(promise)},
split_info.minishard});
return std::move(future);
}
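// Caches decoded minishard indexes.  Each entry is keyed by the binary
// ChunkCombinedShardInfo identifying one (shard, minishard) pair.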
class MinishardIndexCache
: public internal::KvsBackedCache<MinishardIndexCache,
internal::AsyncCache> {
using Base =
internal::KvsBackedCache<MinishardIndexCache, internal::AsyncCache>;
public:
using ReadData = std::vector<MinishardIndexEntry>;
class Entry : public Base::Entry {
public:
using OwningCache = MinishardIndexCache;
ChunkSplitShardInfo shard_info() {
ChunkCombinedShardInfo combined_info;
assert(this->key().size() == sizeof(combined_info));
std::memcpy(&combined_info, this->key().data(), sizeof(combined_info));
return GetSplitShardInfo(GetOwningCache(*this).sharding_spec(),
combined_info);
}
size_t ComputeReadDataSizeInBytes(const void* read_data) override {
return internal::EstimateHeapUsage(
*static_cast<const ReadData*>(read_data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
std::shared_ptr<ReadData> read_data;
if (value) {
if (auto result = DecodeMinishardIndexAndAdjustByteRanges(
*value, GetOwningCache(*this).sharding_spec());
result.ok()) {
read_data = std::make_shared<ReadData>(*std::move(result));
} else {
execution::set_error(receiver,
ConvertInvalidArgumentToFailedPrecondition(
std::move(result).status()));
return;
}
}
execution::set_value(receiver, std::move(read_data));
});
}
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
explicit MinishardIndexCache(kvstore::DriverPtr base_kvstore,
Executor executor, std::string key_prefix,
const ShardingSpec& sharding_spec)
: Base(kvstore::DriverPtr(new MinishardIndexKeyValueStore(
std::move(base_kvstore), executor, std::move(key_prefix),
sharding_spec))) {}
MinishardIndexKeyValueStore* kvstore_driver() {
return static_cast<MinishardIndexKeyValueStore*>(
this->Base::kvstore_driver());
}
const ShardingSpec& sharding_spec() {
return kvstore_driver()->sharding_spec();
}
kvstore::Driver* base_kvstore_driver() { return kvstore_driver()->base(); }
const Executor& executor() { return kvstore_driver()->executor(); }
const std::string& key_prefix() { return kvstore_driver()->key_prefix(); }
};
MinishardAndChunkId GetMinishardAndChunkId(std::string_view key) {
assert(key.size() == 16);
return {absl::big_endian::Load64(key.data()),
{absl::big_endian::Load64(key.data() + 8)}};
}
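// Caches the full encoded contents of each shard, keyed by shard index,
// and implements atomic read-modify-write of individual chunks within it.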
class ShardedKeyValueStoreWriteCache
: public internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache> {
using Base = internal::KvsBackedCache<ShardedKeyValueStoreWriteCache,
internal::AsyncCache>;
public:
using ReadData = EncodedChunks;
static std::string ShardToKey(ShardIndex shard) {
std::string key;
key.resize(sizeof(ShardIndex));
absl::big_endian::Store64(key.data(), shard);
return key;
}
static ShardIndex KeyToShard(std::string_view key) {
assert(key.size() == sizeof(ShardIndex));
return absl::big_endian::Load64(key.data());
}
class Entry : public Base::Entry {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
ShardIndex shard() { return KeyToShard(key()); }
size_t ComputeReadDataSizeInBytes(const void* data) override {
return internal::EstimateHeapUsage(*static_cast<const ReadData*>(data));
}
void DoDecode(std::optional<absl::Cord> value,
DecodeReceiver receiver) override {
GetOwningCache(*this).executor()(
[this, value = std::move(value),
receiver = std::move(receiver)]() mutable {
EncodedChunks chunks;
if (value) {
if (auto result =
SplitShard(GetOwningCache(*this).sharding_spec(), *value);
result.ok()) {
chunks = *std::move(result);
} else {
execution::set_error(receiver,
ConvertInvalidArgumentToFailedPrecondition(
std::move(result).status()));
return;
}
}
execution::set_value(
receiver, std::make_shared<EncodedChunks>(std::move(chunks)));
});
}
void DoEncode(std::shared_ptr<const EncodedChunks> data,
EncodeReceiver receiver) override {
execution::set_value(
receiver, EncodeShard(GetOwningCache(*this).sharding_spec(), *data));
}
std::string GetKeyValueStoreKey() override {
auto& cache = GetOwningCache(*this);
return GetShardKey(cache.sharding_spec(), cache.key_prefix(),
this->shard());
}
};
class TransactionNode : public Base::TransactionNode,
public internal_kvstore::AtomicMultiPhaseMutation {
public:
using OwningCache = ShardedKeyValueStoreWriteCache;
using Base::TransactionNode::TransactionNode;
absl::Mutex& mutex() override { return this->mutex_; }
void PhaseCommitDone(size_t next_phase) override {}
internal::TransactionState::Node& GetTransactionNode() override {
return *this;
}
void Abort() override {
this->AbortRemainingPhases();
Base::TransactionNode::Abort();
}
std::string DescribeKey(std::string_view key) override {
auto& entry = GetOwningEntry(*this);
auto& cache = GetOwningCache(entry);
auto minishard_and_chunk_id = GetMinishardAndChunkId(key);
return tensorstore::StrCat(
"chunk ", minishard_and_chunk_id.chunk_id.value, " in minishard ",
minishard_and_chunk_id.minishard, " in ",
cache.kvstore_driver()->DescribeKey(entry.GetKeyValueStoreKey()));
}
void DoApply(ApplyOptions options, ApplyReceiver receiver) override;
void AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) override;
void RecordEntryWritebackError(
internal_kvstore::ReadModifyWriteEntry& entry,
absl::Status error) override {
absl::MutexLock lock(&mutex_);
if (apply_status_.ok()) {
apply_status_ = std::move(error);
}
}
void Revoke() override {
Base::TransactionNode::Revoke();
{ UniqueWriterLock(*this); }
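      // Once the write lock has been acquired and released, no new entries
      // can be added, so the remaining entries can be revoked safely.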
this->RevokeAllEntries();
}
void WritebackSuccess(ReadState&& read_state) override;
void WritebackError() override;
void InvalidateReadState() override;
bool MultiPhaseReadsCommitted() override { return this->reads_committed_; }
void Read(
internal_kvstore::ReadModifyWriteEntry& entry,
kvstore::ReadModifyWriteTarget::TransactionalReadOptions&& options,
kvstore::ReadModifyWriteTarget::ReadReceiver&& receiver) override {
this->AsyncCache::TransactionNode::Read({options.staleness_bound})
.ExecuteWhenReady(WithExecutor(
GetOwningCache(*this).executor(),
[&entry,
if_not_equal =
std::move(options.generation_conditions.if_not_equal),
receiver = std::move(receiver)](
ReadyFuture<const void> future) mutable {
if (!future.result().ok()) {
execution::set_error(receiver, future.result().status());
return;
}
execution::submit(HandleShardReadSuccess(entry, if_not_equal),
receiver);
}));
}
static Result<kvstore::ReadResult> HandleShardReadSuccess(
internal_kvstore::ReadModifyWriteEntry& entry,
const StorageGeneration& if_not_equal) {
auto& self = static_cast<TransactionNode&>(entry.multi_phase());
TimestampedStorageGeneration stamp;
std::shared_ptr<const EncodedChunks> encoded_chunks;
{
AsyncCache::ReadLock<EncodedChunks> lock{self};
stamp = lock.stamp();
encoded_chunks = lock.shared_data();
}
if (!StorageGeneration::IsUnknown(stamp.generation) &&
stamp.generation == if_not_equal) {
return kvstore::ReadResult::Unspecified(std::move(stamp));
}
if (StorageGeneration::IsDirty(stamp.generation)) {
stamp.generation =
StorageGeneration::AddLayer(std::move(stamp.generation));
}
auto* chunk =
FindChunk(*encoded_chunks, GetMinishardAndChunkId(entry.key_));
if (!chunk) {
return kvstore::ReadResult::Missing(std::move(stamp));
} else {
TENSORSTORE_ASSIGN_OR_RETURN(
absl::Cord value,
DecodeData(chunk->encoded_data,
GetOwningCache(self).sharding_spec().data_encoding));
return kvstore::ReadResult::Value(std::move(value), std::move(stamp));
}
}
void Writeback(internal_kvstore::ReadModifyWriteEntry& entry,
internal_kvstore::ReadModifyWriteEntry& source_entry,
kvstore::ReadResult&& read_result) override {
auto& value = read_result.value;
if (read_result.state == kvstore::ReadResult::kValue) {
value = EncodeData(value,
GetOwningCache(*this).sharding_spec().data_encoding);
}
internal_kvstore::AtomicMultiPhaseMutation::Writeback(
entry, entry, std::move(read_result));
}
ApplyReceiver apply_receiver_;
ApplyOptions apply_options_;
absl::Status apply_status_;
};
Entry* DoAllocateEntry() final { return new Entry; }
size_t DoGetSizeofEntry() final { return sizeof(Entry); }
TransactionNode* DoAllocateTransactionNode(AsyncCache::Entry& entry) final {
return new TransactionNode(static_cast<Entry&>(entry));
}
explicit ShardedKeyValueStoreWriteCache(
internal::CachePtr<MinishardIndexCache> minishard_index_cache,
GetMaxChunksPerShardFunction get_max_chunks_per_shard)
: Base(kvstore::DriverPtr(minishard_index_cache->base_kvstore_driver())),
minishard_index_cache_(std::move(minishard_index_cache)),
get_max_chunks_per_shard_(std::move(get_max_chunks_per_shard)) {}
const ShardingSpec& sharding_spec() const {
return minishard_index_cache()->sharding_spec();
}
const std::string& key_prefix() const {
return minishard_index_cache()->key_prefix();
}
const internal::CachePtr<MinishardIndexCache>& minishard_index_cache() const {
return minishard_index_cache_;
}
const Executor& executor() { return minishard_index_cache()->executor(); }
internal::CachePtr<MinishardIndexCache> minishard_index_cache_;
GetMaxChunksPerShardFunction get_max_chunks_per_shard_;
};
void ShardedKeyValueStoreWriteCache::TransactionNode::InvalidateReadState() {
Base::TransactionNode::InvalidateReadState();
internal_kvstore::InvalidateReadState(phases_);
}
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackSuccess(
ReadState&& read_state) {
for (auto& entry : phases_.entries_) {
internal_kvstore::WritebackSuccess(
static_cast<internal_kvstore::ReadModifyWriteEntry&>(entry),
read_state.stamp);
}
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackSuccess(std::move(read_state));
}
void ShardedKeyValueStoreWriteCache::TransactionNode::WritebackError() {
internal_kvstore::WritebackError(phases_);
internal_kvstore::DestroyPhaseEntries(phases_);
Base::TransactionNode::WritebackError();
}
namespace {
void StartApply(ShardedKeyValueStoreWriteCache::TransactionNode& node) {
node.RetryAtomicWriteback(node.apply_options_.staleness_bound);
}
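// Computes the new shard contents by merging the buffered chunk mutations
// into the existing encoded chunks; `conditional` selects whether the
// existing shard state is consulted.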
void MergeForWriteback(ShardedKeyValueStoreWriteCache::TransactionNode& node,
bool conditional) {
TimestampedStorageGeneration stamp;
std::shared_ptr<const EncodedChunks> shared_existing_chunks;
span<const EncodedChunk> existing_chunks;
if (conditional) {
auto lock = internal::AsyncCache::ReadLock<EncodedChunks>{node};
stamp = lock.stamp();
shared_existing_chunks = lock.shared_data();
existing_chunks = *shared_existing_chunks;
} else {
stamp = TimestampedStorageGeneration::Unconditional();
}
std::vector<EncodedChunk> chunks;
size_t existing_index = 0;
bool mismatch = false;
bool changed = false;
for (auto& entry : node.phases_.entries_) {
auto& buffered_entry =
static_cast<internal_kvstore::AtomicMultiPhaseMutation::
BufferedReadModifyWriteEntry&>(entry);
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation) &&
StorageGeneration::Clean(entry_stamp.generation) !=
StorageGeneration::Clean(stamp.generation)) {
mismatch = true;
break;
}
if (buffered_entry.value_state_ == kvstore::ReadResult::kUnspecified ||
!StorageGeneration::IsInnerLayerDirty(entry_stamp.generation)) {
continue;
}
auto minishard_and_chunk_id = GetMinishardAndChunkId(buffered_entry.key_);
while (existing_index < static_cast<size_t>(existing_chunks.size())) {
auto& existing_chunk = existing_chunks[existing_index];
if (existing_chunk.minishard_and_chunk_id < minishard_and_chunk_id) {
chunks.push_back(existing_chunk);
++existing_index;
} else if (existing_chunk.minishard_and_chunk_id ==
minishard_and_chunk_id) {
changed = true;
++existing_index;
break;
} else {
break;
}
}
if (buffered_entry.value_state_ == kvstore::ReadResult::kValue) {
chunks.push_back(
EncodedChunk{minishard_and_chunk_id, buffered_entry.value_});
changed = true;
}
}
if (mismatch) {
node.apply_options_.staleness_bound = absl::Now();
StartApply(node);
return;
}
chunks.insert(chunks.end(), existing_chunks.begin() + existing_index,
existing_chunks.end());
internal::AsyncCache::ReadState update;
update.stamp = std::move(stamp);
if (changed) {
update.stamp.generation.MarkDirty();
}
update.data = std::make_shared<EncodedChunks>(std::move(chunks));
execution::set_value(std::exchange(node.apply_receiver_, {}),
std::move(update));
}
}
void ShardedKeyValueStoreWriteCache::TransactionNode::DoApply(
ApplyOptions options, ApplyReceiver receiver) {
apply_receiver_ = std::move(receiver);
apply_options_ = options;
apply_status_ = absl::Status();
GetOwningCache(*this).executor()([this] { StartApply(*this); });
}
void ShardedKeyValueStoreWriteCache::TransactionNode::AllEntriesDone(
internal_kvstore::SinglePhaseMutation& single_phase_mutation) {
if (!apply_status_.ok()) {
execution::set_error(std::exchange(apply_receiver_, {}),
std::exchange(apply_status_, {}));
return;
}
auto& self = *this;
GetOwningCache(*this).executor()([&self] {
TimestampedStorageGeneration stamp;
bool mismatch = false;
bool modified = false;
size_t num_chunks = 0;
for (auto& entry : self.phases_.entries_) {
auto& buffered_entry =
static_cast<AtomicMultiPhaseMutation::BufferedReadModifyWriteEntry&>(
entry);
if (buffered_entry.value_state_ != kvstore::ReadResult::kUnspecified) {
modified = true;
++num_chunks;
}
auto& entry_stamp = buffered_entry.stamp();
if (StorageGeneration::IsConditional(entry_stamp.generation)) {
if (!StorageGeneration::IsUnknown(stamp.generation) &&
StorageGeneration::Clean(stamp.generation) !=
StorageGeneration::Clean(entry_stamp.generation)) {
mismatch = true;
break;
} else {
stamp = entry_stamp;
}
}
}
if (mismatch) {
self.apply_options_.staleness_bound = absl::Now();
StartApply(self);
return;
}
auto& cache = GetOwningCache(self);
if (!modified && StorageGeneration::IsUnknown(stamp.generation) &&
self.apply_options_.apply_mode !=
ApplyOptions::ApplyMode::kSpecifyUnchanged) {
internal::AsyncCache::ReadState update;
update.stamp = TimestampedStorageGeneration::Unconditional();
execution::set_value(std::exchange(self.apply_receiver_, {}),
std::move(update));
return;
}
if (!StorageGeneration::IsUnknown(stamp.generation) ||
!cache.get_max_chunks_per_shard_ ||
cache.get_max_chunks_per_shard_(GetOwningEntry(self).shard()) !=
num_chunks) {
self.internal::AsyncCache::TransactionNode::Read(
{self.apply_options_.staleness_bound})
.ExecuteWhenReady([&self](ReadyFuture<const void> future) {
if (!future.result().ok()) {
execution::set_error(std::exchange(self.apply_receiver_, {}),
future.result().status());
return;
}
GetOwningCache(self).executor()(
                    [&self] { MergeForWriteback(self, /*conditional=*/true); });
});
return;
}
    MergeForWriteback(self, /*conditional=*/false);
});
}
Result<ChunkId> KeyToChunkIdOrError(std::string_view key) {
if (auto chunk_id = KeyToChunkId(key)) {
return *chunk_id;
}
return absl::InvalidArgumentError(
tensorstore::StrCat("Invalid key: ", tensorstore::QuoteString(key)));
}
}
struct ShardedKeyValueStoreSpecData {
Context::Resource<internal::CachePoolResource> cache_pool;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency;
kvstore::Spec base;
ShardingSpec metadata;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ShardedKeyValueStoreSpecData,
internal_json_binding::NoOptions,
IncludeDefaults,
::nlohmann::json::object_t)
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.cache_pool, x.data_copy_concurrency, x.base, x.metadata);
};
};
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
ShardedKeyValueStoreSpecData,
jb::Object(
jb::Member("base",
jb::Projection<&ShardedKeyValueStoreSpecData::base>()),
jb::Initialize([](auto* obj) {
internal::EnsureDirectoryPath(obj->base.path);
return absl::OkStatus();
}),
jb::Member("metadata",
jb::Projection<&ShardedKeyValueStoreSpecData::metadata>(
jb::DefaultInitializedValue())),
jb::Member(internal::CachePoolResource::id,
jb::Projection<&ShardedKeyValueStoreSpecData::cache_pool>()),
jb::Member(
internal::DataCopyConcurrencyResource::id,
jb::Projection<
&ShardedKeyValueStoreSpecData::data_copy_concurrency>())));
class ShardedKeyValueStoreSpec
: public internal_kvstore::RegisteredDriverSpec<
ShardedKeyValueStoreSpec, ShardedKeyValueStoreSpecData> {
public:
static constexpr char id[] = "neuroglancer_uint64_sharded";
Future<kvstore::DriverPtr> DoOpen() const override;
Result<kvstore::Spec> GetBase(std::string_view path) const override {
return data_.base;
}
};
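// Driver that exposes each chunk as its own key, with reads and writes
// translated into operations on whole shards of the base kvstore.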
class ShardedKeyValueStore
: public internal_kvstore::RegisteredDriver<ShardedKeyValueStore,
ShardedKeyValueStoreSpec> {
public:
explicit ShardedKeyValueStore(
kvstore::DriverPtr base_kvstore, Executor executor,
std::string key_prefix, const ShardingSpec& sharding_spec,
internal::CachePool::WeakPtr cache_pool,
GetMaxChunksPerShardFunction get_max_chunks_per_shard = {})
: write_cache_(internal::GetCache<ShardedKeyValueStoreWriteCache>(
cache_pool.get(), "",
[&] {
return std::make_unique<ShardedKeyValueStoreWriteCache>(
internal::GetCache<MinishardIndexCache>(
cache_pool.get(), "",
[&] {
return std::make_unique<MinishardIndexCache>(
std::move(base_kvstore), std::move(executor),
std::move(key_prefix), sharding_spec);
}),
std::move(get_max_chunks_per_shard));
})),
is_raw_encoding_(sharding_spec.data_encoding ==
ShardingSpec::DataEncoding::raw) {}
Future<ReadResult> Read(Key key, ReadOptions options) override;
void ListImpl(ListOptions options, ListReceiver receiver) override {
struct State {
ListReceiver receiver_;
Promise<void> promise_;
Future<void> future_;
ListOptions options_;
explicit State(ListReceiver&& receiver, ListOptions&& options)
: receiver_(std::move(receiver)), options_(std::move(options)) {
auto [promise, future] = PromiseFuturePair<void>::Make(MakeResult());
this->promise_ = std::move(promise);
this->future_ = std::move(future);
future_.Force();
execution::set_starting(receiver_, [promise = promise_] {
promise.SetResult(absl::CancelledError(""));
});
}
~State() {
auto& r = promise_.raw_result();
if (r.ok()) {
execution::set_done(receiver_);
} else {
execution::set_error(receiver_, r.status());
}
execution::set_stopping(receiver_);
}
};
auto state =
std::make_shared<State>(std::move(receiver), std::move(options));
ShardIndex num_shards = ShardIndex{1} << sharding_spec().shard_bits;
for (ShardIndex shard = 0; shard < num_shards; ++shard) {
auto entry = GetCacheEntry(
write_cache_, ShardedKeyValueStoreWriteCache::ShardToKey(shard));
LinkValue(
[state, entry, is_raw_encoding = is_raw_encoding_](
Promise<void> promise, ReadyFuture<const void> future) {
auto chunks = internal::AsyncCache::ReadLock<EncodedChunks>(*entry)
.shared_data();
if (!chunks) return;
for (auto& chunk : *chunks) {
auto key = ChunkIdToKey(chunk.minishard_and_chunk_id.chunk_id);
if (!Contains(state->options_.range, key)) continue;
key.erase(0, state->options_.strip_prefix_length);
execution::set_value(
state->receiver_,
ListEntry{
std::move(key),
is_raw_encoding
? ListEntry::checked_size(chunk.encoded_data.size())
: -1,
});
}
},
state->promise_, entry->Read({absl::InfiniteFuture()}));
}
}
Future<TimestampedStorageGeneration> Write(Key key,
std::optional<Value> value,
WriteOptions options) override {
return internal_kvstore::WriteViaTransaction(
this, std::move(key), std::move(value), std::move(options));
}
absl::Status ReadModifyWrite(internal::OpenTransactionPtr& transaction,
size_t& phase, Key key,
ReadModifyWriteSource& source) override {
TENSORSTORE_ASSIGN_OR_RETURN(ChunkId chunk_id, KeyToChunkIdOrError(key));
const auto& sharding_spec = this->sharding_spec();
const auto shard_info = GetSplitShardInfo(
sharding_spec, GetChunkShardInfo(sharding_spec, chunk_id));
const ShardIndex shard = shard_info.shard;
auto entry = GetCacheEntry(
write_cache_, ShardedKeyValueStoreWriteCache::ShardToKey(shard));
std::string key_within_shard;
key_within_shard.resize(16);
absl::big_endian::Store64(key_within_shard.data(), shard_info.minishard);
absl::big_endian::Store64(key_within_shard.data() + 8, chunk_id.value);
TENSORSTORE_ASSIGN_OR_RETURN(
auto node, GetWriteLockedTransactionNode(*entry, transaction));
node->ReadModifyWrite(phase, std::move(key_within_shard), source);
if (!transaction) {
transaction.reset(node.unlock()->transaction());
}
return absl::OkStatus();
}
absl::Status TransactionalDeleteRange(
const internal::OpenTransactionPtr& transaction,
KeyRange range) override {
return absl::UnimplementedError("DeleteRange not supported");
}
std::string DescribeKey(std::string_view key) override {
auto chunk_id_opt = KeyToChunkId(key);
if (!chunk_id_opt) {
return tensorstore::StrCat("invalid key ", tensorstore::QuoteString(key));
}
const auto& sharding_spec = this->sharding_spec();
const auto shard_info = GetSplitShardInfo(
sharding_spec, GetChunkShardInfo(sharding_spec, *chunk_id_opt));
return tensorstore::StrCat(
"chunk ", chunk_id_opt->value, " in minishard ", shard_info.minishard,
" in ",
base_kvstore_driver()->DescribeKey(
GetShardKey(sharding_spec, key_prefix(), shard_info.shard)));
}
SupportedFeatures GetSupportedFeatures(
const KeyRange& key_range) const final {
return base_kvstore_driver()->GetSupportedFeatures(
KeyRange::Prefix(key_prefix()));
}
Result<KvStore> GetBase(std::string_view path,
const Transaction& transaction) const override {
return KvStore(kvstore::DriverPtr(base_kvstore_driver()), key_prefix(),
transaction);
}
kvstore::Driver* base_kvstore_driver() const {
return minishard_index_cache()->base_kvstore_driver();
}
const ShardingSpec& sharding_spec() const {
return minishard_index_cache()->sharding_spec();
}
const Executor& executor() const {
return minishard_index_cache()->executor();
}
const std::string& key_prefix() const {
return minishard_index_cache()->key_prefix();
}
const internal::CachePtr<MinishardIndexCache>& minishard_index_cache() const {
return write_cache_->minishard_index_cache_;
}
absl::Status GetBoundSpecData(ShardedKeyValueStoreSpecData& spec) const;
internal::CachePtr<ShardedKeyValueStoreWriteCache> write_cache_;
Context::Resource<internal::CachePoolResource> cache_pool_resource_;
Context::Resource<internal::DataCopyConcurrencyResource>
data_copy_concurrency_resource_;
bool is_raw_encoding_ = false;
};
namespace {
class ReadOperationState;
using ReadOperationStateBase = internal_kvstore_batch::BatchReadEntry<
ShardedKeyValueStore,
internal_kvstore_batch::ReadRequest<MinishardAndChunkId,
kvstore::ReadGenerationConditions>,
ShardIndex>;
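// Handles a batch of chunk reads that all fall within a single shard,
// grouped so that shard and minishard index lookups are shared.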
class ReadOperationState
: public ReadOperationStateBase,
public internal::AtomicReferenceCount<ReadOperationState> {
public:
explicit ReadOperationState(BatchEntryKey&& batch_entry_key_)
: ReadOperationStateBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<ReadOperationState>(1) {}
private:
Batch retry_batch_{no_batch};
void Submit(Batch::View batch) override {
const auto& executor = driver().executor();
executor(
[this, batch = Batch(batch)] { this->ProcessBatch(std::move(batch)); });
}
void ProcessBatch(Batch batch) {
internal::IntrusivePtr<ReadOperationState> self(this,
internal::adopt_object_ref);
span<Request> requests = request_batch.requests;
std::sort(request_batch.requests.begin(), request_batch.requests.end(),
[](const Request& a, const Request& b) {
return std::get<MinishardAndChunkId>(a) <
std::get<MinishardAndChunkId>(b);
});
if (ShouldReadEntireShard()) {
ReadEntireShard(std::move(self), std::move(batch));
return;
}
retry_batch_ = Batch::New();
Batch data_fetch_batch{no_batch};
for (size_t minishard_start_i = 0; minishard_start_i < requests.size();) {
size_t minishard_end_i = minishard_start_i + 1;
auto minishard =
std::get<MinishardAndChunkId>(requests[minishard_start_i]).minishard;
while (
minishard_end_i < requests.size() &&
std::get<MinishardAndChunkId>(requests[minishard_end_i]).minishard ==
minishard) {
++minishard_end_i;
}
ProcessMinishard(batch, minishard,
requests.subspan(minishard_start_i,
minishard_end_i - minishard_start_i),
data_fetch_batch);
minishard_start_i = minishard_end_i;
}
}
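  // A single unconditional read of the whole shard is preferable when every
  // chunk the shard can hold is requested with a full byte range and the
  // same generation conditions.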
bool ShouldReadEntireShard() {
const auto& get_max_chunks_per_shard =
driver().write_cache_->get_max_chunks_per_shard_;
if (!get_max_chunks_per_shard) return false;
uint64_t max_chunks =
get_max_chunks_per_shard(std::get<ShardIndex>(batch_entry_key));
if (request_batch.requests.size() < max_chunks) {
return false;
}
const auto& first_request = request_batch.requests[0];
uint64_t num_chunks_covered = 0;
std::optional<uint64_t> prev_chunk_covered;
for (const auto& request : request_batch.requests) {
if (std::get<kvstore::ReadGenerationConditions>(request) !=
std::get<kvstore::ReadGenerationConditions>(first_request)) {
return false;
}
if (std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.byte_range.IsFull()) {
uint64_t chunk_id =
std::get<MinishardAndChunkId>(request).chunk_id.value;
if (chunk_id != prev_chunk_covered) {
prev_chunk_covered = chunk_id;
++num_chunks_covered;
}
}
}
return (num_chunks_covered == max_chunks);
}
std::string ShardKey() {
const auto& sharding_spec = driver().sharding_spec();
return GetShardKey(sharding_spec, driver().key_prefix(),
std::get<ShardIndex>(batch_entry_key));
}
static void ReadEntireShard(internal::IntrusivePtr<ReadOperationState> self,
Batch batch) {
auto& first_request = self->request_batch.requests[0];
kvstore::ReadOptions read_options;
read_options.batch = std::move(batch);
read_options.generation_conditions =
std::move(std::get<kvstore::ReadGenerationConditions>(first_request));
read_options.staleness_bound = self->request_batch.staleness_bound;
auto& driver = self->driver();
auto read_future = driver.base_kvstore_driver()->Read(
GetShardKey(driver.sharding_spec(), driver.key_prefix(),
std::get<ShardIndex>(self->batch_entry_key)),
std::move(read_options));
read_future.Force();
std::move(read_future)
.ExecuteWhenReady([self = std::move(self)](
ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), future = std::move(future)] {
OnEntireShardReady(std::move(self), std::move(future.result()));
});
});
}
static void OnEntireShardReady(
internal::IntrusivePtr<ReadOperationState> self,
Result<kvstore::ReadResult>&& result) {
if (!result.ok() || !result->has_value()) {
internal_kvstore_batch::SetCommonResult(self->request_batch.requests,
std::move(result));
return;
}
auto& read_result = *result;
const auto& sharding_spec = self->driver().sharding_spec();
TENSORSTORE_ASSIGN_OR_RETURN(auto encoded_chunks,
SplitShard(sharding_spec, read_result.value),
internal_kvstore_batch::SetCommonResult(
self->request_batch.requests, _));
span<Request> requests = self->request_batch.requests;
size_t request_i = 0;
const auto complete_not_found = [&](Request& request) {
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(kvstore::ReadResult::Missing(read_result.stamp));
};
for (const auto& encoded_chunk : encoded_chunks) {
auto decoded_data_result =
DecodeData(encoded_chunk.encoded_data, sharding_spec.data_encoding);
const auto complete_request =
[&](Request& request) -> Result<kvstore::ReadResult> {
TENSORSTORE_ASSIGN_OR_RETURN(
const auto& decoded_data, decoded_data_result,
internal::ConvertInvalidArgumentToFailedPrecondition(_));
TENSORSTORE_ASSIGN_OR_RETURN(
auto validated_byte_range,
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.byte_range.Validate(decoded_data.size()));
kvstore::ReadResult request_read_result;
request_read_result.stamp = read_result.stamp;
request_read_result.state = kvstore::ReadResult::kValue;
request_read_result.value =
internal::GetSubCord(decoded_data, validated_byte_range);
return request_read_result;
};
auto decoded_key = encoded_chunk.minishard_and_chunk_id;
for (; request_i < requests.size(); ++request_i) {
auto& request = requests[request_i];
auto request_key = std::get<MinishardAndChunkId>(request);
if (request_key < decoded_key) {
complete_not_found(request);
} else if (request_key == decoded_key) {
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(complete_request(request));
} else {
break;
}
}
}
for (; request_i < requests.size(); ++request_i) {
complete_not_found(requests[request_i]);
}
}
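  // Looks up the minishard index through the cache.  Chunk data reads join
  // `batch` directly if the index is already available, and otherwise a
  // shared data_fetch_batch.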
void ProcessMinishard(Batch::View batch, MinishardIndex minishard,
span<Request> requests, Batch& data_fetch_batch) {
ChunkSplitShardInfo split_shard_info;
split_shard_info.shard = std::get<ShardIndex>(batch_entry_key);
split_shard_info.minishard = minishard;
auto shard_info =
GetCombinedShardInfo(driver().sharding_spec(), split_shard_info);
auto minishard_index_cache_entry = GetCacheEntry(
driver().minishard_index_cache(),
std::string_view(reinterpret_cast<const char*>(&shard_info),
sizeof(shard_info)));
auto minishard_index_read_future = minishard_index_cache_entry->Read(
{request_batch.staleness_bound, batch});
Batch successor_batch{no_batch};
if (batch) {
if (minishard_index_read_future.ready()) {
successor_batch = batch;
} else {
if (!data_fetch_batch) {
data_fetch_batch = Batch::New();
}
successor_batch = data_fetch_batch;
}
}
const auto& executor = driver().executor();
std::move(minishard_index_read_future)
.ExecuteWhenReady(WithExecutor(
executor,
[self = internal::IntrusivePtr<ReadOperationState>(this), requests,
minishard_index_cache_entry =
std::move(minishard_index_cache_entry),
successor_batch = std::move(successor_batch)](
ReadyFuture<const void> future) mutable {
const auto& status = future.status();
if (!status.ok()) {
internal_kvstore_batch::SetCommonResult<Request>(requests,
{status});
return;
}
OnMinishardIndexReady(std::move(self), requests,
std::move(successor_batch),
std::move(minishard_index_cache_entry));
}));
}
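// With the minishard index in hand, groups `requests` by chunk id and looks
// up each chunk's byte range. Raw-encoded chunks are read with the caller's
// byte range applied directly; encoded chunks are read whole, decoded once,
// and then sliced per request. All reads are conditioned on the generation
// observed in the index so a concurrent shard rewrite is detected as an
// abort.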
static void OnMinishardIndexReady(
internal::IntrusivePtr<ReadOperationState> self, span<Request> requests,
Batch successor_batch,
internal::PinnedCacheEntry<MinishardIndexCache>
minishard_index_cache_entry) {
std::shared_ptr<const std::vector<MinishardIndexEntry>> minishard_index;
TimestampedStorageGeneration stamp;
{
auto lock = internal::AsyncCache::ReadLock<MinishardIndexCache::ReadData>(
*minishard_index_cache_entry);
stamp = lock.stamp();
minishard_index = lock.shared_data();
}
assert(!StorageGeneration::IsUnknown(stamp.generation));
if (!minishard_index) {
internal_kvstore_batch::SetCommonResult(
requests, kvstore::ReadResult::Missing(std::move(stamp)));
return;
}
const auto& sharding_spec = self->driver().sharding_spec();
const auto process_chunk = [&](ChunkId chunk_id,
span<Request> chunk_requests) {
auto byte_range = FindChunkInMinishard(*minishard_index, chunk_id);
if (!byte_range) {
internal_kvstore_batch::SetCommonResult(
chunk_requests, kvstore::ReadResult::Missing(stamp));
return;
}
int64_t size = byte_range->size();
chunk_requests = chunk_requests.first(
std::remove_if(
chunk_requests.begin(), chunk_requests.end(),
[&](Request& request) {
return !internal_kvstore_batch::ValidateRequestGeneration(
request, stamp);
}) -
chunk_requests.begin());
if (sharding_spec.data_encoding == ShardingSpec::DataEncoding::raw) {
const auto process_request = [&](Request& request) {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
TENSORSTORE_ASSIGN_OR_RETURN(
auto sub_byte_range, byte_range_request.byte_range.Validate(size),
static_cast<void>(byte_range_request.promise.SetResult(_)));
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions.if_equal =
stamp.generation;
kvstore_read_options.staleness_bound =
self->request_batch.staleness_bound;
kvstore_read_options.byte_range = ByteRange{
byte_range->inclusive_min + sub_byte_range.inclusive_min,
byte_range->inclusive_min + sub_byte_range.exclusive_max};
kvstore_read_options.batch = successor_batch;
auto value_read_future = self->driver().base_kvstore_driver()->Read(
self->ShardKey(), std::move(kvstore_read_options));
value_read_future.Force();
std::move(value_read_future)
.ExecuteWhenReady([self,
&request](ReadyFuture<kvstore::ReadResult>
future) mutable {
TENSORSTORE_ASSIGN_OR_RETURN(
auto&& read_result, std::move(future.result()),
static_cast<void>(
std::get<internal_kvstore_batch::ByteRangeReadRequest>(
request)
.promise.SetResult(_)));
self->OnRawValueReady(request, std::move(read_result));
});
};
for (auto& request : chunk_requests) {
process_request(request);
}
} else {
kvstore::ReadOptions kvstore_read_options;
kvstore_read_options.generation_conditions.if_equal = stamp.generation;
kvstore_read_options.staleness_bound =
self->request_batch.staleness_bound;
kvstore_read_options.byte_range = *byte_range;
kvstore_read_options.batch = successor_batch;
auto value_read_future = self->driver().base_kvstore_driver()->Read(
self->ShardKey(), std::move(kvstore_read_options));
value_read_future.Force();
std::move(value_read_future)
.ExecuteWhenReady(
[self, chunk_requests](
ReadyFuture<kvstore::ReadResult> future) mutable {
const auto& executor = self->driver().executor();
executor([self = std::move(self), chunk_requests,
future = std::move(future)] {
TENSORSTORE_ASSIGN_OR_RETURN(
auto&& read_result, std::move(future.result()),
internal_kvstore_batch::SetCommonResult(chunk_requests,
_));
self->OnEncodedValueReady(chunk_requests,
std::move(read_result));
});
});
}
};
for (size_t request_i = 0; request_i < requests.size();) {
ChunkId chunk_id =
std::get<MinishardAndChunkId>(requests[request_i]).chunk_id;
size_t end_request_i;
for (end_request_i = request_i + 1; end_request_i < requests.size();
++end_request_i) {
if (std::get<MinishardAndChunkId>(requests[end_request_i]).chunk_id !=
chunk_id)
break;
}
process_chunk(chunk_id,
requests.subspan(request_i, end_request_i - request_i));
request_i = end_request_i;
}
}
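// If the conditional read aborted (the shard changed since the minishard
// index was read), re-enqueues the request against the new state; otherwise
// forwards the result to the caller.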
void OnRawValueReady(Request& request, kvstore::ReadResult&& read_result) {
if (read_result.aborted()) {
MakeRequest<ReadOperationState>(
driver(), std::get<ShardIndex>(batch_entry_key), retry_batch_,
read_result.stamp.time, std::move(request));
return;
}
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(std::move(read_result));
}
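// Same retry logic as `OnRawValueReady`, but for encoded chunks: the chunk
// is decoded once and each request's byte range is then served from the
// decoded value.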
void OnEncodedValueReady(span<Request> chunk_requests,
kvstore::ReadResult&& read_result) {
if (read_result.aborted()) {
for (auto& request : chunk_requests) {
MakeRequest<ReadOperationState>(
driver(), std::get<ShardIndex>(batch_entry_key), retry_batch_,
read_result.stamp.time, std::move(request));
}
return;
}
if (!read_result.has_value()) {
internal_kvstore_batch::SetCommonResult(chunk_requests,
std::move(read_result));
return;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto decoded_value,
DecodeData(read_result.value, driver().sharding_spec().data_encoding),
internal_kvstore_batch::SetCommonResult(
chunk_requests,
internal::ConvertInvalidArgumentToFailedPrecondition(_)));
const auto process_request =
[&](Request& request) -> Result<kvstore::ReadResult> {
auto& byte_range_request =
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request);
TENSORSTORE_ASSIGN_OR_RETURN(
auto byte_range,
byte_range_request.byte_range.Validate(decoded_value.size()));
return kvstore::ReadResult::Value(
internal::GetSubCord(decoded_value, byte_range), read_result.stamp);
};
for (auto& request : chunk_requests) {
std::get<internal_kvstore_batch::ByteRangeReadRequest>(request)
.promise.SetResult(process_request(request));
}
}
};
}  // namespace
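// Maps the 8-byte chunk key to its (shard, minishard) pair and enqueues the
// read with the per-shard batching machinery above, so concurrent reads of
// the same shard share index and data fetches.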
Future<kvstore::ReadResult> ShardedKeyValueStore::Read(Key key,
ReadOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(ChunkId chunk_id, KeyToChunkIdOrError(key));
const auto& sharding_spec = this->sharding_spec();
auto shard_info = GetChunkShardInfo(sharding_spec, chunk_id);
auto split_shard_info = GetSplitShardInfo(sharding_spec, shard_info);
auto [promise, future] = PromiseFuturePair<kvstore::ReadResult>::Make();
ReadOperationState::MakeRequest<ReadOperationState>(
*this, split_shard_info.shard, options.batch, options.staleness_bound,
ReadOperationState::Request{
{std::move(promise), options.byte_range},
MinishardAndChunkId{split_shard_info.minishard, chunk_id},
std::move(options.generation_conditions)});
return std::move(future);
}
}  // namespace neuroglancer_uint64_sharded
namespace garbage_collection {
template <>
struct GarbageCollection<neuroglancer_uint64_sharded::ShardedKeyValueStore> {
static void Visit(
GarbageCollectionVisitor& visitor,
const neuroglancer_uint64_sharded::ShardedKeyValueStore& value) {
garbage_collection::GarbageCollectionVisit(visitor,
*value.base_kvstore_driver());
}
};
}  // namespace garbage_collection
namespace neuroglancer_uint64_sharded {
absl::Status ShardedKeyValueStore::GetBoundSpecData(
ShardedKeyValueStoreSpecData& spec) const {
TENSORSTORE_ASSIGN_OR_RETURN(spec.base.driver,
base_kvstore_driver()->GetBoundSpec());
spec.base.path = key_prefix();
if (!data_copy_concurrency_resource_.has_resource() ||
!cache_pool_resource_.has_resource()) {
return absl::InternalError("JSON representation not supported");
}
spec.data_copy_concurrency = data_copy_concurrency_resource_;
spec.cache_pool = cache_pool_resource_;
spec.metadata = sharding_spec();
return absl::Status();
}
Future<kvstore::DriverPtr> ShardedKeyValueStoreSpec::DoOpen() const {
return MapFutureValue(
InlineExecutor{},
[spec = internal::IntrusivePtr<const ShardedKeyValueStoreSpec>(this)](
kvstore::KvStore& base_kvstore) -> Result<kvstore::DriverPtr> {
auto driver = internal::MakeIntrusivePtr<ShardedKeyValueStore>(
std::move(base_kvstore.driver),
spec->data_.data_copy_concurrency->executor,
std::move(base_kvstore.path), spec->data_.metadata,
*spec->data_.cache_pool);
driver->data_copy_concurrency_resource_ =
spec->data_.data_copy_concurrency;
driver->cache_pool_resource_ = spec->data_.cache_pool;
return driver;
},
kvstore::Open(data_.base));
}
kvstore::DriverPtr GetShardedKeyValueStore(
kvstore::DriverPtr base_kvstore, Executor executor, std::string key_prefix,
const ShardingSpec& sharding_spec, internal::CachePool::WeakPtr cache_pool,
GetMaxChunksPerShardFunction get_max_chunks_per_shard) {
return kvstore::DriverPtr(new ShardedKeyValueStore(
std::move(base_kvstore), std::move(executor), std::move(key_prefix),
sharding_spec, std::move(cache_pool),
std::move(get_max_chunks_per_shard)));
}
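// Illustrative usage sketch (not part of this file); `base`, `spec`, and
// `pool` are assumed to already exist:
//
//   auto sharded = GetShardedKeyValueStore(
//       base, tensorstore::InlineExecutor{}, "prefix", spec,
//       tensorstore::internal::CachePool::WeakPtr(pool));
//   auto result = sharded->Read(ChunkIdToKey({42})).result();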
std::string ChunkIdToKey(ChunkId chunk_id) {
std::string key;
key.resize(sizeof(uint64_t));
absl::big_endian::Store64(key.data(), chunk_id.value);
return key;
}
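// Keys are the fixed-width big-endian encoding of the chunk id, so
// lexicographic key order matches numeric chunk-id order. For example,
// ChunkIdToKey({0x50}) yields "\x00\x00\x00\x00\x00\x00\x00\x50", and
// KeyToChunkId round-trips it back to chunk id 0x50.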
std::optional<ChunkId> KeyToChunkId(std::string_view key) {
if (key.size() != 8) return std::nullopt;
return ChunkId{absl::big_endian::Load64(key.data())};
}
}  // namespace neuroglancer_uint64_sharded
}  // namespace tensorstore
namespace {
const tensorstore::internal_kvstore::DriverRegistration<
tensorstore::neuroglancer_uint64_sharded::ShardedKeyValueStoreSpec>
registration;
}  // namespace

#include "tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache/cache.h"
#include "tensorstore/internal/cache/kvs_backed_cache_testutil.h"
#include "tensorstore/internal/compression/zlib.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/internal/thread/thread_pool.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/memory/memory_key_value_store.h"
#include "tensorstore/kvstore/mock_kvstore.h"
#include "tensorstore/kvstore/neuroglancer_uint64_sharded/uint64_sharded.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
namespace zlib = ::tensorstore::zlib;
namespace kvstore = ::tensorstore::kvstore;
using ::tensorstore::Batch;
using ::tensorstore::Future;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::Result;
using ::tensorstore::StorageGeneration;
using ::tensorstore::TimestampedStorageGeneration;
using ::tensorstore::Transaction;
using ::tensorstore::internal::CachePool;
using ::tensorstore::internal::GetCache;
using ::tensorstore::internal::KvsBackedTestCache;
using ::tensorstore::internal::MatchesKvsReadResult;
using ::tensorstore::internal::MatchesKvsReadResultAborted;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal::MockKeyValueStore;
using ::tensorstore::internal::UniqueNow;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::neuroglancer_uint64_sharded::ChunkIdToKey;
using ::tensorstore::neuroglancer_uint64_sharded::GetShardedKeyValueStore;
using ::tensorstore::neuroglancer_uint64_sharded::ShardingSpec;
constexpr CachePool::Limits kSmallCacheLimits{10000000};
absl::Cord Bytes(std::initializer_list<unsigned char> x) {
return absl::Cord(std::string(x.begin(), x.end()));
}
std::string GetChunkKey(uint64_t chunk_id) { return ChunkIdToKey({chunk_id}); }
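// Adapts the arbitrary string keys used by the generic kvstore test suite to
// unique uint64 chunk keys, assigned either sequentially or at random.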
class GetUint64Key {
public:
GetUint64Key(bool sequential) : sequential_(sequential) {}
std::string operator()(std::string key) const {
auto it = key_to_uint64_.find(key);
if (it == key_to_uint64_.end()) {
while (true) {
auto x = sequential_ ? next_chunk_id_++ : absl::Uniform<uint64_t>(gen_);
if (uint64_to_key_.emplace(x, key).second) {
it = key_to_uint64_.emplace(key, x).first;
break;
}
}
}
return GetChunkKey(it->second);
}
private:
bool sequential_;
mutable uint64_t next_chunk_id_ = 0;
mutable absl::BitGen gen_;
mutable absl::flat_hash_map<std::string, uint64_t> key_to_uint64_;
mutable absl::flat_hash_map<uint64_t, std::string> uint64_to_key_;
};
tensorstore::Executor GetExecutor(std::string_view executor_name) {
if (executor_name == "inline") return tensorstore::InlineExecutor{};
return tensorstore::internal::DetachedThreadPool(2);
}
struct BasicFunctionalityTestOptions {
std::string_view executor_name = "thread_pool";
bool sequential_ids = false;
std::string_view hash = "identity";
std::string_view data_encoding = "raw";
std::string_view minishard_index_encoding = "raw";
bool all_zero_bits = false;
};
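// Runs the generic kvstore read/write conformance tests against a sharded
// store layered over an in-memory base store with the given options.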
void TestReadWriteOps(BasicFunctionalityTestOptions options) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", options.hash},
{"preshift_bits", options.all_zero_bits ? 0 : 1},
{"minishard_bits", options.all_zero_bits ? 0 : 2},
{"shard_bits", options.all_zero_bits ? 0 : 3},
{"data_encoding", options.data_encoding},
{"minishard_index_encoding", options.minishard_index_encoding}};
auto cache_pool = CachePool::Make(kSmallCacheLimits);
auto base_kv_store = tensorstore::GetMemoryKeyValueStore();
auto sharding_spec = ShardingSpec::FromJson(sharding_spec_json).value();
SCOPED_TRACE(options.executor_name);
SCOPED_TRACE(sharding_spec_json.dump());
auto store = GetShardedKeyValueStore(
base_kv_store, GetExecutor(options.executor_name), "prefix",
sharding_spec, CachePool::WeakPtr(cache_pool));
GetUint64Key get_key_fn(options.sequential_ids);
tensorstore::internal::TestKeyValueReadWriteOps(store, get_key_fn);
}
TEST(Uint64ShardedKeyValueStoreTest, BasicFunctionality) {
{
BasicFunctionalityTestOptions options;
TestReadWriteOps(options);
options.sequential_ids = true;
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.hash = "murmurhash3_x86_128";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.data_encoding = "gzip";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.minishard_index_encoding = "gzip";
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.all_zero_bits = true;
TestReadWriteOps(options);
}
{
BasicFunctionalityTestOptions options;
options.executor_name = "inline";
TestReadWriteOps(options);
}
}
TEST(Uint64ShardedKeyValueStoreTest, DescribeKey) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
for (const auto& [key, description] :
std::vector<std::pair<uint64_t, std::string>>{
{0, "chunk 0 in minishard 0 in \"prefix/0.shard\""},
{1, "chunk 1 in minishard 1 in \"prefix/0.shard\""},
{2, "chunk 2 in minishard 0 in \"prefix/1.shard\""},
{3, "chunk 3 in minishard 1 in \"prefix/1.shard\""},
}) {
EXPECT_EQ(description, store->DescribeKey(GetChunkKey(key)));
}
}
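// Fixture with a trivial layout (one shard, one minishard, no preshift, raw
// encoding) so the byte offsets asserted in the tests below can be computed
// by hand.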
class RawEncodingTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 0},
{"shard_bits", 0},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
};
TEST_F(RawEncodingTest, MultipleUnconditionalWrites) {
std::vector<absl::Cord> values{absl::Cord("abc"), absl::Cord("aaaaa"),
absl::Cord("efgh")};
std::vector<Future<TimestampedStorageGeneration>> futures;
auto key = GetChunkKey(10);
tensorstore::Transaction txn(tensorstore::isolated);
for (auto value : values) {
futures.push_back(kvstore::WriteCommitted(KvStore{store, txn}, key, value));
}
txn.CommitAsync().IgnoreFuture();
std::vector<Result<TimestampedStorageGeneration>> results;
for (const auto& future : futures) {
results.push_back(future.result());
}
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto shard_read, base_kv_store->Read("prefix/0.shard").result());
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
for (size_t i = 0; i < results.size(); ++i) {
if (results[i] && results[i]->generation == shard_read.stamp.generation) {
EXPECT_THAT(store->Read(key).result(),
MatchesKvsReadResult(values[i], results[i]->generation));
}
}
}
TEST_F(RawEncodingTest, List) {
std::map<std::string, absl::Cord> values{
{GetChunkKey(1), absl::Cord("a")},
{GetChunkKey(2), absl::Cord("bc")},
{GetChunkKey(3), absl::Cord("def")},
{GetChunkKey(10), absl::Cord("xyz")}};
for (auto [key, value] : values) {
TENSORSTORE_EXPECT_OK(store->Write(key, value));
}
EXPECT_THAT(tensorstore::internal::GetMap(store),
::testing::Optional(::testing::ElementsAreArray(values)));
}
TEST_F(RawEncodingTest, WritesAndDeletes) {
StorageGeneration gen1, gen2, gen3;
{
tensorstore::Transaction txn(tensorstore::isolated);
auto init_future1 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(1), absl::Cord("a"));
auto init_future2 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(2), absl::Cord("bc"));
auto init_future3 = kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(3), absl::Cord("def"));
txn.CommitAsync().IgnoreFuture();
gen1 = init_future1.value().generation;
gen2 = init_future2.value().generation;
gen3 = init_future3.value().generation;
}
tensorstore::Transaction txn(tensorstore::isolated);
auto future1 = kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()});
auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
absl::Cord("ww"), {gen2});
auto future3 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(2),
absl::Cord("xx"), {gen2});
auto future4 =
kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(4),
absl::Cord("zz"), {StorageGeneration::NoValue()});
auto future5 =
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(3), {gen3});
txn.CommitAsync().IgnoreFuture();
EXPECT_THAT(future1.result(), MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto shard_read, base_kv_store->Read("prefix/0.shard").result());
EXPECT_THAT(
std::vector({future2.result(), future3.result()}),
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()),
MatchesTimestampedStorageGeneration(shard_read.stamp.generation)));
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResult(absl::Cord("a")));
EXPECT_THAT(store->Read(GetChunkKey(2)).result(),
MatchesKvsReadResult(
!StorageGeneration::IsUnknown(future2.result()->generation)
? absl::Cord("ww")
: absl::Cord("xx")));
EXPECT_THAT(store->Read(GetChunkKey(3)).result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(store->Read(GetChunkKey(4)).result(),
MatchesKvsReadResult(absl::Cord("zz")));
}
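// Runs `op0` and `op1` within a transaction in both submission orders and
// returns the results of each ordering, so tests can assert properties that
// hold regardless of which conflicting operation is applied first.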
std::vector<std::vector<Result<TimestampedStorageGeneration>>>
TestOrderDependentWrites(
std::function<void()> init,
std::function<Future<TimestampedStorageGeneration>()> op0,
std::function<Future<TimestampedStorageGeneration>()> op1,
std::function<void()> finalize) {
std::vector<std::vector<Result<TimestampedStorageGeneration>>> all_results;
for (int i = 0; i < 2; ++i) {
std::vector<Future<TimestampedStorageGeneration>> futures(2);
init();
if (i == 0) {
futures[0] = op0();
futures[1] = op1();
} else {
futures[1] = op1();
futures[0] = op0();
}
finalize();
all_results.push_back({futures[0].result(), futures[1].result()});
}
return all_results;
}
TEST_F(RawEncodingTest, WriteThenDelete) {
TENSORSTORE_ASSERT_OK(store->Write(GetChunkKey(1), absl::Cord("a")).result());
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResult(absl::Cord("a")));
TENSORSTORE_ASSERT_OK(store->Delete(GetChunkKey(1)).result());
EXPECT_THAT(store->Read(GetChunkKey(1)).result(),
MatchesKvsReadResultNotFound());
}
TEST_F(RawEncodingTest, MultipleDeleteExisting) {
StorageGeneration gen;
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
gen = store->Write(GetChunkKey(1), absl::Cord("a"))
.value()
.generation;
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{gen});
},
[&] {
return kvstore::DeleteCommitted(
KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::UnorderedElementsAre(
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(
StorageGeneration::NoValue())),
::testing::ElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue()),
MatchesTimestampedStorageGeneration(
StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, WriteWithUnmatchedConditionAfterDelete) {
tensorstore::Transaction txn{tensorstore::no_transaction};
EXPECT_THAT(
TestOrderDependentWrites(
[&] {
store->Delete(GetChunkKey(0)).value();
txn = tensorstore::Transaction(tensorstore::isolated);
},
[&] {
return kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0),
absl::Cord("a"));
},
[&] {
return kvstore::WriteCommitted(
KvStore{store, txn}, GetChunkKey(0), absl::Cord("b"),
{StorageGeneration::FromString("g")});
},
[&] { txn.CommitAsync().IgnoreFuture(); }),
::testing::Each(::testing::ElementsAre(
MatchesTimestampedStorageGeneration(
::testing::AllOf(::testing::Not(StorageGeneration::NoValue()),
::testing::Not(StorageGeneration::Invalid()))),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()))));
}
TEST_F(RawEncodingTest, MultipleDeleteNonExisting) {
tensorstore::Transaction txn(tensorstore::isolated);
std::vector futures{
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()}),
kvstore::DeleteCommitted(KvStore{store, txn}, GetChunkKey(1),
{StorageGeneration::NoValue()})};
txn.CommitAsync().IgnoreFuture();
std::vector results{futures[0].result(), futures[1].result()};
EXPECT_THAT(
results,
::testing::UnorderedElementsAre(
MatchesTimestampedStorageGeneration(StorageGeneration::Invalid()),
MatchesTimestampedStorageGeneration(StorageGeneration::NoValue())));
}
TEST_F(RawEncodingTest, ShardIndexTooShort) {
base_kv_store->Write("prefix/0.shard", Bytes({1, 2, 3})).value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Requested byte range \\[0, 16\\) is not valid for value of size 3"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Existing shard has size 3, but expected at least: 16"));
}
TEST_F(RawEncodingTest, ShardIndexInvalidByteRange) {
base_kv_store
->Write("prefix/0.shard",
Bytes({10, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Shard index specified invalid byte range: \\[10, 2\\)"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Shard index specified invalid byte range: \\[10, 2\\)"));
}
TEST_F(RawEncodingTest, ShardIndexByteRangeOverflow) {
base_kv_store
->Write("prefix/0.shard",
Bytes({
10, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Byte range .* relative to the end of "
"the shard index \\(16\\) is not valid"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Byte range .* relative to the end of "
"the shard index \\(16\\) is not valid"));
}
TEST_F(RawEncodingTest, MinishardIndexOutOfRange) {
base_kv_store
->Write("prefix/0.shard",
Bytes({0, 0, 0, 0, 0, 0, 0, 0, 48, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Requested byte range \\[16, 64\\) is "
"not valid for value of size 16"));
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing shard index entry for minishard 0: "
"Requested byte range .* is not valid for value of size 16"));
}
TEST_F(RawEncodingTest, MinishardIndexInvalidSize) {
base_kv_store
->Write("prefix/0.shard",
Bytes({0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Invalid minishard index length: 1"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing minishard index for minishard 0: "
"Invalid minishard index length: 1"));
}
TEST_F(RawEncodingTest, MinishardIndexByteRangeOverflow) {
base_kv_store
->Write("prefix/0.shard",
Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
24, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error decoding minishard index entry "
"for chunk 10: Byte range .* relative to the end "
"of the shard index \\(16\\) is not valid"));
}
TEST_F(RawEncodingTest, MinishardIndexEntryByteRangeOutOfRange) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
24, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
200, 0, 0, 0, 0, 0, 0, 0,
}))
.value();
EXPECT_THAT(store->Write(GetChunkKey(1), absl::Cord("x")).result(),
MatchesStatus(
absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Invalid existing byte range for chunk 10: "
"Requested byte range .* is not valid for value of size .*"));
}
TEST_F(RawEncodingTest, MinishardIndexWithDuplicateChunkId) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
48, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
}))
.value();
EXPECT_THAT(store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Chunk 10 occurs more than once in the minishard "
"index for minishard 0"));
}
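// Same trivial layout as `RawEncodingTest`, but with gzip data and minishard
// index encodings, for exercising the decode error paths.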
class GzipEncodingTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 0},
{"shard_bits", 0},
{"data_encoding", "gzip"},
{"minishard_index_encoding", "gzip"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
kvstore::DriverPtr base_kv_store = tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr store = GetShardedKeyValueStore(
base_kv_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool));
};
TEST_F(GzipEncodingTest, CorruptMinishardGzipEncoding) {
base_kv_store
->Write("prefix/0.shard", Bytes({
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
}))
.value();
EXPECT_THAT(
store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error decoding zlib-compressed data"));
EXPECT_THAT(
store->Write(GetChunkKey(10), absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error reading \"prefix/0\\.shard\": "
"Error decoding existing minishard index for minishard 0: "
"Error decoding zlib-compressed data"));
}
TEST_F(GzipEncodingTest, CorruptDataGzipEncoding) {
absl::Cord shard_data("abc");
zlib::Options zlib_options;
zlib_options.use_gzip_header = true;
zlib::Encode(Bytes({
10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
}),
&shard_data, zlib_options);
const unsigned char n = static_cast<unsigned char>(shard_data.size());
absl::Cord temp = Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
n, 0, 0, 0, 0, 0, 0, 0,
});
temp.Append(shard_data);
TENSORSTORE_ASSERT_OK(base_kv_store->Write("prefix/0.shard", temp));
EXPECT_THAT(store->Read(GetChunkKey(10)).result(),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Error decoding zlib-compressed data"));
}
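// Places a MockKeyValueStore underneath the sharded store so each test can
// observe and answer the individual base-store requests, asserting the exact
// keys, byte ranges, and generation conditions issued.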
class UnderlyingKeyValueStoreTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
CachePool::StrongPtr cache_pool = CachePool::Make(kSmallCacheLimits);
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
kvstore::DriverPtr GetStore(
tensorstore::neuroglancer_uint64_sharded::GetMaxChunksPerShardFunction
get_max_chunks_per_shard = {}) {
return GetShardedKeyValueStore(
mock_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(cache_pool), std::move(get_max_chunks_per_shard));
}
kvstore::DriverPtr store = GetStore();
};
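// Walks a full uncached read of chunk 0x50 (shard 0, minishard 0). With
// minishard_bits=1 the shard index is 32 bytes and its entries are relative
// to the end of that index, so the index entry [5, 31) returned below maps
// to the absolute minishard-index range [37, 63), and the minishard entry
// (offset 0, size 5) maps to the data read of bytes [32, 37).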
TEST_F(UnderlyingKeyValueStoreTest, Read) {
absl::Time init_time = UniqueNow();
absl::Time minishard_index_time;
{
auto future = store->Read(GetChunkKey(0x50), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(init_time));
req.promise.SetResult(ReadResult::Value(
Bytes({
5, 0, 0, 0, 0, 0, 0, 0,
31, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(37, 63), req.options.byte_range);
minishard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Value(
Bytes({
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
5, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), minishard_index_time}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult::Value(Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(GetChunkKey(0x60), options);
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(minishard_index_time));
}
{
auto req_time = UniqueNow();
auto future = store->Read(GetChunkKey(0x60), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
minishard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), minishard_index_time}));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesKvsReadResultNotFound(minishard_index_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(GetChunkKey(0x50), options);
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
EXPECT_EQ(init_time, req.options.staleness_bound);
read_time = absl::Now();
req.promise.SetResult(
ReadResult::Value(Bytes({5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g0"), read_time}));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({5, 6, 7, 8, 9}),
StorageGeneration::FromString("g0"), read_time));
}
{
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(GetChunkKey(0x50), options);
absl::Time abort_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(init_time, req.options.staleness_bound);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 37), req.options.byte_range);
abort_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g0"), abort_time}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
req.promise.SetResult(ReadResult::Value(
Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g1"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g1"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
minishard_index_time = absl::Now();
req.promise.SetResult(ReadResult::Value(
Bytes({
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g1"), minishard_index_time}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g1"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 38), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(
ReadResult::Value(Bytes({4, 5, 6, 7, 8, 9}),
{StorageGeneration::FromString("g1"), read_time}));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({4, 5, 6, 7, 8, 9}),
StorageGeneration::FromString("g1"), read_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, TransactionReadThenCommit) {
tensorstore::Transaction txn(tensorstore::isolated);
auto memory_store = tensorstore::GetMemoryKeyValueStore();
{
auto future = kvstore::Read(KvStore{store, txn}, GetChunkKey(0x50), {});
{
auto req = mock_store->read_requests.pop();
req(memory_store);
ASSERT_EQ(0, mock_store->read_requests.size());
}
EXPECT_THAT(future.result(),
::testing::Optional(MatchesKvsReadResultNotFound()));
}
auto commit_future = txn.CommitAsync();
TENSORSTORE_ASSERT_OK(commit_future.result());
}
TEST_F(UnderlyingKeyValueStoreTest,
ReadConcurrentModificationAfterReadingShardIndex) {
absl::Time init_time = absl::Now();
kvstore::ReadOptions options;
options.staleness_bound = init_time;
auto future = store->Read(GetChunkKey(0x1), options);
absl::Time abort_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(init_time, req.options.staleness_bound);
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g2"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g2"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
abort_time = absl::Now();
req.promise.SetResult(ReadResult::Unspecified(
{StorageGeneration::FromString("g2"), abort_time}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.options.staleness_bound, ::testing::Ge(abort_time));
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
7, 0, 0, 0, 0, 0, 0, 0,
33, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g3"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g3"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(39, 65), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
0x1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g3"), absl::Now()}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g3"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(ReadResult::Value(
Bytes({4, 5, 6, 7}), {StorageGeneration::FromString("g3"), read_time}));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesKvsReadResult(Bytes({4, 5, 6, 7}),
StorageGeneration::FromString("g3"), read_time));
}
TEST_F(UnderlyingKeyValueStoreTest,
ReadConcurrentDeleteAfterReadingShardIndex) {
auto req_time = UniqueNow();
auto future = store->Read(GetChunkKey(0x1), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g4"), absl::Now()}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g4"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(ReadResult::Missing(read_time));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
TEST_F(UnderlyingKeyValueStoreTest,
ReadConcurrentDeleteAfterReadingMinishardIndex) {
auto req_time = UniqueNow();
auto future = store->Read(GetChunkKey(0x1), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.options.staleness_bound, ::testing::Gt(req_time));
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
0x1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
absl::Time read_time;
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
read_time = absl::Now();
req.promise.SetResult(ReadResult::Missing(read_time));
}
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(), MatchesKvsReadResultNotFound(read_time));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingShardIndex) {
auto future = store->Read(GetChunkKey(0x50), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(0, 16), req.options.byte_range);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading minishard 0 in \"prefix/0\\.shard\": "
"Error retrieving shard index entry: "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingMinishardShardIndex) {
auto future = store->Read(GetChunkKey(0x1), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(
future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading minishard 1 in \"prefix/0\\.shard\": "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadErrorReadingData) {
auto future = store->Read(GetChunkKey(0x1), {});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(16, 32), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
32, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(38, 64), req.options.byte_range);
req.promise.SetResult(
ReadResult::Value(Bytes({
0x1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(OptionalByteRangeRequest(32, 36), req.options.byte_range);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown, "Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, ReadInvalidKey) {
auto future = store->Read("abc", {});
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteInvalidKey) {
auto future = store->Write("abc", absl::Cord("x"));
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteInvalidKey) {
auto future = store->Delete("abc");
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShard) {
for (const bool with_max_chunks : {false, true}) {
SCOPED_TRACE(tensorstore::StrCat("with_max_chunks=", with_max_chunks));
if (with_max_chunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 2;
});
} else {
store = GetStore();
}
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, UnconditionalWrite) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 2;
});
auto txn = Transaction(tensorstore::isolated);
auto future1 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0x50),
Bytes({1, 2, 3}));
auto future2 = kvstore::WriteCommitted(KvStore{store, txn}, GetChunkKey(0x54),
Bytes({4, 5, 6}));
ASSERT_EQ(0, mock_store->read_requests.size());
ASSERT_EQ(0, mock_store->write_requests.size());
txn.CommitAsync().IgnoreFuture();
ASSERT_EQ(0, mock_store->read_requests.size());
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
54, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
4, 5, 6,
0x50, 0, 0, 0, 0, 0, 0, 0,
0x04, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future1.ready());
ASSERT_TRUE(future2.ready());
EXPECT_THAT(future1.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
EXPECT_THAT(future2.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, ConditionalWriteDespiteMaxChunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 1;
});
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}),
{StorageGeneration::NoValue()});
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::NoValue(),
req.options.generation_conditions.if_equal);
}
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithNoExistingShardError) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
{
auto req = mock_store->write_requests.pop_nonblock().value();
req.promise.SetResult(absl::UnknownError("Write error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error writing \"prefix/0\\.shard\": "
"Write error"));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShard) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
req.promise.SetResult(
ReadResult::Value(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
4, 5, 6,
0x70, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
}),
{StorageGeneration::FromString("g0"), absl::Now()}));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::FromString("g0"),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
6, 0, 0, 0, 0, 0, 0, 0,
54, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
4, 5, 6,
0x50, 0, 0, 0, 0, 0, 0, 0,
0x20, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g1"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g1"), write_time));
}
TEST_F(UnderlyingKeyValueStoreTest, WriteMaxChunksWithExistingShard) {
for (const bool specify_max_chunks : {false, true}) {
if (specify_max_chunks) {
store = GetStore(
[](uint64_t shard) -> uint64_t {
return 1;
});
}
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
if (!specify_max_chunks) {
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
req.promise.SetResult(ReadResult::Missing(absl::Now()));
}
absl::Time write_time;
{
auto req = mock_store->write_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->write_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ((specify_max_chunks ? StorageGeneration::Unknown()
: StorageGeneration::NoValue()),
req.options.generation_conditions.if_equal);
EXPECT_THAT(req.value, ::testing::Optional(Bytes({
3, 0, 0, 0, 0, 0, 0, 0,
27, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 2, 3,
0x50, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0,
})));
write_time = absl::Now();
req.promise.SetResult(std::in_place, StorageGeneration::FromString("g0"),
write_time);
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesTimestampedStorageGeneration(
StorageGeneration::FromString("g0"), write_time));
}
}
TEST_F(UnderlyingKeyValueStoreTest, WriteWithExistingShardReadError) {
auto future = store->Write(GetChunkKey(0x50), Bytes({1, 2, 3}));
{
auto req = mock_store->read_requests.pop_nonblock().value();
ASSERT_EQ(0, mock_store->read_requests.size());
EXPECT_EQ("prefix/0.shard", req.key);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_equal);
EXPECT_EQ(StorageGeneration::Unknown(),
req.options.generation_conditions.if_not_equal);
req.promise.SetResult(absl::UnknownError("Read error"));
}
ASSERT_TRUE(future.ready());
EXPECT_THAT(future.result(),
MatchesStatus(absl::StatusCode::kUnknown,
"Error reading \"prefix/0\\.shard\": "
"Read error"));
}
TEST_F(UnderlyingKeyValueStoreTest, DeleteRangeUnimplemented) {
EXPECT_THAT(store->DeleteRange(tensorstore::KeyRange::Prefix("abc")).result(),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
TEST_F(UnderlyingKeyValueStoreTest, TransactionalDeleteRangeUnimplemented) {
EXPECT_THAT(
store->TransactionalDeleteRange({}, tensorstore::KeyRange::Prefix("abc")),
MatchesStatus(absl::StatusCode::kUnimplemented));
}
TEST_F(UnderlyingKeyValueStoreTest, BatchRead) {
cache_pool = CachePool::Make({});
auto memory_store = tensorstore::GetMemoryKeyValueStore();
mock_store->forward_to = memory_store;
mock_store->log_requests = true;
mock_store->handle_batch_requests = true;
auto store = GetStore(
[](uint64_t shard) -> uint64_t {
return 6;
});
auto key0 = GetChunkKey(0x50);
auto key1 = GetChunkKey(0x54);
auto key2 = GetChunkKey(0x58);
auto key3 = GetChunkKey(0x51);
auto key4 = GetChunkKey(0x55);
auto key5 = GetChunkKey(0x59);
auto key6 = GetChunkKey(0x52);
auto key7 = GetChunkKey(0x56);
auto key8 = GetChunkKey(0x5a);
TENSORSTORE_ASSERT_OK(store->Write(key0, absl::Cord("abc")).result());
TENSORSTORE_ASSERT_OK(store->Write(key1, absl::Cord("def")).result());
TENSORSTORE_ASSERT_OK(store->Write(key3, absl::Cord("key3-")).result());
TENSORSTORE_ASSERT_OK(store->Write(key4, absl::Cord("key4--")).result());
TENSORSTORE_ASSERT_OK(store->Write(key5, absl::Cord("key5---")).result());
TENSORSTORE_ASSERT_OK(store->Write(key6, absl::Cord("key6----")).result());
TENSORSTORE_ASSERT_OK(store->Write(key7, absl::Cord("key6-----")).result());
TENSORSTORE_ASSERT_OK(store->Write(key8, absl::Cord("key6------")).result());
mock_store->request_log.pop_all();
{
SCOPED_TRACE(
"Read 2/6 chunks from the same shard (same minibatch) in a single "
"batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key1, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(3));
}
{
SCOPED_TRACE("Read 6/6 entries from the same shard in a single batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key1, options),
store->Read(key2, options),
store->Read(key3, options),
store->Read(key4, options),
store->Read(key5, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(futures[3].result(), MatchesKvsReadResult(absl::Cord("key3-")));
EXPECT_THAT(futures[4].result(),
MatchesKvsReadResult(absl::Cord("key4--")));
EXPECT_THAT(futures[5].result(),
MatchesKvsReadResult(absl::Cord("key5---")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(1));
}
{
SCOPED_TRACE(
"Read 6/6 entries from the same shard with inconsistent generation "
"constraints");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options1;
options1.batch = Batch::New();
kvstore::ReadOptions options2;
options2.batch = options1.batch;
options2.generation_conditions.if_not_equal =
StorageGeneration::Invalid();
kvstore::ReadOptions options3;
options3.batch = options1.batch;
options3.generation_conditions.if_equal = StorageGeneration::Invalid();
futures = {
store->Read(key0, options1),
store->Read(key1, options1),
store->Read(key2, options2),
store->Read(key3, options1),
store->Read(key4, options3),
store->Read(key5, options1),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(), MatchesKvsReadResult(absl::Cord("def")));
EXPECT_THAT(futures[2].result(), MatchesKvsReadResultNotFound());
EXPECT_THAT(futures[3].result(), MatchesKvsReadResult(absl::Cord("key3-")));
EXPECT_THAT(futures[4].result(), MatchesKvsReadResultAborted());
EXPECT_THAT(futures[5].result(),
MatchesKvsReadResult(absl::Cord("key5---")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(3));
}
{
SCOPED_TRACE("Read 1 entry from each of two shards in a single batch");
std::vector<Future<kvstore::ReadResult>> futures;
{
kvstore::ReadOptions options;
options.batch = Batch::New();
futures = {
store->Read(key0, options),
store->Read(key6, options),
};
}
EXPECT_THAT(futures[0].result(), MatchesKvsReadResult(absl::Cord("abc")));
EXPECT_THAT(futures[1].result(),
MatchesKvsReadResult(absl::Cord("key6----")));
EXPECT_THAT(mock_store->request_log.pop_all(), ::testing::SizeIs(6));
}
}
class ReadModifyWriteTest : public ::testing::Test {
protected:
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
MockKeyValueStore::MockPtr mock_store = MockKeyValueStore::Make();
tensorstore::kvstore::DriverPtr memory_store =
tensorstore::GetMemoryKeyValueStore();
kvstore::DriverPtr GetStore(
tensorstore::neuroglancer_uint64_sharded::GetMaxChunksPerShardFunction
get_max_chunks_per_shard = {}) {
return GetShardedKeyValueStore(
mock_store, tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(CachePool::Make(CachePool::Limits{})),
std::move(get_max_chunks_per_shard));
}
auto GetKvsBackedCache(kvstore::DriverPtr store = {}) {
if (!store) store = GetStore();
return GetCache<KvsBackedTestCache>(
CachePool::Make(CachePool::Limits{}).get(), "",
[&] { return std::make_unique<KvsBackedTestCache>(store); });
}
};
TEST_F(ReadModifyWriteTest, MultipleCaches) {
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "def"));
auto read_future =
GetCacheEntry(cache1, GetChunkKey(0x0))->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdef")));
}
transaction.CommitAsync().IgnoreFuture();
auto write_req = mock_store->write_requests.pop();
write_req(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TEST_F(ReadModifyWriteTest, MultiplePhasesMultipleCaches) {
auto cache1 = GetKvsBackedCache();
auto cache2 = GetKvsBackedCache();
auto transaction = Transaction(tensorstore::isolated);
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto open_transaction,
tensorstore::internal::AcquireOpenTransactionPtrOrError(transaction));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "abc"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "def"));
open_transaction->Barrier();
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache1, GetChunkKey(0x0))
->Modify(open_transaction, false, "ghi"));
TENSORSTORE_ASSERT_OK(GetCacheEntry(cache2, GetChunkKey(0x0))
->Modify(open_transaction, false, "jkl"));
auto read_future =
GetCacheEntry(cache1, GetChunkKey(0x0))->ReadValue(open_transaction);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
EXPECT_THAT(read_future.result(),
::testing::Optional(absl::Cord("abcdefghijkl")));
}
transaction.CommitAsync().IgnoreFuture();
mock_store->write_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->read_requests.pop()(memory_store);
mock_store->write_requests.pop()(memory_store);
TENSORSTORE_EXPECT_OK(transaction.future());
}
TENSORSTORE_GLOBAL_INITIALIZER {
using ::tensorstore::internal::KvsBackedCacheBasicTransactionalTestOptions;
using ::tensorstore::internal::RegisterKvsBackedCacheBasicTransactionalTest;
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
ShardingSpec sharding_spec =
ShardingSpec::FromJson(sharding_spec_json).value();
for (bool underlying_atomic : {false, true}) {
KvsBackedCacheBasicTransactionalTestOptions options;
options.test_name = tensorstore::StrCat("Uint64Sharded/underlying_atomic=",
underlying_atomic);
options.get_store = [=] {
return GetShardedKeyValueStore(
tensorstore::GetMemoryKeyValueStore(underlying_atomic),
tensorstore::InlineExecutor{}, "prefix", sharding_spec,
CachePool::WeakPtr(CachePool::Make(CachePool::Limits{})), {});
};
options.delete_range_supported = false;
options.multi_key_atomic_supported = true;
options.get_key_getter = [] {
return [getter = std::make_shared<GetUint64Key>(true)](
auto key) { return (*getter)(key); };
};
RegisterKvsBackedCacheBasicTransactionalTest(options);
}
}
TEST(ShardedKeyValueStoreTest, SpecRoundtrip) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "memory"}, {"path", "abc/"}};
options.full_spec = {{"driver", "neuroglancer_uint64_sharded"},
{"base", options.full_base_spec},
{"metadata", sharding_spec_json}};
options.check_data_after_serialization = false;
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, SpecRoundtripFile) {
tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.roundtrip_key = std::string(8, '\0');
options.full_base_spec = {{"driver", "file"}, {"path", tempdir.path() + "/"}};
options.full_spec = {{"driver", "neuroglancer_uint64_sharded"},
{"base", options.full_base_spec},
{"metadata", sharding_spec_json}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(ShardedKeyValueStoreTest, Base) {
::nlohmann::json sharding_spec_json{
{"@type", "neuroglancer_uint64_sharded_v1"},
{"hash", "identity"},
{"preshift_bits", 0},
{"minishard_bits", 1},
{"shard_bits", 1},
{"data_encoding", "raw"},
{"minishard_index_encoding", "raw"}};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec,
kvstore::Spec::FromJson({{"driver", "neuroglancer_uint64_sharded"},
{"base", "memory:
{"metadata", sharding_spec_json},
{"path", "1"}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_spec,
kvstore::Spec::FromJson("memory:
EXPECT_THAT(spec.base(), ::testing::Optional(base_spec));
auto context = tensorstore::Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open(spec, context).result());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto base_store,
kvstore::Open(base_spec, context).result());
EXPECT_THAT(store.base(), ::testing::Optional(base_store));
auto transaction = tensorstore::Transaction(tensorstore::atomic_isolated);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store_with_txn, store | transaction);
EXPECT_THAT(store_with_txn.base(), base_store | transaction);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/neuroglancer_uint64_sharded/neuroglancer_uint64_sharded_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
daaeac09-e0ed-4298-b4dd-3f9607b3f0d2 | cpp | abseil/abseil-cpp | btree | absl/container/internal/btree.h | absl/container/btree_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
#define ABSL_CONTAINER_INTERNAL_BTREE_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <iterator>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/common_policy_traits.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER)) && \
!defined(NDEBUG_SANITIZER)
#define ABSL_BTREE_ENABLE_GENERATIONS
#endif
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
constexpr bool BtreeGenerationsEnabled() { return true; }
#else
constexpr bool BtreeGenerationsEnabled() { return false; }
#endif
template <typename Compare, typename T, typename U>
using compare_result_t = absl::result_of_t<const Compare(const T &, const U &)>;
template <typename Compare, typename T>
using btree_is_key_compare_to =
std::is_convertible<compare_result_t<Compare, T, T>, absl::weak_ordering>;
struct StringBtreeDefaultLess {
using is_transparent = void;
StringBtreeDefaultLess() = default;
StringBtreeDefaultLess(std::less<std::string>) {}
StringBtreeDefaultLess(std::less<absl::string_view>) {}
explicit operator std::less<std::string>() const { return {}; }
explicit operator std::less<absl::string_view>() const { return {}; }
explicit operator std::less<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
StringBtreeDefaultLess(std::less<absl::Cord>) {}
absl::weak_ordering operator()(const absl::Cord &lhs,
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
}
absl::weak_ordering operator()(const absl::Cord &lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
}
absl::weak_ordering operator()(absl::string_view lhs,
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
}
};
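// Illustrative note (not original source comments): returning
// absl::weak_ordering rather than bool lets the btree locate a key and detect
// an exact match with a single comparison per slot, e.g.
//
//   StringBtreeDefaultLess cmp;
//   assert(cmp("a", "b") < 0);   // "a" orders before "b"
//   assert(cmp("b", "b") == 0);  // equivalent
//   assert(cmp("c", "b") > 0);   // "c" orders after "b"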
struct StringBtreeDefaultGreater {
using is_transparent = void;
StringBtreeDefaultGreater() = default;
StringBtreeDefaultGreater(std::greater<std::string>) {}
StringBtreeDefaultGreater(std::greater<absl::string_view>) {}
explicit operator std::greater<std::string>() const { return {}; }
explicit operator std::greater<absl::string_view>() const { return {}; }
explicit operator std::greater<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
StringBtreeDefaultGreater(std::greater<absl::Cord>) {}
absl::weak_ordering operator()(const absl::Cord &lhs,
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
}
absl::weak_ordering operator()(const absl::Cord &lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
}
absl::weak_ordering operator()(absl::string_view lhs,
const absl::Cord &rhs) const {
return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
}
};
template <typename Compare, bool is_class = std::is_class<Compare>::value>
struct checked_compare_base : Compare {
using Compare::Compare;
explicit checked_compare_base(Compare c) : Compare(std::move(c)) {}
const Compare &comp() const { return *this; }
};
template <typename Compare>
struct checked_compare_base<Compare, false> {
explicit checked_compare_base(Compare c) : compare(std::move(c)) {}
const Compare &comp() const { return compare; }
Compare compare;
};
struct BtreeTestOnlyCheckedCompareOptOutBase {};
template <typename Compare, typename Key>
struct key_compare_adapter {
struct checked_compare : checked_compare_base<Compare> {
private:
using Base = typename checked_compare::checked_compare_base;
using Base::comp;
bool is_self_equivalent(const Key &k) const {
return comp()(k, k) == 0;
}
template <typename T>
bool is_self_equivalent(const T &) const {
return true;
}
public:
using Base::Base;
checked_compare(Compare comp) : Base(std::move(comp)) {}
explicit operator Compare() const { return comp(); }
template <typename T, typename U,
absl::enable_if_t<
std::is_same<bool, compare_result_t<Compare, T, U>>::value,
int> = 0>
bool operator()(const T &lhs, const U &rhs) const {
assert(is_self_equivalent(lhs));
assert(is_self_equivalent(rhs));
const bool lhs_comp_rhs = comp()(lhs, rhs);
assert(!lhs_comp_rhs || !comp()(rhs, lhs));
return lhs_comp_rhs;
}
template <
typename T, typename U,
absl::enable_if_t<std::is_convertible<compare_result_t<Compare, T, U>,
absl::weak_ordering>::value,
int> = 0>
absl::weak_ordering operator()(const T &lhs, const U &rhs) const {
assert(is_self_equivalent(lhs));
assert(is_self_equivalent(rhs));
const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs);
#ifndef NDEBUG
const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs);
if (lhs_comp_rhs > 0) {
assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0");
} else if (lhs_comp_rhs == 0) {
assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0");
} else {
assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
}
#endif
return lhs_comp_rhs;
}
};
using type = absl::conditional_t<
std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase, Compare>::value,
Compare, checked_compare>;
};
template <>
struct key_compare_adapter<std::less<std::string>, std::string> {
using type = StringBtreeDefaultLess;
};
template <>
struct key_compare_adapter<std::greater<std::string>, std::string> {
using type = StringBtreeDefaultGreater;
};
template <>
struct key_compare_adapter<std::less<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultLess;
};
template <>
struct key_compare_adapter<std::greater<absl::string_view>, absl::string_view> {
using type = StringBtreeDefaultGreater;
};
template <>
struct key_compare_adapter<std::less<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultLess;
};
template <>
struct key_compare_adapter<std::greater<absl::Cord>, absl::Cord> {
using type = StringBtreeDefaultGreater;
};
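// Summary note: taken together, these specializations mean that std::less and
// std::greater over std::string, absl::string_view, and absl::Cord are
// replaced by the three-way StringBtreeDefault{Less,Greater} adapters above,
// while any other comparator is wrapped in checked_compare, which asserts in
// debug builds that the comparator is self-consistent -- unless it opts out
// by deriving from BtreeTestOnlyCheckedCompareOptOutBase.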
template <typename T, typename = void>
struct has_linear_node_search_preference : std::false_type {};
template <typename T, typename = void>
struct prefers_linear_node_search : std::false_type {};
template <typename T>
struct has_linear_node_search_preference<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: std::true_type {};
template <typename T>
struct prefers_linear_node_search<
T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
: T::absl_btree_prefer_linear_node_search {};
template <typename Compare, typename Key>
constexpr bool compare_has_valid_result_type() {
using compare_result_type = compare_result_t<Compare, Key, Key>;
return std::is_same<compare_result_type, bool>::value ||
std::is_convertible<compare_result_type, absl::weak_ordering>::value;
}
template <typename original_key_compare, typename value_type>
class map_value_compare {
template <typename Params>
friend class btree;
protected:
explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {}
original_key_compare comp;
public:
auto operator()(const value_type &lhs, const value_type &rhs) const
-> decltype(comp(lhs.first, rhs.first)) {
return comp(lhs.first, rhs.first);
}
};
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool IsMulti, bool IsMap, typename SlotPolicy>
struct common_params : common_policy_traits<SlotPolicy> {
using original_key_compare = Compare;
using key_compare =
absl::conditional_t<!compare_has_valid_result_type<Compare, Key>(),
Compare,
typename key_compare_adapter<Compare, Key>::type>;
static constexpr bool kIsKeyCompareStringAdapted =
std::is_same<key_compare, StringBtreeDefaultLess>::value ||
std::is_same<key_compare, StringBtreeDefaultGreater>::value;
static constexpr bool kIsKeyCompareTransparent =
IsTransparent<original_key_compare>::value || kIsKeyCompareStringAdapted;
using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
using allocator_type = Alloc;
using key_type = Key;
using size_type = size_t;
using difference_type = ptrdiff_t;
using slot_policy = SlotPolicy;
using slot_type = typename slot_policy::slot_type;
using value_type = typename slot_policy::value_type;
using init_type = typename slot_policy::mutable_value_type;
using pointer = value_type *;
using const_pointer = const value_type *;
using reference = value_type &;
using const_reference = const value_type &;
using value_compare =
absl::conditional_t<IsMap,
map_value_compare<original_key_compare, value_type>,
original_key_compare>;
using is_map_container = std::integral_constant<bool, IsMap>;
template <typename LookupKey>
constexpr static bool can_have_multiple_equivalent_keys() {
return IsMulti || (IsTransparent<key_compare>::value &&
!std::is_same<LookupKey, Key>::value &&
!kIsKeyCompareStringAdapted);
}
enum {
kTargetNodeSize = TargetNodeSize,
kNodeSlotSpace = TargetNodeSize - (sizeof(void *) + 4),
};
using node_count_type =
absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) >
(std::numeric_limits<uint8_t>::max)()),
uint16_t, uint8_t>;
};
template <typename Compare>
struct upper_bound_adapter {
explicit upper_bound_adapter(const Compare &c) : comp(c) {}
template <typename K1, typename K2>
bool operator()(const K1 &a, const K2 &b) const {
return !compare_internal::compare_result_as_less_than(comp(b, a));
}
private:
Compare comp;
};
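// How this adapter works: a lower-bound search advances past every element e
// for which the predicate (e, k) holds. Here the wrapped predicate is true
// whenever k is not strictly less than e, i.e. whenever e <= k, so the same
// search code stops at the first element strictly greater than k -- which is
// exactly upper_bound.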
enum class MatchKind : uint8_t { kEq, kNe };
template <typename V, bool IsCompareTo>
struct SearchResult {
V value;
MatchKind match;
static constexpr bool HasMatch() { return true; }
bool IsEq() const { return match == MatchKind::kEq; }
};
template <typename V>
struct SearchResult<V, false> {
SearchResult() = default;
explicit SearchResult(V v) : value(v) {}
  SearchResult(V v, MatchKind /*match*/) : value(v) {}
V value;
static constexpr bool HasMatch() { return false; }
static constexpr bool IsEq() { return false; }
};
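// Note: with a boolean (less-than only) comparator the search cannot tell
// whether it stopped on an exact match, so HasMatch()/IsEq() are constexpr
// false and callers pay one extra equality comparison after the search; with
// a three-way comparator the match kind comes for free and that extra
// comparison can be compiled away.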
template <typename Params>
class btree_node {
using is_key_compare_to = typename Params::is_key_compare_to;
using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type;
using original_key_compare = typename Params::original_key_compare;
public:
using params_type = Params;
using key_type = typename Params::key_type;
using value_type = typename Params::value_type;
using pointer = typename Params::pointer;
using const_pointer = typename Params::const_pointer;
using reference = typename Params::reference;
using const_reference = typename Params::const_reference;
using key_compare = typename Params::key_compare;
using size_type = typename Params::size_type;
using difference_type = typename Params::difference_type;
using use_linear_search = std::integral_constant<
bool, has_linear_node_search_preference<original_key_compare>::value
? prefers_linear_node_search<original_key_compare>::value
: has_linear_node_search_preference<key_type>::value
? prefers_linear_node_search<key_type>::value
: std::is_arithmetic<key_type>::value &&
(std::is_same<std::less<key_type>,
original_key_compare>::value ||
std::is_same<std::greater<key_type>,
original_key_compare>::value)>;
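  // By default only arithmetic keys ordered by std::less or std::greater take
  // the linear path, where scanning a few contiguous slots beats binary
  // search's branching. A key or comparator type can opt in explicitly via
  // the member type probed above -- sketch (MyKey is hypothetical; the member
  // type name is the real extension point):
  //
  //   struct MyKey {
  //     using absl_btree_prefer_linear_node_search = std::true_type;
  //     ...
  //   };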
~btree_node() = default;
btree_node(btree_node const &) = delete;
btree_node &operator=(btree_node const &) = delete;
protected:
btree_node() = default;
private:
using layout_type =
absl::container_internal::Layout<btree_node *, uint32_t, field_type,
slot_type, btree_node *>;
  using leaf_layout_type = typename layout_type::template WithStaticSizes<
      /*parent*/ 1,
      /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
      /*position, start, finish, max_count*/ 4>;
constexpr static size_type SizeWithNSlots(size_type n) {
    return leaf_layout_type(/*slot_count=*/n, /*children=*/0).AllocSize();
}
constexpr static size_type MinimumOverhead() {
return SizeWithNSlots(1) - sizeof(slot_type);
}
constexpr static size_type NodeTargetSlots(const size_type begin,
const size_type end) {
return begin == end ? begin
: SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
? NodeTargetSlots(begin, (begin + end) / 2)
: NodeTargetSlots((begin + end) / 2 + 1, end);
}
constexpr static size_type kTargetNodeSize = params_type::kTargetNodeSize;
constexpr static size_type kNodeTargetSlots =
NodeTargetSlots(0, kTargetNodeSize);
constexpr static size_type kMinNodeSlots = 4;
constexpr static size_type kNodeSlots =
kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
  using internal_layout_type = typename layout_type::template WithStaticSizes<
      /*parent*/ 1,
      /*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
      /*position, start, finish, max_count*/ 4, /*slots*/ kNodeSlots,
      /*children*/ kNodeSlots + 1>;
constexpr static field_type kInternalNodeMaxCount = 0;
constexpr static leaf_layout_type LeafLayout(
const size_type slot_count = kNodeSlots) {
return leaf_layout_type(slot_count, 0);
}
constexpr static auto InternalLayout() { return internal_layout_type(); }
constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
}
constexpr static size_type InternalSize() {
return InternalLayout().AllocSize();
}
constexpr static size_type Alignment() {
static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
"Alignment of all nodes must be equal.");
return InternalLayout().Alignment();
}
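  // Layout summary: a node is one allocation described by layout_type --
  // [parent pointer][optional generation][position, start, finish,
  // max_count][slots...][children...] -- and a leaf simply omits the children
  // array, which is why the static_assert above can require leaf and internal
  // layouts to share a single alignment.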
template <size_type N>
inline typename layout_type::template ElementType<N> *GetField() {
assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
}
template <size_type N>
inline const typename layout_type::template ElementType<N> *GetField() const {
assert(N < 4 || is_internal());
return InternalLayout().template Pointer<N>(
reinterpret_cast<const char *>(this));
}
void set_parent(btree_node *p) { *GetField<0>() = p; }
field_type &mutable_finish() { return GetField<2>()[2]; }
slot_type *slot(size_type i) { return &GetField<3>()[i]; }
slot_type *start_slot() { return slot(start()); }
slot_type *finish_slot() { return slot(finish()); }
const slot_type *slot(size_type i) const { return &GetField<3>()[i]; }
void set_position(field_type v) { GetField<2>()[0] = v; }
void set_start(field_type v) { GetField<2>()[1] = v; }
void set_finish(field_type v) { GetField<2>()[2] = v; }
void set_max_count(field_type v) { GetField<2>()[3] = v; }
public:
bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; }
bool is_internal() const { return !is_leaf(); }
field_type position() const { return GetField<2>()[0]; }
field_type start() const {
assert(GetField<2>()[1] == 0);
return 0;
}
field_type finish() const { return GetField<2>()[2]; }
field_type count() const {
assert(finish() >= start());
return finish() - start();
}
field_type max_count() const {
const field_type max_count = GetField<2>()[3];
return max_count == field_type{kInternalNodeMaxCount}
? field_type{kNodeSlots}
: max_count;
}
btree_node *parent() const { return *GetField<0>(); }
bool is_root() const { return parent()->is_leaf(); }
void make_root() {
assert(parent()->is_root());
set_generation(parent()->generation());
set_parent(parent()->parent());
}
uint32_t *get_root_generation() const {
assert(BtreeGenerationsEnabled());
const btree_node *curr = this;
for (; !curr->is_root(); curr = curr->parent()) continue;
return const_cast<uint32_t *>(&curr->GetField<1>()[0]);
}
uint32_t generation() const {
return BtreeGenerationsEnabled() ? *get_root_generation() : 0;
}
void set_generation(uint32_t generation) {
if (BtreeGenerationsEnabled()) GetField<1>()[0] = generation;
}
void next_generation() {
if (BtreeGenerationsEnabled()) ++*get_root_generation();
}
const key_type &key(size_type i) const { return params_type::key(slot(i)); }
reference value(size_type i) { return params_type::element(slot(i)); }
const_reference value(size_type i) const {
return params_type::element(slot(i));
}
btree_node *child(field_type i) const { return GetField<4>()[i]; }
btree_node *start_child() const { return child(start()); }
btree_node *&mutable_child(field_type i) { return GetField<4>()[i]; }
void clear_child(field_type i) {
absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
}
void set_child_noupdate_position(field_type i, btree_node *c) {
absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
mutable_child(i) = c;
}
void set_child(field_type i, btree_node *c) {
set_child_noupdate_position(i, c);
c->set_position(i);
}
void init_child(field_type i, btree_node *c) {
set_child(i, c);
c->set_parent(this);
}
template <typename K>
SearchResult<size_type, is_key_compare_to::value> lower_bound(
const K &k, const key_compare &comp) const {
return use_linear_search::value ? linear_search(k, comp)
: binary_search(k, comp);
}
template <typename K>
size_type upper_bound(const K &k, const key_compare &comp) const {
auto upper_compare = upper_bound_adapter<key_compare>(comp);
return use_linear_search::value ? linear_search(k, upper_compare).value
: binary_search(k, upper_compare).value;
}
template <typename K, typename Compare>
SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
linear_search(const K &k, const Compare &comp) const {
return linear_search_impl(k, start(), finish(), comp,
btree_is_key_compare_to<Compare, key_type>());
}
template <typename K, typename Compare>
SearchResult<size_type, btree_is_key_compare_to<Compare, key_type>::value>
binary_search(const K &k, const Compare &comp) const {
return binary_search_impl(k, start(), finish(), comp,
btree_is_key_compare_to<Compare, key_type>());
}
template <typename K, typename Compare>
SearchResult<size_type, false> linear_search_impl(
const K &k, size_type s, const size_type e, const Compare &comp,
      std::false_type /*IsCompareTo*/) const {
while (s < e) {
if (!comp(key(s), k)) {
break;
}
++s;
}
return SearchResult<size_type, false>{s};
}
template <typename K, typename Compare>
SearchResult<size_type, true> linear_search_impl(
const K &k, size_type s, const size_type e, const Compare &comp,
      std::true_type /*IsCompareTo*/) const {
while (s < e) {
const absl::weak_ordering c = comp(key(s), k);
if (c == 0) {
return {s, MatchKind::kEq};
} else if (c > 0) {
break;
}
++s;
}
return {s, MatchKind::kNe};
}
template <typename K, typename Compare>
SearchResult<size_type, false> binary_search_impl(
const K &k, size_type s, size_type e, const Compare &comp,
      std::false_type /*IsCompareTo*/) const {
while (s != e) {
const size_type mid = (s + e) >> 1;
if (comp(key(mid), k)) {
s = mid + 1;
} else {
e = mid;
}
}
return SearchResult<size_type, false>{s};
}
template <typename K, typename CompareTo>
SearchResult<size_type, true> binary_search_impl(
const K &k, size_type s, size_type e, const CompareTo &comp,
      std::true_type /*IsCompareTo*/) const {
if (params_type::template can_have_multiple_equivalent_keys<K>()) {
MatchKind exact_match = MatchKind::kNe;
while (s != e) {
const size_type mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
if (c < 0) {
s = mid + 1;
} else {
e = mid;
if (c == 0) {
exact_match = MatchKind::kEq;
}
}
}
return {s, exact_match};
} else {
while (s != e) {
const size_type mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
if (c < 0) {
s = mid + 1;
} else if (c > 0) {
e = mid;
} else {
return {mid, MatchKind::kEq};
}
}
return {s, MatchKind::kNe};
}
}
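  // Why two branches above: when equivalent keys are possible, an exact match
  // need not be the leftmost one, so the search keeps narrowing toward the
  // lower bound even after seeing c == 0; when keys are unique, the first
  // exact match is the answer and the loop returns early.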
template <typename Compare>
bool is_ordered_correctly(field_type i, const Compare &comp) const {
if (std::is_base_of<BtreeTestOnlyCheckedCompareOptOutBase,
Compare>::value ||
params_type::kIsKeyCompareStringAdapted) {
return true;
}
const auto compare = [&](field_type a, field_type b) {
const absl::weak_ordering cmp =
compare_internal::do_three_way_comparison(comp, key(a), key(b));
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
};
int cmp = -1;
constexpr bool kCanHaveEquivKeys =
params_type::template can_have_multiple_equivalent_keys<key_type>();
for (field_type j = start(); j < finish(); ++j) {
if (j == i) {
if (cmp > 0) return false;
continue;
}
int new_cmp = compare(j, i);
if (new_cmp < cmp || (!kCanHaveEquivKeys && new_cmp == 0)) return false;
cmp = new_cmp;
}
return true;
}
template <typename... Args>
void emplace_value(field_type i, allocator_type *alloc, Args &&...args);
void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
void rebalance_right_to_left(field_type to_move, btree_node *right,
allocator_type *alloc);
void rebalance_left_to_right(field_type to_move, btree_node *right,
allocator_type *alloc);
void split(int insert_position, btree_node *dest, allocator_type *alloc);
void merge(btree_node *src, allocator_type *alloc);
void init_leaf(field_type position, field_type max_count,
btree_node *parent) {
set_generation(0);
set_parent(parent);
set_position(position);
set_start(0);
set_finish(0);
set_max_count(max_count);
absl::container_internal::SanitizerPoisonMemoryRegion(
start_slot(), max_count * sizeof(slot_type));
}
void init_internal(field_type position, btree_node *parent) {
init_leaf(position, kNodeSlots, parent);
set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
&mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
}
static void deallocate(const size_type size, btree_node *node,
allocator_type *alloc) {
absl::container_internal::SanitizerUnpoisonMemoryRegion(node, size);
absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
}
static void clear_and_delete(btree_node *node, allocator_type *alloc);
private:
template <typename... Args>
void value_init(const field_type i, allocator_type *alloc, Args &&...args) {
next_generation();
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
void value_destroy(const field_type i, allocator_type *alloc) {
next_generation();
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
void value_destroy_n(const field_type i, const field_type n,
allocator_type *alloc) {
next_generation();
for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
params_type::destroy(alloc, s);
absl::container_internal::SanitizerPoisonObject(s);
}
}
static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
absl::container_internal::SanitizerUnpoisonObject(dest);
params_type::transfer(alloc, dest, src);
absl::container_internal::SanitizerPoisonObject(src);
}
void transfer(const size_type dest_i, const size_type src_i,
btree_node *src_node, allocator_type *alloc) {
next_generation();
transfer(slot(dest_i), src_node->slot(src_i), alloc);
}
void transfer_n(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
next_generation();
for (slot_type *src = src_node->slot(src_i), *end = src + n,
*dest = slot(dest_i);
src != end; ++src, ++dest) {
transfer(dest, src, alloc);
}
}
void transfer_n_backward(const size_type n, const size_type dest_i,
const size_type src_i, btree_node *src_node,
allocator_type *alloc) {
next_generation();
for (slot_type *src = src_node->slot(src_i + n), *end = src - n,
*dest = slot(dest_i + n);
src != end; --src, --dest) {
transfer(dest - 1, src - 1, alloc);
}
}
template <typename P>
friend class btree;
template <typename N, typename R, typename P>
friend class btree_iterator;
friend class BtreeNodePeer;
friend struct btree_access;
};
template <typename Node>
bool AreNodesFromSameContainer(const Node *node_a, const Node *node_b) {
if (node_a == nullptr || node_b == nullptr) return true;
while (!node_a->is_root()) node_a = node_a->parent();
while (!node_b->is_root()) node_b = node_b->parent();
return node_a == node_b;
}
class btree_iterator_generation_info_enabled {
public:
explicit btree_iterator_generation_info_enabled(uint32_t g)
: generation_(g) {}
template <typename Node>
void update_generation(const Node *node) {
if (node != nullptr) generation_ = node->generation();
}
uint32_t generation() const { return generation_; }
template <typename Node>
void assert_valid_generation(const Node *node) const {
if (node != nullptr && node->generation() != generation_) {
ABSL_INTERNAL_LOG(
FATAL,
"Attempting to use an invalidated iterator. The corresponding b-tree "
"container has been mutated since this iterator was constructed.");
}
}
private:
uint32_t generation_;
};
class btree_iterator_generation_info_disabled {
public:
explicit btree_iterator_generation_info_disabled(uint32_t) {}
static void update_generation(const void *) {}
static uint32_t generation() { return 0; }
static void assert_valid_generation(const void *) {}
};
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
using btree_iterator_generation_info = btree_iterator_generation_info_enabled;
#else
using btree_iterator_generation_info = btree_iterator_generation_info_disabled;
#endif
template <typename Node, typename Reference, typename Pointer>
class btree_iterator : private btree_iterator_generation_info {
using field_type = typename Node::field_type;
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
using is_map_container = typename params_type::is_map_container;
using node_type = Node;
using normal_node = typename std::remove_const<Node>::type;
using const_node = const Node;
using normal_pointer = typename params_type::pointer;
using normal_reference = typename params_type::reference;
using const_pointer = typename params_type::const_pointer;
using const_reference = typename params_type::const_reference;
using slot_type = typename params_type::slot_type;
using iterator = absl::conditional_t<
is_map_container::value,
btree_iterator<normal_node, normal_reference, normal_pointer>,
btree_iterator<normal_node, const_reference, const_pointer>>;
using const_iterator =
btree_iterator<const_node, const_reference, const_pointer>;
public:
using difference_type = typename Node::difference_type;
using value_type = typename params_type::value_type;
using pointer = Pointer;
using reference = Reference;
using iterator_category = std::bidirectional_iterator_tag;
btree_iterator() : btree_iterator(nullptr, -1) {}
explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {}
btree_iterator(Node *n, int p)
: btree_iterator_generation_info(n != nullptr ? n->generation()
: ~uint32_t{}),
node_(n),
position_(p) {}
template <typename N, typename R, typename P,
absl::enable_if_t<
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
btree_iterator(const btree_iterator<N, R, P> other)
: btree_iterator_generation_info(other),
node_(other.node_),
position_(other.position_) {}
bool operator==(const iterator &other) const {
return Equals(other);
}
bool operator==(const const_iterator &other) const {
return Equals(other);
}
bool operator!=(const iterator &other) const {
return !Equals(other);
}
bool operator!=(const const_iterator &other) const {
return !Equals(other);
}
difference_type operator-(const_iterator other) const {
if (node_ == other.node_) {
if (node_->is_leaf()) return position_ - other.position_;
if (position_ == other.position_) return 0;
}
return distance_slow(other);
}
reference operator*() const {
ABSL_HARDENING_ASSERT(node_ != nullptr);
assert_valid_generation(node_);
ABSL_HARDENING_ASSERT(position_ >= node_->start());
if (position_ >= node_->finish()) {
ABSL_HARDENING_ASSERT(!IsEndIterator() && "Dereferencing end() iterator");
ABSL_HARDENING_ASSERT(position_ < node_->finish());
}
return node_->value(static_cast<field_type>(position_));
}
pointer operator->() const { return &operator*(); }
btree_iterator &operator++() {
increment();
return *this;
}
btree_iterator &operator--() {
decrement();
return *this;
}
btree_iterator operator++(int) {
btree_iterator tmp = *this;
++*this;
return tmp;
}
btree_iterator operator--(int) {
btree_iterator tmp = *this;
--*this;
return tmp;
}
private:
friend iterator;
friend const_iterator;
template <typename Params>
friend class btree;
template <typename Tree>
friend class btree_container;
template <typename Tree>
friend class btree_set_container;
template <typename Tree>
friend class btree_map_container;
template <typename Tree>
friend class btree_multiset_container;
template <typename TreeType, typename CheckerType>
friend class base_checker;
friend struct btree_access;
template <typename N, typename R, typename P,
absl::enable_if_t<
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
explicit btree_iterator(const btree_iterator<N, R, P> other)
: btree_iterator_generation_info(other.generation()),
node_(const_cast<node_type *>(other.node_)),
position_(other.position_) {}
bool Equals(const const_iterator other) const {
ABSL_HARDENING_ASSERT(((node_ == nullptr && other.node_ == nullptr) ||
(node_ != nullptr && other.node_ != nullptr)) &&
"Comparing default-constructed iterator with "
"non-default-constructed iterator.");
assert(AreNodesFromSameContainer(node_, other.node_) &&
"Comparing iterators from different containers.");
assert_valid_generation(node_);
other.assert_valid_generation(other.node_);
return node_ == other.node_ && position_ == other.position_;
}
bool IsEndIterator() const {
if (position_ != node_->finish()) return false;
node_type *node = node_;
while (!node->is_root()) {
if (node->position() != node->parent()->finish()) return false;
node = node->parent();
}
return true;
}
difference_type distance_slow(const_iterator other) const;
void increment() {
assert_valid_generation(node_);
if (node_->is_leaf() && ++position_ < node_->finish()) {
return;
}
increment_slow();
}
void increment_slow();
void decrement() {
assert_valid_generation(node_);
if (node_->is_leaf() && --position_ >= node_->start()) {
return;
}
decrement_slow();
}
void decrement_slow();
const key_type &key() const {
return node_->key(static_cast<size_type>(position_));
}
decltype(std::declval<Node *>()->slot(0)) slot() {
return node_->slot(static_cast<size_type>(position_));
}
void update_generation() {
btree_iterator_generation_info::update_generation(node_);
}
Node *node_;
int position_;
};
template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
using field_type = typename node_type::field_type;
struct EmptyNodeType : node_type {
using field_type = typename node_type::field_type;
node_type *parent;
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
uint32_t generation = 0;
#endif
field_type position = 0;
field_type start = 0;
field_type finish = 0;
field_type max_count = node_type::kInternalNodeMaxCount + 1;
constexpr EmptyNodeType() : parent(this) {}
};
static node_type *EmptyNode() {
alignas(node_type::Alignment()) static constexpr EmptyNodeType empty_node;
return const_cast<EmptyNodeType *>(&empty_node);
}
enum : uint32_t {
kNodeSlots = node_type::kNodeSlots,
kMinNodeValues = kNodeSlots / 2,
};
struct node_stats {
using size_type = typename Params::size_type;
node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
node_stats &operator+=(const node_stats &other) {
leaf_nodes += other.leaf_nodes;
internal_nodes += other.internal_nodes;
return *this;
}
size_type leaf_nodes;
size_type internal_nodes;
};
public:
using key_type = typename Params::key_type;
using value_type = typename Params::value_type;
using size_type = typename Params::size_type;
using difference_type = typename Params::difference_type;
using key_compare = typename Params::key_compare;
using original_key_compare = typename Params::original_key_compare;
using value_compare = typename Params::value_compare;
using allocator_type = typename Params::allocator_type;
using reference = typename Params::reference;
using const_reference = typename Params::const_reference;
using pointer = typename Params::pointer;
using const_pointer = typename Params::const_pointer;
using iterator =
typename btree_iterator<node_type, reference, pointer>::iterator;
using const_iterator = typename iterator::const_iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
using node_handle_type = node_handle<Params, Params, allocator_type>;
using params_type = Params;
using slot_type = typename Params::slot_type;
private:
template <typename Btree>
void copy_or_move_values_in_order(Btree &other);
constexpr static bool static_assert_validation();
public:
btree(const key_compare &comp, const allocator_type &alloc)
: root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {}
btree(const btree &other) : btree(other, other.allocator()) {}
btree(const btree &other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
copy_or_move_values_in_order(other);
}
btree(btree &&other) noexcept
: root_(std::exchange(other.root_, EmptyNode())),
rightmost_(std::move(other.rightmost_)),
size_(std::exchange(other.size_, 0u)) {
other.mutable_rightmost() = EmptyNode();
}
btree(btree &&other, const allocator_type &alloc)
: btree(other.key_comp(), alloc) {
if (alloc == other.allocator()) {
swap(other);
} else {
copy_or_move_values_in_order(other);
}
}
~btree() {
static_assert(static_assert_validation(), "This call must be elided.");
clear();
}
btree &operator=(const btree &other);
btree &operator=(btree &&other) noexcept;
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
iterator end() { return iterator(rightmost(), rightmost()->finish()); }
const_iterator end() const {
return const_iterator(rightmost(), rightmost()->finish());
}
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const {
return const_reverse_iterator(end());
}
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const {
return const_reverse_iterator(begin());
}
template <typename K>
iterator lower_bound(const K &key) {
return internal_end(internal_lower_bound(key).value);
}
template <typename K>
const_iterator lower_bound(const K &key) const {
return internal_end(internal_lower_bound(key).value);
}
template <typename K>
std::pair<iterator, bool> lower_bound_equal(const K &key) const;
template <typename K>
iterator upper_bound(const K &key) {
return internal_end(internal_upper_bound(key));
}
template <typename K>
const_iterator upper_bound(const K &key) const {
return internal_end(internal_upper_bound(key));
}
template <typename K>
std::pair<iterator, iterator> equal_range(const K &key);
template <typename K>
std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
return const_cast<btree *>(this)->equal_range(key);
}
template <typename K, typename... Args>
std::pair<iterator, bool> insert_unique(const K &key, Args &&...args);
template <typename K, typename... Args>
std::pair<iterator, bool> insert_hint_unique(iterator position, const K &key,
Args &&...args);
template <typename InputIterator,
typename = decltype(std::declval<const key_compare &>()(
params_type::key(*std::declval<InputIterator>()),
std::declval<const key_type &>()))>
void insert_iterator_unique(InputIterator b, InputIterator e, int);
template <typename InputIterator>
void insert_iterator_unique(InputIterator b, InputIterator e, char);
template <typename ValueType>
iterator insert_multi(const key_type &key, ValueType &&v);
template <typename ValueType>
iterator insert_multi(ValueType &&v) {
return insert_multi(params_type::key(v), std::forward<ValueType>(v));
}
template <typename ValueType>
iterator insert_hint_multi(iterator position, ValueType &&v);
template <typename InputIterator>
void insert_iterator_multi(InputIterator b,
InputIterator e);
iterator erase(iterator iter);
std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
template <typename K>
iterator find(const K &key) {
return internal_end(internal_find(key));
}
template <typename K>
const_iterator find(const K &key) const {
return internal_end(internal_find(key));
}
void clear();
void swap(btree &other);
const key_compare &key_comp() const noexcept {
return rightmost_.template get<0>();
}
template <typename K1, typename K2>
bool compare_keys(const K1 &a, const K2 &b) const {
return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
value_compare value_comp() const {
return value_compare(original_key_compare(key_comp()));
}
void verify() const;
size_type size() const { return size_; }
size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
bool empty() const { return size_ == 0; }
size_type height() const {
size_type h = 0;
if (!empty()) {
const node_type *n = root();
do {
++h;
n = n->parent();
} while (n != root());
}
return h;
}
size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
size_type internal_nodes() const {
return internal_stats(root()).internal_nodes;
}
size_type nodes() const {
node_stats stats = internal_stats(root());
return stats.leaf_nodes + stats.internal_nodes;
}
size_type bytes_used() const {
node_stats stats = internal_stats(root());
if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
return sizeof(*this) + node_type::LeafSize(root()->max_count());
} else {
return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
stats.internal_nodes * node_type::InternalSize();
}
}
static double average_bytes_per_value() {
const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0;
return node_type::LeafSize() / expected_values_per_node;
}
double fullness() const {
if (empty()) return 0.0;
return static_cast<double>(size()) / (nodes() * kNodeSlots);
}
double overhead() const {
if (empty()) return 0.0;
return (bytes_used() - size() * sizeof(value_type)) /
static_cast<double>(size());
}
allocator_type get_allocator() const { return allocator(); }
private:
friend struct btree_access;
node_type *root() { return root_; }
const node_type *root() const { return root_; }
node_type *&mutable_root() noexcept { return root_; }
node_type *rightmost() { return rightmost_.template get<2>(); }
const node_type *rightmost() const { return rightmost_.template get<2>(); }
node_type *&mutable_rightmost() noexcept {
return rightmost_.template get<2>();
}
key_compare *mutable_key_comp() noexcept {
return &rightmost_.template get<0>();
}
node_type *leftmost() { return root()->parent(); }
const node_type *leftmost() const { return root()->parent(); }
allocator_type *mutable_allocator() noexcept {
return &rightmost_.template get<1>();
}
const allocator_type &allocator() const noexcept {
return rightmost_.template get<1>();
}
node_type *allocate(size_type size) {
return reinterpret_cast<node_type *>(
absl::container_internal::Allocate<node_type::Alignment()>(
mutable_allocator(), size));
}
node_type *new_internal_node(field_type position, node_type *parent) {
node_type *n = allocate(node_type::InternalSize());
n->init_internal(position, parent);
return n;
}
node_type *new_leaf_node(field_type position, node_type *parent) {
node_type *n = allocate(node_type::LeafSize());
n->init_leaf(position, kNodeSlots, parent);
return n;
}
node_type *new_leaf_root_node(field_type max_count) {
node_type *n = allocate(node_type::LeafSize(max_count));
n->init_leaf(0, max_count, n);
return n;
}
iterator rebalance_after_delete(iterator iter);
void rebalance_or_split(iterator *iter);
void merge_nodes(node_type *left, node_type *right);
bool try_merge_or_rebalance(iterator *iter);
void try_shrink();
iterator internal_end(iterator iter) {
return iter.node_ != nullptr ? iter : end();
}
const_iterator internal_end(const_iterator iter) const {
return iter.node_ != nullptr ? iter : end();
}
template <typename... Args>
iterator internal_emplace(iterator iter, Args &&...args);
template <typename IterType>
static IterType internal_last(IterType iter);
template <typename K>
SearchResult<iterator, is_key_compare_to::value> internal_locate(
const K &key) const;
template <typename K>
SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
const K &key) const;
template <typename K>
iterator internal_upper_bound(const K &key) const;
template <typename K>
iterator internal_find(const K &key) const;
size_type internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const;
node_stats internal_stats(const node_type *node) const {
if (node == nullptr || (node == root() && empty())) {
return node_stats(0, 0);
}
if (node->is_leaf()) {
return node_stats(1, 0);
}
node_stats res(0, 1);
for (int i = node->start(); i <= node->finish(); ++i) {
res += internal_stats(node->child(i));
}
return res;
}
node_type *root_;
absl::container_internal::CompressedTuple<key_compare, allocator_type,
node_type *>
rightmost_;
size_type size_;
};
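// Note: emplace_value below makes room by shifting slots [i, finish()) one
// position to the right, constructs the new value in place, and, for internal
// nodes, shifts the child pointers to the right of the insertion point as
// well.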
template <typename P>
template <typename... Args>
inline void btree_node<P>::emplace_value(const field_type i,
allocator_type *alloc,
Args &&...args) {
assert(i >= start());
assert(i <= finish());
if (i < finish()) {
    transfer_n_backward(finish() - i, /*dest_i=*/i + 1,
                        /*src_i=*/i, this, alloc);
}
value_init(static_cast<field_type>(i), alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
if (is_internal() && finish() > i + 1) {
for (field_type j = finish(); j > i + 1; --j) {
set_child(j, child(j - 1));
}
clear_child(i + 1);
}
}
template <typename P>
inline void btree_node<P>::remove_values(const field_type i,
const field_type to_erase,
allocator_type *alloc) {
value_destroy_n(i, to_erase, alloc);
const field_type orig_finish = finish();
const field_type src_i = i + to_erase;
transfer_n(orig_finish - src_i, i, src_i, this, alloc);
if (is_internal()) {
for (field_type j = 0; j < to_erase; ++j) {
clear_and_delete(child(i + j + 1), alloc);
}
for (field_type j = i + to_erase + 1; j <= orig_finish; ++j) {
set_child(j - to_erase, child(j));
clear_child(j);
}
}
set_finish(orig_finish - to_erase);
}
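// The two rebalance routines below move `to_move` values between adjacent
// siblings by rotating through the parent: the separator key in the parent
// moves down into one sibling and the innermost transferred key moves up to
// become the new separator, so the ordering invariant holds without comparing
// any keys.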
template <typename P>
void btree_node<P>::rebalance_right_to_left(field_type to_move,
btree_node *right,
allocator_type *alloc) {
assert(parent() == right->parent());
assert(position() + 1 == right->position());
assert(right->count() >= count());
assert(to_move >= 1);
assert(to_move <= right->count());
transfer(finish(), position(), parent(), alloc);
transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
right->transfer_n(right->count() - to_move, right->start(),
right->start() + to_move, right, alloc);
if (is_internal()) {
for (field_type i = 0; i < to_move; ++i) {
init_child(finish() + i + 1, right->child(i));
}
for (field_type i = right->start(); i <= right->finish() - to_move; ++i) {
assert(i + to_move <= right->max_count());
right->init_child(i, right->child(i + to_move));
right->clear_child(i + to_move);
}
}
set_finish(finish() + to_move);
right->set_finish(right->finish() - to_move);
}
template <typename P>
void btree_node<P>::rebalance_left_to_right(field_type to_move,
btree_node *right,
allocator_type *alloc) {
assert(parent() == right->parent());
assert(position() + 1 == right->position());
assert(count() >= right->count());
assert(to_move >= 1);
assert(to_move <= count());
right->transfer_n_backward(right->count(), right->start() + to_move,
right->start(), right, alloc);
right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
alloc);
parent()->transfer(position(), finish() - to_move, this, alloc);
if (is_internal()) {
for (field_type i = right->finish() + 1; i > right->start(); --i) {
right->init_child(i - 1 + to_move, right->child(i - 1));
right->clear_child(i - 1);
}
for (field_type i = 1; i <= to_move; ++i) {
right->init_child(i - 1, child(finish() - to_move + i));
clear_child(finish() - to_move + i);
}
}
set_finish(finish() - to_move);
right->set_finish(right->finish() + to_move);
}
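// Splits a full node: a suffix of this node's values is transferred to `dest`
// (the new right sibling), and the boundary value is pushed up into the
// parent as the separator. The split point is biased by `insert_position` so
// that the pending insertion lands in the node with more free space:
// inserting at the front leaves this node nearly empty, inserting at the back
// leaves `dest` empty, and anything else splits roughly in half.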
template <typename P>
void btree_node<P>::split(const int insert_position, btree_node *dest,
allocator_type *alloc) {
assert(dest->count() == 0);
assert(max_count() == kNodeSlots);
assert(position() + 1 == dest->position());
assert(parent() == dest->parent());
if (insert_position == start()) {
dest->set_finish(dest->start() + finish() - 1);
} else if (insert_position == kNodeSlots) {
dest->set_finish(dest->start());
} else {
dest->set_finish(dest->start() + count() / 2);
}
set_finish(finish() - dest->count());
assert(count() >= 1);
dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
--mutable_finish();
parent()->emplace_value(position(), alloc, finish_slot());
value_destroy(finish(), alloc);
parent()->set_child_noupdate_position(position() + 1, dest);
if (is_internal()) {
for (field_type i = dest->start(), j = finish() + 1; i <= dest->finish();
++i, ++j) {
assert(child(j) != nullptr);
dest->init_child(i, child(j));
clear_child(j);
}
}
}
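// Merges the right sibling `src` into this node: the separator between the
// two nodes is pulled down from the parent, src's values (and children, for
// internal nodes) are appended, and the separator slot is removed from the
// parent, which also deallocates the now-empty `src`.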
template <typename P>
void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
assert(parent() == src->parent());
assert(position() + 1 == src->position());
value_init(finish(), alloc, parent()->slot(position()));
transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
if (is_internal()) {
for (field_type i = src->start(), j = finish() + 1; i <= src->finish();
++i, ++j) {
init_child(j, src->child(i));
src->clear_child(i);
}
}
set_finish(start() + 1 + count() + src->count());
src->set_finish(src->start());
parent()->remove_values(position(), 1, alloc);
}
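// Destroys and deallocates the entire subtree rooted at `node` iteratively
// rather than recursively, so destruction of a tall tree cannot overflow the
// stack: descend to the leftmost leaf, destroy leaves left to right, and
// unwind through the internal parents. With generations enabled, the leftmost
// leaf is deliberately deallocated last (see the
// ABSL_BTREE_ENABLE_GENERATIONS blocks below).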
template <typename P>
void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
if (node->is_leaf()) {
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(LeafSize(node->max_count()), node, alloc);
return;
}
if (node->count() == 0) {
deallocate(InternalSize(), node, alloc);
return;
}
btree_node *delete_root_parent = node->parent();
while (node->is_internal()) node = node->start_child();
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
btree_node *leftmost_leaf = node;
#endif
size_type pos = node->position();
btree_node *parent = node->parent();
for (;;) {
assert(pos <= parent->finish());
do {
node = parent->child(static_cast<field_type>(pos));
if (node->is_internal()) {
while (node->is_internal()) node = node->start_child();
pos = node->position();
parent = node->parent();
}
node->value_destroy_n(node->start(), node->count(), alloc);
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
if (leftmost_leaf != node)
#endif
deallocate(LeafSize(node->max_count()), node, alloc);
++pos;
} while (pos <= parent->finish());
assert(pos > parent->finish());
do {
node = parent;
pos = node->position();
parent = node->parent();
node->value_destroy_n(node->start(), node->count(), alloc);
deallocate(InternalSize(), node, alloc);
if (parent == delete_root_parent) {
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc);
#endif
return;
}
++pos;
} while (pos > parent->finish());
}
}
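// Computes the distance between two iterators by walking the tree from
// `other` up to *this, accumulating whole nodes at a time instead of
// advancing one element per step, so the cost is proportional to the number
// of nodes between the two positions rather than the number of values.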
template <typename N, typename R, typename P>
auto btree_iterator<N, R, P>::distance_slow(const_iterator other) const
-> difference_type {
const_iterator begin = other;
const_iterator end = *this;
assert(begin.node_ != end.node_ || !begin.node_->is_leaf() ||
begin.position_ != end.position_);
const node_type *node = begin.node_;
difference_type count = node->is_leaf() ? -begin.position_ : 0;
if (node->is_internal()) {
++count;
node = node->child(begin.position_ + 1);
}
while (node->is_internal()) node = node->start_child();
size_type pos = node->position();
const node_type *parent = node->parent();
for (;;) {
assert(pos <= parent->finish());
do {
node = parent->child(static_cast<field_type>(pos));
if (node->is_internal()) {
while (node->is_internal()) node = node->start_child();
pos = node->position();
parent = node->parent();
}
if (node == end.node_) return count + end.position_;
if (parent == end.node_ && pos == static_cast<size_type>(end.position_))
return count + node->count();
count += node->count() + 1;
++pos;
} while (pos <= parent->finish());
assert(pos > parent->finish());
do {
node = parent;
pos = node->position();
parent = node->parent();
if (parent == end.node_ && pos == static_cast<size_type>(end.position_))
return count - 1;
++pos;
} while (pos > parent->finish());
}
}
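// Advances an iterator across a node boundary. From the end of a leaf we
// climb toward the root until a parent has a value to the right; if none
// does, the iterator is restored to its saved (end) position. From an
// internal node we descend to the leftmost leaf of the next child.
// decrement_slow() below is the mirror image.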
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::increment_slow() {
if (node_->is_leaf()) {
assert(position_ >= node_->finish());
btree_iterator save(*this);
while (position_ == node_->finish() && !node_->is_root()) {
assert(node_->parent()->child(node_->position()) == node_);
position_ = node_->position();
node_ = node_->parent();
}
if (position_ == node_->finish()) {
*this = save;
}
} else {
assert(position_ < node_->finish());
node_ = node_->child(static_cast<field_type>(position_ + 1));
while (node_->is_internal()) {
node_ = node_->start_child();
}
position_ = node_->start();
}
}
template <typename N, typename R, typename P>
void btree_iterator<N, R, P>::decrement_slow() {
if (node_->is_leaf()) {
assert(position_ <= -1);
btree_iterator save(*this);
while (position_ < node_->start() && !node_->is_root()) {
assert(node_->parent()->child(node_->position()) == node_);
position_ = node_->position() - 1;
node_ = node_->parent();
}
if (position_ < node_->start()) {
*this = save;
}
} else {
assert(position_ >= node_->start());
node_ = node_->child(static_cast<field_type>(position_));
while (node_->is_internal()) {
node_ = node_->child(node_->finish());
}
position_ = node_->finish() - 1;
}
}
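// Bulk-loads this (empty) tree from `other`, whose values are already in
// sorted order: the first value goes through insert_multi() (which also
// creates the root), and every subsequent value is emplaced directly at
// end(), avoiding a per-value search from the root.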
template <typename P>
template <typename Btree>
void btree<P>::copy_or_move_values_in_order(Btree &other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
assert(empty());
auto iter = other.begin();
if (iter == other.end()) return;
insert_multi(iter.slot());
++iter;
for (; iter != other.end(); ++iter) {
internal_emplace(end(), iter.slot());
}
}
template <typename P>
constexpr bool btree<P>::static_assert_validation() {
static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
"Key comparison must be nothrow copy constructible");
static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
"Allocator must be nothrow copy constructible");
static_assert(std::is_trivially_copyable<iterator>::value,
"iterator not trivially copyable.");
static_assert(
kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
"target node size too large");
static_assert(
compare_has_valid_result_type<key_compare, key_type>(),
"key comparison function must return absl::{weak,strong}_ordering or "
"bool.");
static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
"node space assumption incorrect");
return true;
}
template <typename P>
template <typename K>
auto btree<P>::lower_bound_equal(const K &key) const
-> std::pair<iterator, bool> {
const SearchResult<iterator, is_key_compare_to::value> res =
internal_lower_bound(key);
const iterator lower = iterator(internal_end(res.value));
const bool equal = res.HasMatch()
? res.IsEq()
: lower != end() && !compare_keys(key, lower.key());
return {lower, equal};
}
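// Returns the range of elements equivalent to `key`. When the container
// cannot hold equivalent keys, the range has at most one element, so a single
// lower-bound search suffices; otherwise a second search via upper_bound() is
// only performed when the element after `lower` still compares equivalent.
// Illustrative usage (hypothetical values):
//   absl::btree_multiset<int> s = {1, 3, 3, 5};
//   auto range = s.equal_range(3);
//   assert(std::distance(range.first, range.second) == 2);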
template <typename P>
template <typename K>
auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
const iterator lower = lower_and_equal.first;
if (!lower_and_equal.second) {
return {lower, lower};
}
const iterator next = std::next(lower);
if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
assert(next == end() || compare_keys(key, next.key()));
return {lower, next};
}
if (next == end() || compare_keys(key, next.key())) return {lower, next};
return {lower, upper_bound(key)};
}
template <typename P>
template <typename K, typename... Args>
auto btree<P>::insert_unique(const K &key, Args &&...args)
-> std::pair<iterator, bool> {
if (empty()) {
mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
iterator iter = res.value;
if (res.HasMatch()) {
if (res.IsEq()) {
return {iter, false};
}
} else {
iterator last = internal_last(iter);
if (last.node_ && !compare_keys(key, last.key())) {
return {last, false};
}
}
return {internal_emplace(iter, std::forward<Args>(args)...), true};
}
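// Attempts to insert next to the hinted `position` before falling back to a
// full-tree insert_unique(): the hint is usable when `key` sorts between the
// element before `position` and `position` itself, or between `position` and
// its successor; if `key` is equivalent to the element at the hint, no
// insertion takes place.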
template <typename P>
template <typename K, typename... Args>
inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
Args &&...args)
-> std::pair<iterator, bool> {
if (!empty()) {
if (position == end() || compare_keys(key, position.key())) {
if (position == begin() || compare_keys(std::prev(position).key(), key)) {
return {internal_emplace(position, std::forward<Args>(args)...), true};
}
} else if (compare_keys(position.key(), key)) {
++position;
if (position == end() || compare_keys(key, position.key())) {
return {internal_emplace(position, std::forward<Args>(args)...), true};
}
} else {
return {position, false};
}
}
return insert_unique(key, std::forward<Args>(args)...);
}
template <typename P>
template <typename InputIterator, typename>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
for (; b != e; ++b) {
insert_hint_unique(end(), params_type::key(*b), *b);
}
}
template <typename P>
template <typename InputIterator>
void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
for (; b != e; ++b) {
auto node_handle =
CommonAccess::Construct<node_handle_type>(get_allocator(), *b);
slot_type *slot = CommonAccess::GetSlot(node_handle);
insert_hint_unique(end(), params_type::key(slot), slot);
}
}
template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
mutable_root() = mutable_rightmost() = new_leaf_root_node(1);
}
iterator iter = internal_upper_bound(key);
if (iter.node_ == nullptr) {
iter = end();
}
return internal_emplace(iter, std::forward<ValueType>(v));
}
template <typename P>
template <typename ValueType>
auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
if (!empty()) {
const key_type &key = params_type::key(v);
if (position == end() || !compare_keys(position.key(), key)) {
if (position == begin() ||
!compare_keys(key, std::prev(position).key())) {
return internal_emplace(position, std::forward<ValueType>(v));
}
} else {
++position;
if (position == end() || !compare_keys(position.key(), key)) {
return internal_emplace(position, std::forward<ValueType>(v));
}
}
}
return insert_multi(std::forward<ValueType>(v));
}
template <typename P>
template <typename InputIterator>
void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
for (; b != e; ++b) {
insert_hint_multi(end(), *b);
}
}
template <typename P>
auto btree<P>::operator=(const btree &other) -> btree & {
if (this != &other) {
clear();
*mutable_key_comp() = other.key_comp();
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
*mutable_allocator() = other.allocator();
}
copy_or_move_values_in_order(other);
}
return *this;
}
template <typename P>
auto btree<P>::operator=(btree &&other) noexcept -> btree & {
if (this != &other) {
clear();
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_move_assignment::value) {
swap(root_, other.root_);
swap(rightmost_, other.rightmost_);
swap(size_, other.size_);
} else {
if (allocator() == other.allocator()) {
swap(mutable_root(), other.mutable_root());
swap(*mutable_key_comp(), *other.mutable_key_comp());
swap(mutable_rightmost(), other.mutable_rightmost());
swap(size_, other.size_);
} else {
*mutable_key_comp() = other.key_comp();
copy_or_move_values_in_order(other);
}
}
}
return *this;
}
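// Erases the element at `iter`. If the element lives in an internal node, its
// slot is refilled by transferring the in-order predecessor (which is always
// in a leaf) into it, so the actual removal happens in a leaf; the tree is
// then rebalanced bottom-up via rebalance_after_delete(), and for the
// internal case the result is incremented past the moved predecessor.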
template <typename P>
auto btree<P>::erase(iterator iter) -> iterator {
iter.node_->value_destroy(static_cast<field_type>(iter.position_),
mutable_allocator());
iter.update_generation();
const bool internal_delete = iter.node_->is_internal();
if (internal_delete) {
iterator internal_iter(iter);
--iter;
assert(iter.node_->is_leaf());
internal_iter.node_->transfer(
static_cast<size_type>(internal_iter.position_),
static_cast<size_type>(iter.position_), iter.node_,
mutable_allocator());
} else {
const field_type transfer_from =
static_cast<field_type>(iter.position_ + 1);
const field_type num_to_transfer = iter.node_->finish() - transfer_from;
iter.node_->transfer_n(num_to_transfer,
static_cast<size_type>(iter.position_),
transfer_from, iter.node_, mutable_allocator());
}
iter.node_->set_finish(iter.node_->finish() - 1);
--size_;
iterator res = rebalance_after_delete(iter);
if (internal_delete) {
++res;
}
return res;
}
template <typename P>
auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
iterator res(iter);
bool first_iteration = true;
for (;;) {
if (iter.node_ == root()) {
try_shrink();
if (empty()) {
return end();
}
break;
}
if (iter.node_->count() >= kMinNodeValues) {
break;
}
bool merged = try_merge_or_rebalance(&iter);
if (first_iteration) {
res = iter;
first_iteration = false;
}
if (!merged) {
break;
}
iter.position_ = iter.node_->position();
iter.node_ = iter.node_->parent();
}
res.update_generation();
if (res.position_ == res.node_->finish()) {
res.position_ = res.node_->finish() - 1;
++res;
}
return res;
}
template <typename P>
auto btree<P>::erase_range(iterator begin, iterator end)
-> std::pair<size_type, iterator> {
size_type count = static_cast<size_type>(end - begin);
assert(count >= 0);
if (count == 0) {
return {0, begin};
}
if (static_cast<size_type>(count) == size_) {
clear();
return {count, this->end()};
}
if (begin.node_ == end.node_) {
assert(end.position_ > begin.position_);
begin.node_->remove_values(
static_cast<field_type>(begin.position_),
static_cast<field_type>(end.position_ - begin.position_),
mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
const size_type target_size = size_ - count;
while (size_ > target_size) {
if (begin.node_->is_leaf()) {
const size_type remaining_to_erase = size_ - target_size;
const size_type remaining_in_node =
static_cast<size_type>(begin.node_->finish() - begin.position_);
const field_type to_erase = static_cast<field_type>(
(std::min)(remaining_to_erase, remaining_in_node));
begin.node_->remove_values(static_cast<field_type>(begin.position_),
to_erase, mutable_allocator());
size_ -= to_erase;
begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
}
begin.update_generation();
return {count, begin};
}
template <typename P>
void btree<P>::clear() {
if (!empty()) {
node_type::clear_and_delete(root(), mutable_allocator());
}
mutable_root() = mutable_rightmost() = EmptyNode();
size_ = 0;
}
template <typename P>
void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
swap(rightmost_, other.rightmost_);
} else {
assert(allocator() == other.allocator());
swap(mutable_rightmost(), other.mutable_rightmost());
swap(*mutable_key_comp(), *other.mutable_key_comp());
}
swap(mutable_root(), other.mutable_root());
swap(size_, other.size_);
}
template <typename P>
void btree<P>::verify() const {
assert(root() != nullptr);
assert(leftmost() != nullptr);
assert(rightmost() != nullptr);
assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
assert(leftmost() == (++const_iterator(root(), -1)).node_);
assert(rightmost() == (--const_iterator(root(), root()->finish())).node_);
assert(leftmost()->is_leaf());
assert(rightmost()->is_leaf());
}
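// Makes room for an insertion into the full node at *iter. First tries to
// shift values to a non-full left or right sibling (choosing the count so the
// insertion point stays in a sensible node); if both siblings are full,
// ensures the parent has a free slot (recursing if the parent is itself full,
// or growing a new root), then splits the node and redirects the iterator if
// the insertion position moved into the new sibling.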
template <typename P>
void btree<P>::rebalance_or_split(iterator *iter) {
node_type *&node = iter->node_;
int &insert_position = iter->position_;
assert(node->count() == node->max_count());
assert(kNodeSlots == node->max_count());
node_type *parent = node->parent();
if (node != root()) {
if (node->position() > parent->start()) {
node_type *left = parent->child(node->position() - 1);
assert(left->max_count() == kNodeSlots);
if (left->count() < kNodeSlots) {
field_type to_move =
(kNodeSlots - left->count()) /
(1 + (static_cast<field_type>(insert_position) < kNodeSlots));
to_move = (std::max)(field_type{1}, to_move);
if (static_cast<field_type>(insert_position) - to_move >=
node->start() ||
left->count() + to_move < kNodeSlots) {
left->rebalance_right_to_left(to_move, node, mutable_allocator());
assert(node->max_count() - node->count() == to_move);
insert_position = static_cast<int>(
static_cast<field_type>(insert_position) - to_move);
if (insert_position < node->start()) {
insert_position = insert_position + left->count() + 1;
node = left;
}
assert(node->count() < node->max_count());
return;
}
}
}
if (node->position() < parent->finish()) {
node_type *right = parent->child(node->position() + 1);
assert(right->max_count() == kNodeSlots);
if (right->count() < kNodeSlots) {
field_type to_move = (kNodeSlots - right->count()) /
(1 + (insert_position > node->start()));
to_move = (std::max)(field_type{1}, to_move);
if (static_cast<field_type>(insert_position) <=
node->finish() - to_move ||
right->count() + to_move < kNodeSlots) {
node->rebalance_left_to_right(to_move, right, mutable_allocator());
if (insert_position > node->finish()) {
insert_position = insert_position - node->count() - 1;
node = right;
}
assert(node->count() < node->max_count());
return;
}
}
}
assert(parent->max_count() == kNodeSlots);
if (parent->count() == kNodeSlots) {
iterator parent_iter(parent, node->position());
rebalance_or_split(&parent_iter);
parent = node->parent();
}
} else {
parent = new_internal_node(0, parent);
parent->set_generation(root()->generation());
parent->init_child(parent->start(), node);
mutable_root() = parent;
assert(parent->start_child()->is_internal() ||
parent->start_child() == rightmost());
}
node_type *split_node;
if (node->is_leaf()) {
split_node = new_leaf_node(node->position() + 1, parent);
node->split(insert_position, split_node, mutable_allocator());
if (rightmost() == node) mutable_rightmost() = split_node;
} else {
split_node = new_internal_node(node->position() + 1, parent);
node->split(insert_position, split_node, mutable_allocator());
}
if (insert_position > node->finish()) {
insert_position = insert_position - node->count() - 1;
node = split_node;
}
}
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
if (rightmost() == right) mutable_rightmost() = left;
}
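// Called when *iter's node has become underfull after a deletion. Returns
// true if the node was merged with a sibling (the parent lost a value, so the
// caller must continue rebalancing one level up), and false if values were
// borrowed from a sibling instead, or nothing needed doing. Borrowing from
// the right sibling is skipped after deleting the node's first element (and
// from the left after deleting the last) to keep repeated deletions from the
// front or back of the tree cheap.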
template <typename P>
bool btree<P>::try_merge_or_rebalance(iterator *iter) {
node_type *parent = iter->node_->parent();
if (iter->node_->position() > parent->start()) {
node_type *left = parent->child(iter->node_->position() - 1);
assert(left->max_count() == kNodeSlots);
if (1U + left->count() + iter->node_->count() <= kNodeSlots) {
iter->position_ += 1 + left->count();
merge_nodes(left, iter->node_);
iter->node_ = left;
return true;
}
}
if (iter->node_->position() < parent->finish()) {
node_type *right = parent->child(iter->node_->position() + 1);
assert(right->max_count() == kNodeSlots);
if (1U + iter->node_->count() + right->count() <= kNodeSlots) {
merge_nodes(iter->node_, right);
return true;
}
if (right->count() > kMinNodeValues &&
(iter->node_->count() == 0 || iter->position_ > iter->node_->start())) {
field_type to_move = (right->count() - iter->node_->count()) / 2;
to_move =
(std::min)(to_move, static_cast<field_type>(right->count() - 1));
iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator());
return false;
}
}
if (iter->node_->position() > parent->start()) {
node_type *left = parent->child(iter->node_->position() - 1);
if (left->count() > kMinNodeValues &&
(iter->node_->count() == 0 ||
iter->position_ < iter->node_->finish())) {
field_type to_move = (left->count() - iter->node_->count()) / 2;
to_move = (std::min)(to_move, static_cast<field_type>(left->count() - 1));
left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator());
iter->position_ += to_move;
return false;
}
}
return false;
}
template <typename P>
void btree<P>::try_shrink() {
node_type *orig_root = root();
if (orig_root->count() > 0) {
return;
}
if (orig_root->is_leaf()) {
assert(size() == 0);
mutable_root() = mutable_rightmost() = EmptyNode();
} else {
node_type *child = orig_root->start_child();
child->make_root();
mutable_root() = child;
}
node_type::clear_and_delete(orig_root, mutable_allocator());
}
template <typename P>
template <typename IterType>
inline IterType btree<P>::internal_last(IterType iter) {
assert(iter.node_ != nullptr);
while (iter.position_ == iter.node_->finish()) {
iter.position_ = iter.node_->position();
iter.node_ = iter.node_->parent();
if (iter.node_->is_leaf()) {
iter.node_ = nullptr;
break;
}
}
iter.update_generation();
return iter;
}
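// Emplaces a value at `iter`, growing the tree as needed. An iterator into an
// internal node is first redirected to the equivalent leaf position. A small
// (not yet full-sized) root leaf is grown by reallocating it at up to double
// the capacity; a full regular node goes through rebalance_or_split(). In
// sanitizer builds (ASan/HWASan), the leaf that receives the value is
// replaced with a freshly allocated node so that stale pointers and
// references into it are reliably diagnosed.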
template <typename P>
template <typename... Args>
inline auto btree<P>::internal_emplace(iterator iter, Args &&...args)
-> iterator {
if (iter.node_->is_internal()) {
--iter;
++iter.position_;
}
const field_type max_count = iter.node_->max_count();
allocator_type *alloc = mutable_allocator();
const auto transfer_and_delete = [&](node_type *old_node,
node_type *new_node) {
new_node->transfer_n(old_node->count(), new_node->start(),
old_node->start(), old_node, alloc);
new_node->set_finish(old_node->finish());
old_node->set_finish(old_node->start());
new_node->set_generation(old_node->generation());
node_type::clear_and_delete(old_node, alloc);
};
const auto replace_leaf_root_node = [&](field_type new_node_size) {
assert(iter.node_ == root());
node_type *old_root = iter.node_;
node_type *new_root = iter.node_ = new_leaf_root_node(new_node_size);
transfer_and_delete(old_root, new_root);
mutable_root() = mutable_rightmost() = new_root;
};
bool replaced_node = false;
if (iter.node_->count() == max_count) {
if (max_count < kNodeSlots) {
replace_leaf_root_node(static_cast<field_type>(
(std::min)(static_cast<int>(kNodeSlots), 2 * max_count)));
replaced_node = true;
} else {
rebalance_or_split(&iter);
}
}
(void)replaced_node;
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_HWADDRESS_SANITIZER)
if (!replaced_node) {
assert(iter.node_->is_leaf());
if (iter.node_->is_root()) {
replace_leaf_root_node(max_count);
} else {
node_type *old_node = iter.node_;
const bool was_rightmost = rightmost() == old_node;
const bool was_leftmost = leftmost() == old_node;
node_type *parent = old_node->parent();
const field_type position = old_node->position();
node_type *new_node = iter.node_ = new_leaf_node(position, parent);
parent->set_child_noupdate_position(position, new_node);
transfer_and_delete(old_node, new_node);
if (was_rightmost) mutable_rightmost() = new_node;
if (was_leftmost) root()->set_parent(new_node);
}
}
#endif
iter.node_->emplace_value(static_cast<field_type>(iter.position_), alloc,
std::forward<Args>(args)...);
assert(
iter.node_->is_ordered_correctly(static_cast<field_type>(iter.position_),
original_key_compare(key_comp())) &&
"If this assert fails, then either (1) the comparator may violate "
"transitivity, i.e. comp(a,b) && comp(b,c) -> comp(a,c) (see "
"https:
"key may have been mutated after it was inserted into the tree.");
++size_;
iter.update_generation();
return iter;
}
template <typename P>
template <typename K>
inline auto btree<P>::internal_locate(const K &key) const
-> SearchResult<iterator, is_key_compare_to::value> {
iterator iter(const_cast<node_type *>(root()));
for (;;) {
SearchResult<size_type, is_key_compare_to::value> res =
iter.node_->lower_bound(key, key_comp());
iter.position_ = static_cast<int>(res.value);
if (res.IsEq()) {
return {iter, MatchKind::kEq};
}
if (iter.node_->is_leaf()) {
break;
}
iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
}
return {iter, MatchKind::kNe};
}
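// Finds the first element not less than `key`. When equivalent keys are
// impossible, internal_locate() can stop at the first match it sees. When
// they are possible, the search must continue all the way to a leaf, since an
// equivalent key found in an internal node may be preceded by more equivalent
// keys in the subtree to its left; `seen_eq` remembers whether any match was
// encountered on the way down.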
template <typename P>
template <typename K>
auto btree<P>::internal_lower_bound(const K &key) const
-> SearchResult<iterator, is_key_compare_to::value> {
if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
ret.value = internal_last(ret.value);
return ret;
}
iterator iter(const_cast<node_type *>(root()));
SearchResult<size_type, is_key_compare_to::value> res;
bool seen_eq = false;
for (;;) {
res = iter.node_->lower_bound(key, key_comp());
iter.position_ = static_cast<int>(res.value);
if (iter.node_->is_leaf()) {
break;
}
seen_eq = seen_eq || res.IsEq();
iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
}
if (res.IsEq()) return {iter, MatchKind::kEq};
return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
}
template <typename P>
template <typename K>
auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
iterator iter(const_cast<node_type *>(root()));
for (;;) {
iter.position_ = static_cast<int>(iter.node_->upper_bound(key, key_comp()));
if (iter.node_->is_leaf()) {
break;
}
iter.node_ = iter.node_->child(static_cast<field_type>(iter.position_));
}
return internal_last(iter);
}
template <typename P>
template <typename K>
auto btree<P>::internal_find(const K &key) const -> iterator {
SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
if (res.HasMatch()) {
if (res.IsEq()) {
return res.value;
}
} else {
const iterator iter = internal_last(res.value);
if (iter.node_ != nullptr && !compare_keys(key, iter.key())) {
return iter;
}
}
return {nullptr, 0};
}
template <typename P>
typename btree<P>::size_type btree<P>::internal_verify(
const node_type *node, const key_type *lo, const key_type *hi) const {
assert(node->count() > 0);
assert(node->count() <= node->max_count());
if (lo) {
assert(!compare_keys(node->key(node->start()), *lo));
}
if (hi) {
assert(!compare_keys(*hi, node->key(node->finish() - 1)));
}
for (int i = node->start() + 1; i < node->finish(); ++i) {
assert(!compare_keys(node->key(i), node->key(i - 1)));
}
size_type count = node->count();
if (node->is_internal()) {
for (field_type i = node->start(); i <= node->finish(); ++i) {
assert(node->child(i) != nullptr);
assert(node->child(i)->parent() == node);
assert(node->child(i)->position() == i);
count += internal_verify(node->child(i),
i == node->start() ? lo : &node->key(i - 1),
i == node->finish() ? hi : &node->key(i));
}
}
return count;
}
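// Backdoor used by the public absl::erase_if() overloads for b-tree
// containers. Rather than calling erase() per element, matching values in a
// leaf are destroyed and the survivors compacted in place with a single
// size/finish adjustment per node, followed by one rebalance; positions in
// internal nodes fall back to ordinary erase(). Returns the number of erased
// elements.
// Illustrative usage (hypothetical values):
//   absl::btree_set<int> s = {1, 2, 3, 4, 5, 6};
//   absl::erase_if(s, [](int v) { return v % 2 == 0; });  // s == {1, 3, 5}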
struct btree_access {
template <typename BtreeContainer, typename Pred>
static auto erase_if(BtreeContainer &container, Pred pred) ->
typename BtreeContainer::size_type {
const auto initial_size = container.size();
auto &tree = container.tree_;
auto *alloc = tree.mutable_allocator();
for (auto it = container.begin(); it != container.end();) {
if (!pred(*it)) {
++it;
continue;
}
auto *node = it.node_;
if (node->is_internal()) {
it = container.erase(it);
continue;
}
int to_pos = it.position_;
node->value_destroy(it.position_, alloc);
while (++it.position_ < node->finish()) {
it.update_generation();
if (pred(*it)) {
node->value_destroy(it.position_, alloc);
} else {
node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc);
}
}
const int num_deleted = node->finish() - to_pos;
tree.size_ -= num_deleted;
node->set_finish(to_pos);
it.position_ = to_pos;
it = tree.rebalance_after_delete(it);
}
return initial_size - container.size();
}
};
#undef ABSL_BTREE_ENABLE_GENERATIONS
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl
#endif | #include "absl/container/btree_test.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <functional>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <numeric>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/algorithm/container.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/flags/flag.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/types/optional.h"
ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::absl::test_internal::CopyableMovableInstance;
using ::absl::test_internal::InstanceTracker;
using ::absl::test_internal::MovableOnlyInstance;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::Pair;
using ::testing::SizeIs;
template <typename T, typename U>
void CheckPairEquals(const T &x, const U &y) {
ABSL_INTERNAL_CHECK(x == y, "Values are unequal.");
}
template <typename T, typename U, typename V, typename W>
void CheckPairEquals(const std::pair<T, U> &x, const std::pair<V, W> &y) {
CheckPairEquals(x.first, y.first);
CheckPairEquals(x.second, y.second);
}
}  // namespace
template <typename TreeType, typename CheckerType>
class base_checker {
public:
using key_type = typename TreeType::key_type;
using value_type = typename TreeType::value_type;
using key_compare = typename TreeType::key_compare;
using pointer = typename TreeType::pointer;
using const_pointer = typename TreeType::const_pointer;
using reference = typename TreeType::reference;
using const_reference = typename TreeType::const_reference;
using size_type = typename TreeType::size_type;
using difference_type = typename TreeType::difference_type;
using iterator = typename TreeType::iterator;
using const_iterator = typename TreeType::const_iterator;
using reverse_iterator = typename TreeType::reverse_iterator;
using const_reverse_iterator = typename TreeType::const_reverse_iterator;
public:
base_checker() : const_tree_(tree_) {}
base_checker(const base_checker &other)
: tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {}
template <typename InputIterator>
base_checker(InputIterator b, InputIterator e)
: tree_(b, e), const_tree_(tree_), checker_(b, e) {}
iterator begin() { return tree_.begin(); }
const_iterator begin() const { return tree_.begin(); }
iterator end() { return tree_.end(); }
const_iterator end() const { return tree_.end(); }
reverse_iterator rbegin() { return tree_.rbegin(); }
const_reverse_iterator rbegin() const { return tree_.rbegin(); }
reverse_iterator rend() { return tree_.rend(); }
const_reverse_iterator rend() const { return tree_.rend(); }
template <typename IterType, typename CheckerIterType>
IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const {
if (tree_iter == tree_.end()) {
ABSL_INTERNAL_CHECK(checker_iter == checker_.end(),
"Checker iterator not at end.");
} else {
CheckPairEquals(*tree_iter, *checker_iter);
}
return tree_iter;
}
template <typename IterType, typename CheckerIterType>
IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const {
if (tree_iter == tree_.rend()) {
ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(),
"Checker iterator not at rend.");
} else {
CheckPairEquals(*tree_iter, *checker_iter);
}
return tree_iter;
}
void value_check(const value_type &v) {
typename KeyOfValue<typename TreeType::key_type,
typename TreeType::value_type>::type key_of_value;
const key_type &key = key_of_value(v);
CheckPairEquals(*find(key), v);
lower_bound(key);
upper_bound(key);
equal_range(key);
contains(key);
count(key);
}
void erase_check(const key_type &key) {
EXPECT_FALSE(tree_.contains(key));
EXPECT_EQ(tree_.find(key), const_tree_.end());
EXPECT_FALSE(const_tree_.contains(key));
EXPECT_EQ(const_tree_.find(key), tree_.end());
EXPECT_EQ(tree_.equal_range(key).first,
const_tree_.equal_range(key).second);
}
iterator lower_bound(const key_type &key) {
return iter_check(tree_.lower_bound(key), checker_.lower_bound(key));
}
const_iterator lower_bound(const key_type &key) const {
return iter_check(tree_.lower_bound(key), checker_.lower_bound(key));
}
iterator upper_bound(const key_type &key) {
return iter_check(tree_.upper_bound(key), checker_.upper_bound(key));
}
const_iterator upper_bound(const key_type &key) const {
return iter_check(tree_.upper_bound(key), checker_.upper_bound(key));
}
std::pair<iterator, iterator> equal_range(const key_type &key) {
std::pair<typename CheckerType::iterator, typename CheckerType::iterator>
checker_res = checker_.equal_range(key);
std::pair<iterator, iterator> tree_res = tree_.equal_range(key);
iter_check(tree_res.first, checker_res.first);
iter_check(tree_res.second, checker_res.second);
return tree_res;
}
std::pair<const_iterator, const_iterator> equal_range(
const key_type &key) const {
std::pair<typename CheckerType::const_iterator,
typename CheckerType::const_iterator>
checker_res = checker_.equal_range(key);
std::pair<const_iterator, const_iterator> tree_res = tree_.equal_range(key);
iter_check(tree_res.first, checker_res.first);
iter_check(tree_res.second, checker_res.second);
return tree_res;
}
iterator find(const key_type &key) {
return iter_check(tree_.find(key), checker_.find(key));
}
const_iterator find(const key_type &key) const {
return iter_check(tree_.find(key), checker_.find(key));
}
bool contains(const key_type &key) const { return find(key) != end(); }
size_type count(const key_type &key) const {
size_type res = checker_.count(key);
EXPECT_EQ(res, tree_.count(key));
return res;
}
base_checker &operator=(const base_checker &other) {
tree_ = other.tree_;
checker_ = other.checker_;
return *this;
}
int erase(const key_type &key) {
int size = tree_.size();
int res = checker_.erase(key);
EXPECT_EQ(res, tree_.count(key));
EXPECT_EQ(res, tree_.erase(key));
EXPECT_EQ(tree_.count(key), 0);
EXPECT_EQ(tree_.size(), size - res);
erase_check(key);
return res;
}
iterator erase(iterator iter) {
key_type key = iter.key();
int size = tree_.size();
int count = tree_.count(key);
auto checker_iter = checker_.lower_bound(key);
for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) {
++checker_iter;
}
auto checker_next = checker_iter;
++checker_next;
checker_.erase(checker_iter);
iter = tree_.erase(iter);
EXPECT_EQ(tree_.size(), checker_.size());
EXPECT_EQ(tree_.size(), size - 1);
EXPECT_EQ(tree_.count(key), count - 1);
if (count == 1) {
erase_check(key);
}
return iter_check(iter, checker_next);
}
void erase(iterator begin, iterator end) {
int size = tree_.size();
int count = std::distance(begin, end);
auto checker_begin = checker_.lower_bound(begin.key());
for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) {
++checker_begin;
}
auto checker_end =
end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key());
if (end != tree_.end()) {
for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) {
++checker_end;
}
}
const auto checker_ret = checker_.erase(checker_begin, checker_end);
const auto tree_ret = tree_.erase(begin, end);
EXPECT_EQ(std::distance(checker_.begin(), checker_ret),
std::distance(tree_.begin(), tree_ret));
EXPECT_EQ(tree_.size(), checker_.size());
EXPECT_EQ(tree_.size(), size - count);
}
void clear() {
tree_.clear();
checker_.clear();
}
void swap(base_checker &other) {
tree_.swap(other.tree_);
checker_.swap(other.checker_);
}
void verify() const {
tree_.verify();
EXPECT_EQ(tree_.size(), checker_.size());
auto checker_iter = checker_.begin();
const_iterator tree_iter(tree_.begin());
for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) {
CheckPairEquals(*tree_iter, *checker_iter);
}
for (int n = tree_.size() - 1; n >= 0; --n) {
iter_check(tree_iter, checker_iter);
--tree_iter;
--checker_iter;
}
EXPECT_EQ(tree_iter, tree_.begin());
EXPECT_EQ(checker_iter, checker_.begin());
auto checker_riter = checker_.rbegin();
const_reverse_iterator tree_riter(tree_.rbegin());
for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) {
CheckPairEquals(*tree_riter, *checker_riter);
}
for (int n = tree_.size() - 1; n >= 0; --n) {
riter_check(tree_riter, checker_riter);
--tree_riter;
--checker_riter;
}
EXPECT_EQ(tree_riter, tree_.rbegin());
EXPECT_EQ(checker_riter, checker_.rbegin());
}
const TreeType &tree() const { return tree_; }
size_type size() const {
EXPECT_EQ(tree_.size(), checker_.size());
return tree_.size();
}
size_type max_size() const { return tree_.max_size(); }
bool empty() const {
EXPECT_EQ(tree_.empty(), checker_.empty());
return tree_.empty();
}
protected:
TreeType tree_;
const TreeType &const_tree_;
CheckerType checker_;
};
namespace {
template <typename TreeType, typename CheckerType>
class unique_checker : public base_checker<TreeType, CheckerType> {
using super_type = base_checker<TreeType, CheckerType>;
public:
using iterator = typename super_type::iterator;
using value_type = typename super_type::value_type;
public:
unique_checker() : super_type() {}
unique_checker(const unique_checker &other) : super_type(other) {}
template <class InputIterator>
unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
unique_checker &operator=(const unique_checker &) = default;
std::pair<iterator, bool> insert(const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
this->checker_.insert(v);
std::pair<iterator, bool> tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res.first, *checker_res.first);
EXPECT_EQ(tree_res.second, checker_res.second);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + tree_res.second);
return tree_res;
}
iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
this->checker_.insert(v);
iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res.first);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + checker_res.second);
return tree_res;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
for (; b != e; ++b) {
insert(*b);
}
}
};
template <typename TreeType, typename CheckerType>
class multi_checker : public base_checker<TreeType, CheckerType> {
using super_type = base_checker<TreeType, CheckerType>;
public:
using iterator = typename super_type::iterator;
using value_type = typename super_type::value_type;
public:
multi_checker() : super_type() {}
multi_checker(const multi_checker &other) : super_type(other) {}
template <class InputIterator>
multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
multi_checker &operator=(const multi_checker &) = default;
iterator insert(const value_type &v) {
int size = this->tree_.size();
auto checker_res = this->checker_.insert(v);
iterator tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
return tree_res;
}
iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
auto checker_res = this->checker_.insert(v);
iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
return tree_res;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
for (; b != e; ++b) {
insert(*b);
}
}
};
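// Exercises a checker-wrapped b-tree against the given value sequence:
// inserts every value, then round-trips through copy construction, range
// construction, assignment, swap, and several erase patterns (by key, by
// iterator, and by sub-ranges), calling verify() after each phase to check
// both the tree's internal invariants and its agreement with the reference
// STL container.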
template <typename T, typename V>
void DoTest(const char *name, T *b, const std::vector<V> &values) {
typename KeyOfValue<typename T::key_type, V>::type key_of_value;
T &mutable_b = *b;
const T &const_b = *b;
for (int i = 0; i < values.size(); ++i) {
mutable_b.insert(values[i]);
mutable_b.value_check(values[i]);
}
ASSERT_EQ(mutable_b.size(), values.size());
const_b.verify();
T b_copy(const_b);
EXPECT_EQ(b_copy.size(), const_b.size());
for (int i = 0; i < values.size(); ++i) {
CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]);
}
T b_range(const_b.begin(), const_b.end());
EXPECT_EQ(b_range.size(), const_b.size());
for (int i = 0; i < values.size(); ++i) {
CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
}
b_range.insert(b_copy.begin(), b_copy.end());
b_range.verify();
b_range.clear();
b_range.insert(b_copy.begin(), b_copy.end());
EXPECT_EQ(b_range.size(), b_copy.size());
for (int i = 0; i < values.size(); ++i) {
CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
}
b_range.operator=(b_range);
EXPECT_EQ(b_range.size(), b_copy.size());
b_range.clear();
b_range = b_copy;
EXPECT_EQ(b_range.size(), b_copy.size());
b_range.clear();
b_range.swap(b_copy);
EXPECT_EQ(b_copy.size(), 0);
EXPECT_EQ(b_range.size(), const_b.size());
for (int i = 0; i < values.size(); ++i) {
CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
}
b_range.swap(b_copy);
swap(b_range, b_copy);
EXPECT_EQ(b_copy.size(), 0);
EXPECT_EQ(b_range.size(), const_b.size());
for (int i = 0; i < values.size(); ++i) {
CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
}
swap(b_range, b_copy);
for (int i = 0; i < values.size(); ++i) {
mutable_b.erase(key_of_value(values[i]));
ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0);
}
const_b.verify();
EXPECT_EQ(const_b.size(), 0);
mutable_b = b_copy;
for (int i = 0; i < values.size(); ++i) {
mutable_b.erase(mutable_b.find(key_of_value(values[i])));
}
const_b.verify();
EXPECT_EQ(const_b.size(), 0);
for (int i = 0; i < values.size(); i++) {
mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]);
}
const_b.verify();
mutable_b.erase(mutable_b.begin(), mutable_b.end());
EXPECT_EQ(mutable_b.size(), 0);
const_b.verify();
mutable_b = b_copy;
typename T::iterator mutable_iter_end = mutable_b.begin();
for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end;
mutable_b.erase(mutable_b.begin(), mutable_iter_end);
EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2);
const_b.verify();
mutable_b = b_copy;
typename T::iterator mutable_iter_begin = mutable_b.begin();
for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin;
mutable_b.erase(mutable_iter_begin, mutable_b.end());
EXPECT_EQ(mutable_b.size(), values.size() / 2);
const_b.verify();
mutable_b = b_copy;
mutable_iter_begin = mutable_b.begin();
for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin;
mutable_iter_end = mutable_iter_begin;
for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end;
mutable_b.erase(mutable_iter_begin, mutable_iter_end);
EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4);
const_b.verify();
mutable_b.clear();
}
template <typename T>
void ConstTest() {
using value_type = typename T::value_type;
typename KeyOfValue<typename T::key_type, value_type>::type key_of_value;
T mutable_b;
const T &const_b = mutable_b;
value_type value = Generator<value_type>(2)(2);
mutable_b.insert(value);
EXPECT_TRUE(mutable_b.contains(key_of_value(value)));
EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end());
EXPECT_TRUE(const_b.contains(key_of_value(value)));
EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end());
EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value);
EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end());
EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value);
typename T::iterator mutable_iter(mutable_b.begin());
EXPECT_EQ(mutable_iter, const_b.begin());
EXPECT_NE(mutable_iter, const_b.end());
EXPECT_EQ(const_b.begin(), mutable_iter);
EXPECT_NE(const_b.end(), mutable_iter);
typename T::reverse_iterator mutable_riter(mutable_b.rbegin());
EXPECT_EQ(mutable_riter, const_b.rbegin());
EXPECT_NE(mutable_riter, const_b.rend());
EXPECT_EQ(const_b.rbegin(), mutable_riter);
EXPECT_NE(const_b.rend(), mutable_riter);
typename T::const_iterator const_iter(mutable_iter);
EXPECT_EQ(const_iter, mutable_b.begin());
EXPECT_NE(const_iter, mutable_b.end());
EXPECT_EQ(mutable_b.begin(), const_iter);
EXPECT_NE(mutable_b.end(), const_iter);
typename T::const_reverse_iterator const_riter(mutable_riter);
EXPECT_EQ(const_riter, mutable_b.rbegin());
EXPECT_NE(const_riter, mutable_b.rend());
EXPECT_EQ(mutable_b.rbegin(), const_riter);
EXPECT_NE(mutable_b.rend(), const_riter);
const_b.verify();
ASSERT_TRUE(!const_b.empty());
EXPECT_EQ(const_b.size(), 1);
EXPECT_GT(const_b.max_size(), 0);
EXPECT_TRUE(const_b.contains(key_of_value(value)));
EXPECT_EQ(const_b.count(key_of_value(value)), 1);
}
template <typename T, typename C>
void BtreeTest() {
ConstTest<T>();
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
GTEST_FLAG_GET(random_seed));
unique_checker<T, C> container;
std::vector<V> sorted_values(random_values);
std::sort(sorted_values.begin(), sorted_values.end());
DoTest("sorted: ", &container, sorted_values);
std::reverse(sorted_values.begin(), sorted_values.end());
DoTest("rsorted: ", &container, sorted_values);
DoTest("random: ", &container, random_values);
}
template <typename T, typename C>
void BtreeMultiTest() {
ConstTest<T>();
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
GTEST_FLAG_GET(random_seed));
multi_checker<T, C> container;
std::vector<V> sorted_values(random_values);
std::sort(sorted_values.begin(), sorted_values.end());
DoTest("sorted: ", &container, sorted_values);
std::reverse(sorted_values.begin(), sorted_values.end());
DoTest("rsorted: ", &container, sorted_values);
DoTest("random: ", &container, random_values);
std::vector<V> duplicate_values(random_values);
duplicate_values.insert(duplicate_values.end(), random_values.begin(),
random_values.end());
DoTest("duplicates:", &container, duplicate_values);
std::vector<V> identical_values(100);
std::fill(identical_values.begin(), identical_values.end(),
Generator<V>(2)(2));
DoTest("identical: ", &container, identical_values);
}
template <typename T>
void BtreeMapTest() {
using value_type = typename T::value_type;
using mapped_type = typename T::mapped_type;
mapped_type m = Generator<mapped_type>(0)(0);
(void)m;
T b;
for (int i = 0; i < 1000; i++) {
value_type v = Generator<value_type>(1000)(i);
b[v.first] = v.second;
}
EXPECT_EQ(b.size(), 1000);
EXPECT_EQ(b.begin()->first, Generator<value_type>(1000)(0).first);
EXPECT_EQ(b.begin()->second, Generator<value_type>(1000)(0).second);
EXPECT_EQ(b.rbegin()->first, Generator<value_type>(1000)(999).first);
EXPECT_EQ(b.rbegin()->second, Generator<value_type>(1000)(999).second);
}
template <typename T>
void BtreeMultiMapTest() {
using mapped_type = typename T::mapped_type;
mapped_type m = Generator<mapped_type>(0)(0);
(void)m;
}
template <typename K, int N = 256>
void SetTest() {
EXPECT_EQ(
sizeof(absl::btree_set<K>),
2 * sizeof(void *) + sizeof(typename absl::btree_set<K>::size_type));
using BtreeSet = absl::btree_set<K>;
BtreeTest<BtreeSet, std::set<K>>();
}
template <typename K, int N = 256>
void MapTest() {
EXPECT_EQ(
sizeof(absl::btree_map<K, K>),
2 * sizeof(void *) + sizeof(typename absl::btree_map<K, K>::size_type));
using BtreeMap = absl::btree_map<K, K>;
BtreeTest<BtreeMap, std::map<K, K>>();
BtreeMapTest<BtreeMap>();
}
TEST(Btree, set_int32) { SetTest<int32_t>(); }
TEST(Btree, set_string) { SetTest<std::string>(); }
TEST(Btree, set_cord) { SetTest<absl::Cord>(); }
TEST(Btree, map_int32) { MapTest<int32_t>(); }
TEST(Btree, map_string) { MapTest<std::string>(); }
TEST(Btree, map_cord) { MapTest<absl::Cord>(); }
template <typename K, int N = 256>
void MultiSetTest() {
EXPECT_EQ(
sizeof(absl::btree_multiset<K>),
2 * sizeof(void *) + sizeof(typename absl::btree_multiset<K>::size_type));
using BtreeMSet = absl::btree_multiset<K>;
BtreeMultiTest<BtreeMSet, std::multiset<K>>();
}
template <typename K, int N = 256>
void MultiMapTest() {
EXPECT_EQ(sizeof(absl::btree_multimap<K, K>),
2 * sizeof(void *) +
sizeof(typename absl::btree_multimap<K, K>::size_type));
using BtreeMMap = absl::btree_multimap<K, K>;
BtreeMultiTest<BtreeMMap, std::multimap<K, K>>();
BtreeMultiMapTest<BtreeMMap>();
}
TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
TEST(Btree, multiset_cord) { MultiSetTest<absl::Cord>(); }
TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
TEST(Btree, multimap_cord) { MultiMapTest<absl::Cord>(); }
struct CompareIntToString {
bool operator()(const std::string &a, const std::string &b) const {
return a < b;
}
bool operator()(const std::string &a, int b) const {
return a < absl::StrCat(b);
}
bool operator()(int a, const std::string &b) const {
return absl::StrCat(a) < b;
}
using is_transparent = void;
};
struct NonTransparentCompare {
template <typename T, typename U>
bool operator()(const T &t, const U &u) const {
EXPECT_TRUE((std::is_same<T, U>()));
return t < u;
}
};
template <typename T>
bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) {
return true;
}
template <typename T>
bool CanEraseWithEmptyBrace(T, ...) {
return false;
}
template <typename T>
void TestHeterogeneous(T table) {
auto lb = table.lower_bound("3");
EXPECT_EQ(lb, table.lower_bound(3));
EXPECT_NE(lb, table.lower_bound(4));
EXPECT_EQ(lb, table.lower_bound({"3"}));
EXPECT_NE(lb, table.lower_bound({}));
auto ub = table.upper_bound("3");
EXPECT_EQ(ub, table.upper_bound(3));
EXPECT_NE(ub, table.upper_bound(5));
EXPECT_EQ(ub, table.upper_bound({"3"}));
EXPECT_NE(ub, table.upper_bound({}));
auto er = table.equal_range("3");
EXPECT_EQ(er, table.equal_range(3));
EXPECT_NE(er, table.equal_range(4));
EXPECT_EQ(er, table.equal_range({"3"}));
EXPECT_NE(er, table.equal_range({}));
auto it = table.find("3");
EXPECT_EQ(it, table.find(3));
EXPECT_NE(it, table.find(4));
EXPECT_EQ(it, table.find({"3"}));
EXPECT_NE(it, table.find({}));
EXPECT_TRUE(table.contains(3));
EXPECT_FALSE(table.contains(4));
EXPECT_TRUE(table.count({"3"}));
EXPECT_FALSE(table.contains({}));
EXPECT_EQ(1, table.count(3));
EXPECT_EQ(0, table.count(4));
EXPECT_EQ(1, table.count({"3"}));
EXPECT_EQ(0, table.count({}));
auto copy = table;
copy.erase(3);
EXPECT_EQ(table.size() - 1, copy.size());
copy.erase(4);
EXPECT_EQ(table.size() - 1, copy.size());
copy.erase({"5"});
EXPECT_EQ(table.size() - 2, copy.size());
EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr));
if (std::is_class<T>()) TestHeterogeneous<const T &>(table);
}
TEST(Btree, HeterogeneousLookup) {
TestHeterogeneous(btree_set<std::string, CompareIntToString>{"1", "3", "5"});
TestHeterogeneous(btree_map<std::string, int, CompareIntToString>{
{"1", 1}, {"3", 3}, {"5", 5}});
TestHeterogeneous(
btree_multiset<std::string, CompareIntToString>{"1", "3", "5"});
TestHeterogeneous(btree_multimap<std::string, int, CompareIntToString>{
{"1", 1}, {"3", 3}, {"5", 5}});
btree_map<std::string, int, CompareIntToString> map{
{"", -1}, {"1", 1}, {"3", 3}, {"5", 5}};
EXPECT_EQ(1, map.at(1));
EXPECT_EQ(3, map.at({"3"}));
EXPECT_EQ(-1, map.at({}));
const auto &cmap = map;
EXPECT_EQ(1, cmap.at(1));
EXPECT_EQ(3, cmap.at({"3"}));
EXPECT_EQ(-1, cmap.at({}));
}
TEST(Btree, NoHeterogeneousLookupWithoutAlias) {
using StringSet = absl::btree_set<std::string, NonTransparentCompare>;
StringSet s;
ASSERT_TRUE(s.insert("hello").second);
ASSERT_TRUE(s.insert("world").second);
EXPECT_TRUE(s.end() == s.find("blah"));
EXPECT_TRUE(s.begin() == s.lower_bound("hello"));
EXPECT_EQ(1, s.count("world"));
EXPECT_TRUE(s.contains("hello"));
EXPECT_TRUE(s.contains("world"));
EXPECT_FALSE(s.contains("blah"));
using StringMultiSet =
absl::btree_multiset<std::string, NonTransparentCompare>;
StringMultiSet ms;
ms.insert("hello");
ms.insert("world");
ms.insert("world");
EXPECT_TRUE(ms.end() == ms.find("blah"));
EXPECT_TRUE(ms.begin() == ms.lower_bound("hello"));
EXPECT_EQ(2, ms.count("world"));
EXPECT_TRUE(ms.contains("hello"));
EXPECT_TRUE(ms.contains("world"));
EXPECT_FALSE(ms.contains("blah"));
}
TEST(Btree, DefaultTransparent) {
{
btree_set<int> s = {1};
double d = 1.1;
EXPECT_EQ(s.begin(), s.find(d));
EXPECT_TRUE(s.contains(d));
}
{
btree_set<std::string> s = {"A"};
EXPECT_EQ(s.begin(), s.find(absl::string_view("A")));
EXPECT_TRUE(s.contains(absl::string_view("A")));
}
}
class StringLike {
public:
StringLike() = default;
StringLike(const char *s) : s_(s) {
++constructor_calls_;
}
bool operator<(const StringLike &a) const { return s_ < a.s_; }
static void clear_constructor_call_count() { constructor_calls_ = 0; }
static int constructor_calls() { return constructor_calls_; }
private:
static int constructor_calls_;
std::string s_;
};
int StringLike::constructor_calls_ = 0;
TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) {
using StringSet = absl::btree_set<StringLike>;
StringSet s;
for (int i = 0; i < 100; ++i) {
ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second);
}
StringLike::clear_constructor_call_count();
s.find("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.contains("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.count("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.lower_bound("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.upper_bound("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.equal_range("50");
ASSERT_EQ(1, StringLike::constructor_calls());
StringLike::clear_constructor_call_count();
s.erase("50");
ASSERT_EQ(1, StringLike::constructor_calls());
}
struct SubstringLess {
SubstringLess() = delete;
explicit SubstringLess(int length) : n(length) {}
bool operator()(const std::string &a, const std::string &b) const {
return absl::string_view(a).substr(0, n) <
absl::string_view(b).substr(0, n);
}
int n;
};
TEST(Btree, SwapKeyCompare) {
using SubstringSet = absl::btree_set<std::string, SubstringLess>;
SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type());
SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type());
ASSERT_TRUE(s1.insert("a").second);
ASSERT_FALSE(s1.insert("aa").second);
ASSERT_TRUE(s2.insert("a").second);
ASSERT_TRUE(s2.insert("aa").second);
ASSERT_FALSE(s2.insert("aaa").second);
swap(s1, s2);
ASSERT_TRUE(s1.insert("b").second);
ASSERT_TRUE(s1.insert("bb").second);
ASSERT_FALSE(s1.insert("bbb").second);
ASSERT_TRUE(s2.insert("b").second);
ASSERT_FALSE(s2.insert("bb").second);
}
TEST(Btree, UpperBoundRegression) {
using SubstringSet = absl::btree_set<std::string, SubstringLess>;
SubstringSet my_set(SubstringLess(3));
my_set.insert("aab");
my_set.insert("abb");
SubstringSet::iterator it = my_set.upper_bound("aaa");
ASSERT_TRUE(it != my_set.end());
EXPECT_EQ("aab", *it);
}
TEST(Btree, Comparison) {
const int kSetSize = 1201;
absl::btree_set<int64_t> my_set;
for (int i = 0; i < kSetSize; ++i) {
my_set.insert(i);
}
absl::btree_set<int64_t> my_set_copy(my_set);
EXPECT_TRUE(my_set_copy == my_set);
EXPECT_TRUE(my_set == my_set_copy);
EXPECT_FALSE(my_set_copy != my_set);
EXPECT_FALSE(my_set != my_set_copy);
my_set.insert(kSetSize);
EXPECT_FALSE(my_set_copy == my_set);
EXPECT_FALSE(my_set == my_set_copy);
EXPECT_TRUE(my_set_copy != my_set);
EXPECT_TRUE(my_set != my_set_copy);
my_set.erase(kSetSize - 1);
EXPECT_FALSE(my_set_copy == my_set);
EXPECT_FALSE(my_set == my_set_copy);
EXPECT_TRUE(my_set_copy != my_set);
EXPECT_TRUE(my_set != my_set_copy);
absl::btree_map<std::string, int64_t> my_map;
for (int i = 0; i < kSetSize; ++i) {
my_map[std::string(i, 'a')] = i;
}
absl::btree_map<std::string, int64_t> my_map_copy(my_map);
EXPECT_TRUE(my_map_copy == my_map);
EXPECT_TRUE(my_map == my_map_copy);
EXPECT_FALSE(my_map_copy != my_map);
EXPECT_FALSE(my_map != my_map_copy);
++my_map_copy[std::string(7, 'a')];
EXPECT_FALSE(my_map_copy == my_map);
EXPECT_FALSE(my_map == my_map_copy);
EXPECT_TRUE(my_map_copy != my_map);
EXPECT_TRUE(my_map != my_map_copy);
my_map_copy = my_map;
my_map["hello"] = kSetSize;
EXPECT_FALSE(my_map_copy == my_map);
EXPECT_FALSE(my_map == my_map_copy);
EXPECT_TRUE(my_map_copy != my_map);
EXPECT_TRUE(my_map != my_map_copy);
my_map.erase(std::string(kSetSize - 1, 'a'));
EXPECT_FALSE(my_map_copy == my_map);
EXPECT_FALSE(my_map == my_map_copy);
EXPECT_TRUE(my_map_copy != my_map);
EXPECT_TRUE(my_map != my_map_copy);
}
TEST(Btree, RangeCtorSanity) {
std::vector<int> ivec;
ivec.push_back(1);
std::map<int, int> imap;
imap.insert(std::make_pair(1, 2));
absl::btree_multiset<int> tmset(ivec.begin(), ivec.end());
absl::btree_multimap<int, int> tmmap(imap.begin(), imap.end());
absl::btree_set<int> tset(ivec.begin(), ivec.end());
absl::btree_map<int, int> tmap(imap.begin(), imap.end());
EXPECT_EQ(1, tmset.size());
EXPECT_EQ(1, tmmap.size());
EXPECT_EQ(1, tset.size());
EXPECT_EQ(1, tmap.size());
}
}  // namespace
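// A test peer that exposes btree_node internals (node sizing, slot counts,
// and the linear-vs-binary node search policy) to the tests below.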
class BtreeNodePeer {
public:
  // Yields the size of a leaf node sized to hold `target_values_per_node`
  // values.
  template <typename ValueType>
  constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
    return btree_node<
        set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
                   /*TargetNodeSize=*/256,  // This parameter isn't used here.
                   /*Multi=*/false>>::SizeWithNSlots(target_values_per_node);
}
template <typename Btree>
constexpr static size_t GetNumSlotsPerNode() {
return btree_node<typename Btree::params_type>::kNodeSlots;
}
template <typename Btree>
constexpr static size_t GetMaxFieldType() {
return std::numeric_limits<
typename btree_node<typename Btree::params_type>::field_type>::max();
}
template <typename Btree>
constexpr static bool UsesLinearNodeSearch() {
return btree_node<typename Btree::params_type>::use_linear_search::value;
}
template <typename Btree>
constexpr static bool FieldTypeEqualsSlotType() {
return std::is_same<
typename btree_node<typename Btree::params_type>::field_type,
typename btree_node<typename Btree::params_type>::slot_type>::value;
}
};
namespace {
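// Verifies that the absl_btree_prefer_linear_node_search alias, defined on
// either the key type or the comparator, selects linear rather than binary
// search within nodes.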
class BtreeMapTest : public ::testing::Test {
public:
struct Key {};
struct Cmp {
template <typename T>
bool operator()(T, T) const {
return false;
}
};
struct KeyLin {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct CmpLin : Cmp {
using absl_btree_prefer_linear_node_search = std::true_type;
};
struct KeyBin {
using absl_btree_prefer_linear_node_search = std::false_type;
};
struct CmpBin : Cmp {
using absl_btree_prefer_linear_node_search = std::false_type;
};
template <typename K, typename C>
static bool IsLinear() {
return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
}
};
TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
}
TEST_F(BtreeMapTest, LinearChoiceTree) {
EXPECT_FALSE((IsLinear<Key, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
EXPECT_FALSE((IsLinear<int, CmpBin>()));
EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
EXPECT_TRUE((IsLinear<Key, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
EXPECT_TRUE((IsLinear<int, CmpLin>()));
EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
EXPECT_FALSE((IsLinear<Key, Cmp>()));
EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
EXPECT_TRUE((IsLinear<int, std::less<int>>()));
EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
EXPECT_FALSE((IsLinear<int, Cmp>()));
EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
}
TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
absl::btree_map<std::string, std::unique_ptr<std::string>> m;
std::unique_ptr<std::string> &v = m["A"];
EXPECT_TRUE(v == nullptr);
v = absl::make_unique<std::string>("X");
auto iter = m.find("A");
EXPECT_EQ("X", *iter->second);
}
TEST(Btree, InitializerListConstructor) {
absl::btree_set<std::string> set({"a", "b"});
EXPECT_EQ(set.count("a"), 1);
EXPECT_EQ(set.count("b"), 1);
absl::btree_multiset<int> mset({1, 1, 4});
EXPECT_EQ(mset.count(1), 2);
EXPECT_EQ(mset.count(4), 1);
absl::btree_map<int, int> map({{1, 5}, {2, 10}});
EXPECT_EQ(map[1], 5);
EXPECT_EQ(map[2], 10);
absl::btree_multimap<int, int> mmap({{1, 5}, {1, 10}});
auto range = mmap.equal_range(1);
auto it = range.first;
ASSERT_NE(it, range.second);
EXPECT_EQ(it->second, 5);
ASSERT_NE(++it, range.second);
EXPECT_EQ(it->second, 10);
EXPECT_EQ(++it, range.second);
}
TEST(Btree, InitializerListInsert) {
absl::btree_set<std::string> set;
set.insert({"a", "b"});
EXPECT_EQ(set.count("a"), 1);
EXPECT_EQ(set.count("b"), 1);
absl::btree_multiset<int> mset;
mset.insert({1, 1, 4});
EXPECT_EQ(mset.count(1), 2);
EXPECT_EQ(mset.count(4), 1);
absl::btree_map<int, int> map;
map.insert({{1, 5}, {2, 10}});
map.insert({3, 15});
EXPECT_EQ(map[1], 5);
EXPECT_EQ(map[2], 10);
EXPECT_EQ(map[3], 15);
absl::btree_multimap<int, int> mmap;
mmap.insert({{1, 5}, {1, 10}});
auto range = mmap.equal_range(1);
auto it = range.first;
ASSERT_NE(it, range.second);
EXPECT_EQ(it->second, 5);
ASSERT_NE(++it, range.second);
EXPECT_EQ(it->second, 10);
EXPECT_EQ(++it, range.second);
}
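// key_compare_adapter is expected to substitute StringBtreeDefaultLess or
// StringBtreeDefaultGreater for std::less/std::greater on string-like keys,
// and to leave other comparators alone.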
template <typename Compare, typename Key>
void AssertKeyCompareStringAdapted() {
using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
std::is_same<Adapted, StringBtreeDefaultLess>::value ||
std::is_same<Adapted, StringBtreeDefaultGreater>::value,
"key_compare_adapter should have string-adapted this comparator.");
}
template <typename Compare, typename Key>
void AssertKeyCompareNotStringAdapted() {
using Adapted = typename key_compare_adapter<Compare, Key>::type;
static_assert(
!std::is_same<Adapted, StringBtreeDefaultLess>::value &&
!std::is_same<Adapted, StringBtreeDefaultGreater>::value,
"key_compare_adapter shouldn't have string-adapted this comparator.");
}
TEST(Btree, KeyCompareAdapter) {
AssertKeyCompareStringAdapted<std::less<std::string>, std::string>();
AssertKeyCompareStringAdapted<std::greater<std::string>, std::string>();
AssertKeyCompareStringAdapted<std::less<absl::string_view>,
absl::string_view>();
AssertKeyCompareStringAdapted<std::greater<absl::string_view>,
absl::string_view>();
AssertKeyCompareStringAdapted<std::less<absl::Cord>, absl::Cord>();
AssertKeyCompareStringAdapted<std::greater<absl::Cord>, absl::Cord>();
AssertKeyCompareNotStringAdapted<std::less<int>, int>();
AssertKeyCompareNotStringAdapted<std::greater<int>, int>();
}
TEST(Btree, RValueInsert) {
InstanceTracker tracker;
absl::btree_set<MovableOnlyInstance> set;
set.insert(MovableOnlyInstance(1));
set.insert(MovableOnlyInstance(3));
MovableOnlyInstance two(2);
set.insert(set.find(MovableOnlyInstance(3)), std::move(two));
auto it = set.find(MovableOnlyInstance(2));
ASSERT_NE(it, set.end());
ASSERT_NE(++it, set.end());
EXPECT_EQ(it->value(), 3);
absl::btree_multiset<MovableOnlyInstance> mset;
MovableOnlyInstance zero(0);
MovableOnlyInstance zero2(0);
mset.insert(std::move(zero));
mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2));
EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2);
absl::btree_map<int, MovableOnlyInstance> map;
std::pair<const int, MovableOnlyInstance> p1 = {1, MovableOnlyInstance(5)};
std::pair<const int, MovableOnlyInstance> p2 = {2, MovableOnlyInstance(10)};
std::pair<const int, MovableOnlyInstance> p3 = {3, MovableOnlyInstance(15)};
map.insert(std::move(p1));
map.insert(std::move(p3));
map.insert(map.find(3), std::move(p2));
ASSERT_NE(map.find(2), map.end());
EXPECT_EQ(map.find(2)->second.value(), 10);
absl::btree_multimap<int, MovableOnlyInstance> mmap;
std::pair<const int, MovableOnlyInstance> p4 = {1, MovableOnlyInstance(5)};
std::pair<const int, MovableOnlyInstance> p5 = {1, MovableOnlyInstance(10)};
mmap.insert(std::move(p4));
mmap.insert(mmap.find(1), std::move(p5));
auto range = mmap.equal_range(1);
auto it1 = range.first;
ASSERT_NE(it1, range.second);
EXPECT_EQ(it1->second.value(), 10);
ASSERT_NE(++it1, range.second);
EXPECT_EQ(it1->second.value(), 5);
EXPECT_EQ(++it1, range.second);
EXPECT_EQ(tracker.copies(), 0);
EXPECT_EQ(tracker.swaps(), 0);
}
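// Wraps a comparator to opt out of checked-compare instrumentation so that
// the comparison counts measured below stay deterministic.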
template <typename Cmp>
struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase {
using Cmp::Cmp;
CheckedCompareOptedOutCmp() {}
CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {}
};
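// A btree_set whose node size is chosen so that each node holds exactly
// TargetValuesPerNode values.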
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet
: public btree_set_container<btree<
set_params<Key, CheckedCompareOptedOutCmp<Cmp>, std::allocator<Key>,
BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
false>>> {
using Base = typename SizedBtreeSet::btree_set_container;
public:
SizedBtreeSet() = default;
using Base::Base;
};
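// Inserts `values` into *set and clears it, then checks the exact numbers of
// moves and comparisons performed. These counts are implementation-specific
// regression values, not guarantees.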
template <typename Set>
void ExpectOperationCounts(const int expected_moves,
const int expected_comparisons,
const std::vector<int> &values,
InstanceTracker *tracker, Set *set) {
for (const int v : values) set->insert(MovableOnlyInstance(v));
set->clear();
EXPECT_EQ(tracker->moves(), expected_moves);
EXPECT_EQ(tracker->comparisons(), expected_comparisons);
EXPECT_EQ(tracker->copies(), 0);
EXPECT_EQ(tracker->swaps(), 0);
tracker->ResetCopiesMovesSwaps();
}
#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_HWADDRESS_SANITIZER)
constexpr bool kAsan = true;
#else
constexpr bool kAsan = false;
#endif
TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
InstanceTracker tracker;
SizedBtreeSet<MovableOnlyInstance, 4> set4;
SizedBtreeSet<MovableOnlyInstance, 61> set61;
SizedBtreeSet<MovableOnlyInstance, 100> set100;
std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, 23);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
BtreeGenerationsEnabled() ? 60 : 61);
}
ExpectOperationCounts(56540, 134212, values, &tracker, &set4);
ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
std::sort(values.begin(), values.end());
ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
std::reverse(values.begin(), values.end());
ExpectOperationCounts(54949, 127531, values, &tracker, &set4);
ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
}
struct MovableOnlyInstanceThreeWayCompare {
absl::weak_ordering operator()(const MovableOnlyInstance &a,
const MovableOnlyInstance &b) const {
return a.compare(b);
}
};
TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
if (kAsan) GTEST_SKIP() << "We do extra operations in ASan mode.";
InstanceTracker tracker;
SizedBtreeSet<MovableOnlyInstance, 4,
MovableOnlyInstanceThreeWayCompare>
set4;
SizedBtreeSet<MovableOnlyInstance, 61,
MovableOnlyInstanceThreeWayCompare>
set61;
SizedBtreeSet<MovableOnlyInstance, 100,
MovableOnlyInstanceThreeWayCompare>
set100;
std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, 23);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
BtreeGenerationsEnabled() ? 60 : 61);
}
ExpectOperationCounts(56540, 124221, values, &tracker, &set4);
ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
std::sort(values.begin(), values.end());
ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
std::reverse(values.begin(), values.end());
ExpectOperationCounts(54949, 117532, values, &tracker, &set4);
ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
}
struct NoDefaultCtor {
int num;
explicit NoDefaultCtor(int i) : num(i) {}
friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) {
return a.num < b.num;
}
};
TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) {
absl::btree_map<NoDefaultCtor, NoDefaultCtor> m;
for (int i = 1; i <= 99; ++i) {
SCOPED_TRACE(i);
EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second);
}
EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second);
auto iter99 = m.find(NoDefaultCtor(99));
ASSERT_NE(iter99, m.end());
EXPECT_EQ(iter99->second.num, 1);
auto iter1 = m.find(NoDefaultCtor(1));
ASSERT_NE(iter1, m.end());
EXPECT_EQ(iter1->second.num, 99);
auto iter50 = m.find(NoDefaultCtor(50));
ASSERT_NE(iter50, m.end());
EXPECT_EQ(iter50->second.num, 50);
auto iter25 = m.find(NoDefaultCtor(25));
ASSERT_NE(iter25, m.end());
EXPECT_EQ(iter25->second.num, 75);
}
TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) {
absl::btree_multimap<NoDefaultCtor, NoDefaultCtor> m;
for (int i = 1; i <= 99; ++i) {
SCOPED_TRACE(i);
m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i));
}
auto iter99 = m.find(NoDefaultCtor(99));
ASSERT_NE(iter99, m.end());
EXPECT_EQ(iter99->second.num, 1);
auto iter1 = m.find(NoDefaultCtor(1));
ASSERT_NE(iter1, m.end());
EXPECT_EQ(iter1->second.num, 99);
auto iter50 = m.find(NoDefaultCtor(50));
ASSERT_NE(iter50, m.end());
EXPECT_EQ(iter50->second.num, 50);
auto iter25 = m.find(NoDefaultCtor(25));
ASSERT_NE(iter25, m.end());
EXPECT_EQ(iter25->second.num, 75);
}
TEST(Btree, MapAt) {
absl::btree_map<int, int> map = {{1, 2}, {2, 4}};
EXPECT_EQ(map.at(1), 2);
EXPECT_EQ(map.at(2), 4);
map.at(2) = 8;
const absl::btree_map<int, int> &const_map = map;
EXPECT_EQ(const_map.at(1), 2);
EXPECT_EQ(const_map.at(2), 8);
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(map.at(3), std::out_of_range);
#else
EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at");
#endif
}
TEST(Btree, BtreeMultisetEmplace) {
const int value_to_insert = 123456;
absl::btree_multiset<int> s;
auto iter = s.emplace(value_to_insert);
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
iter = s.emplace(value_to_insert);
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
auto result = s.equal_range(value_to_insert);
EXPECT_EQ(std::distance(result.first, result.second), 2);
}
TEST(Btree, BtreeMultisetEmplaceHint) {
const int value_to_insert = 123456;
absl::btree_multiset<int> s;
auto iter = s.emplace(value_to_insert);
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
iter = s.emplace_hint(iter, value_to_insert);
EXPECT_EQ(iter, s.lower_bound(value_to_insert));
ASSERT_NE(iter, s.end());
EXPECT_EQ(*iter, value_to_insert);
}
TEST(Btree, BtreeMultimapEmplace) {
const int key_to_insert = 123456;
const char value0[] = "a";
absl::btree_multimap<int, std::string> m;
auto iter = m.emplace(key_to_insert, value0);
ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value0);
const char value1[] = "b";
iter = m.emplace(key_to_insert, value1);
ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value1);
auto result = m.equal_range(key_to_insert);
EXPECT_EQ(std::distance(result.first, result.second), 2);
}
TEST(Btree, BtreeMultimapEmplaceHint) {
const int key_to_insert = 123456;
const char value0[] = "a";
absl::btree_multimap<int, std::string> m;
auto iter = m.emplace(key_to_insert, value0);
ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value0);
const char value1[] = "b";
iter = m.emplace_hint(iter, key_to_insert, value1);
EXPECT_EQ(iter, m.lower_bound(key_to_insert));
ASSERT_NE(iter, m.end());
EXPECT_EQ(iter->first, key_to_insert);
EXPECT_EQ(iter->second, value1);
}
TEST(Btree, ConstIteratorAccessors) {
absl::btree_set<int> set;
for (int i = 0; i < 100; ++i) {
set.insert(i);
}
auto it = set.cbegin();
auto r_it = set.crbegin();
for (int i = 0; i < 100; ++i, ++it, ++r_it) {
ASSERT_EQ(*it, i);
ASSERT_EQ(*r_it, 99 - i);
}
EXPECT_EQ(it, set.cend());
EXPECT_EQ(r_it, set.crend());
}
TEST(Btree, StrSplitCompatible) {
const absl::btree_set<std::string> split_set = absl::StrSplit("a,b,c", ',');
const absl::btree_set<std::string> expected_set = {"a", "b", "c"};
EXPECT_EQ(split_set, expected_set);
}
TEST(Btree, KeyComp) {
absl::btree_set<int> s;
EXPECT_TRUE(s.key_comp()(1, 2));
EXPECT_FALSE(s.key_comp()(2, 2));
EXPECT_FALSE(s.key_comp()(2, 1));
absl::btree_map<int, int> m1;
EXPECT_TRUE(m1.key_comp()(1, 2));
EXPECT_FALSE(m1.key_comp()(2, 2));
EXPECT_FALSE(m1.key_comp()(2, 1));
absl::btree_map<std::string, int> m2;
EXPECT_TRUE(m2.key_comp()("a", "b"));
EXPECT_FALSE(m2.key_comp()("b", "b"));
EXPECT_FALSE(m2.key_comp()("b", "a"));
}
TEST(Btree, ValueComp) {
absl::btree_set<int> s;
EXPECT_TRUE(s.value_comp()(1, 2));
EXPECT_FALSE(s.value_comp()(2, 2));
EXPECT_FALSE(s.value_comp()(2, 1));
absl::btree_map<int, int> m1;
EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0)));
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0)));
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0)));
absl::btree_map<std::string, int> m2;
EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)));
EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)));
EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)));
}
TEST(Btree, MapValueCompProtected) {
struct key_compare {
bool operator()(int l, int r) const { return l < r; }
int id;
};
using value_compare = absl::btree_map<int, int, key_compare>::value_compare;
struct value_comp_child : public value_compare {
explicit value_comp_child(key_compare kc) : value_compare(kc) {}
int GetId() const { return comp.id; }
};
value_comp_child c(key_compare{10});
EXPECT_EQ(c.GetId(), 10);
}
TEST(Btree, DefaultConstruction) {
absl::btree_set<int> s;
absl::btree_map<int, int> m;
absl::btree_multiset<int> ms;
absl::btree_multimap<int, int> mm;
EXPECT_TRUE(s.empty());
EXPECT_TRUE(m.empty());
EXPECT_TRUE(ms.empty());
EXPECT_TRUE(mm.empty());
}
TEST(Btree, SwissTableHashable) {
static constexpr int kValues = 10000;
std::vector<int> values(kValues);
std::iota(values.begin(), values.end(), 0);
std::vector<std::pair<int, int>> map_values;
for (int v : values) map_values.emplace_back(v, -v);
using set = absl::btree_set<int>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
set{},
set{1},
set{2},
set{1, 2},
set{2, 1},
set(values.begin(), values.end()),
set(values.rbegin(), values.rend()),
}));
using mset = absl::btree_multiset<int>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
mset{},
mset{1},
mset{1, 1},
mset{2},
mset{2, 2},
mset{1, 2},
mset{1, 1, 2},
mset{1, 2, 2},
mset{1, 1, 2, 2},
mset(values.begin(), values.end()),
mset(values.rbegin(), values.rend()),
}));
using map = absl::btree_map<int, int>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
map{},
map{{1, 0}},
map{{1, 1}},
map{{2, 0}},
map{{2, 2}},
map{{1, 0}, {2, 1}},
map(map_values.begin(), map_values.end()),
map(map_values.rbegin(), map_values.rend()),
}));
using mmap = absl::btree_multimap<int, int>;
EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
mmap{},
mmap{{1, 0}},
mmap{{1, 1}},
mmap{{1, 0}, {1, 1}},
mmap{{1, 1}, {1, 0}},
mmap{{2, 0}},
mmap{{2, 2}},
mmap{{1, 0}, {2, 1}},
mmap(map_values.begin(), map_values.end()),
mmap(map_values.rbegin(), map_values.rend()),
}));
}
TEST(Btree, ComparableSet) {
absl::btree_set<int> s1 = {1, 2};
absl::btree_set<int> s2 = {2, 3};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_LE(s1, s1);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
EXPECT_GE(s1, s1);
}
TEST(Btree, ComparableSetsDifferentLength) {
absl::btree_set<int> s1 = {1, 2};
absl::btree_set<int> s2 = {1, 2, 3};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
}
TEST(Btree, ComparableMultiset) {
absl::btree_multiset<int> s1 = {1, 2};
absl::btree_multiset<int> s2 = {2, 3};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_LE(s1, s1);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
EXPECT_GE(s1, s1);
}
TEST(Btree, ComparableMap) {
absl::btree_map<int, int> s1 = {{1, 2}};
absl::btree_map<int, int> s2 = {{2, 3}};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_LE(s1, s1);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
EXPECT_GE(s1, s1);
}
TEST(Btree, ComparableMultimap) {
absl::btree_multimap<int, int> s1 = {{1, 2}};
absl::btree_multimap<int, int> s2 = {{2, 3}};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_LE(s1, s1);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
EXPECT_GE(s1, s1);
}
TEST(Btree, ComparableSetWithCustomComparator) {
absl::btree_set<int, std::greater<int>> s1 = {1, 2};
absl::btree_set<int, std::greater<int>> s2 = {2, 3};
EXPECT_LT(s1, s2);
EXPECT_LE(s1, s2);
EXPECT_LE(s1, s1);
EXPECT_GT(s2, s1);
EXPECT_GE(s2, s1);
EXPECT_GE(s1, s1);
}
TEST(Btree, EraseReturnsIterator) {
absl::btree_set<int> set = {1, 2, 3, 4, 5};
auto result_it = set.erase(set.begin(), set.find(3));
EXPECT_EQ(result_it, set.find(3));
result_it = set.erase(set.find(5));
EXPECT_EQ(result_it, set.end());
}
TEST(Btree, ExtractAndInsertNodeHandleSet) {
absl::btree_set<int> src1 = {1, 2, 3, 4, 5};
auto nh = src1.extract(src1.find(3));
EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5));
absl::btree_set<int> other;
absl::btree_set<int>::insert_return_type res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(3));
EXPECT_EQ(res.position, other.find(3));
EXPECT_TRUE(res.inserted);
EXPECT_TRUE(res.node.empty());
absl::btree_set<int> src2 = {3, 4};
nh = src2.extract(src2.find(3));
EXPECT_THAT(src2, ElementsAre(4));
res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(3));
EXPECT_EQ(res.position, other.find(3));
EXPECT_FALSE(res.inserted);
ASSERT_FALSE(res.node.empty());
EXPECT_EQ(res.node.value(), 3);
}
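// Extracts and reinserts every element, both by key and by iterator, and
// relies on InstanceTracker to confirm that no instances leak.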
template <typename Set>
void TestExtractWithTrackingForSet() {
InstanceTracker tracker;
{
Set s;
const size_t kSize = 1000;
while (s.size() < kSize) {
s.insert(MovableOnlyInstance(s.size()));
}
for (int i = 0; i < kSize; ++i) {
auto nh = s.extract(MovableOnlyInstance(i));
EXPECT_EQ(s.size(), kSize - 1);
EXPECT_EQ(nh.value().value(), i);
s.insert(std::move(nh));
EXPECT_EQ(s.size(), kSize);
auto it = s.find(MovableOnlyInstance(i));
nh = s.extract(it);
EXPECT_EQ(s.size(), kSize - 1);
EXPECT_EQ(nh.value().value(), i);
s.insert(s.begin(), std::move(nh));
EXPECT_EQ(s.size(), kSize);
}
}
EXPECT_EQ(0, tracker.instances());
}
template <typename Map>
void TestExtractWithTrackingForMap() {
InstanceTracker tracker;
{
Map m;
const size_t kSize = 1000;
while (m.size() < kSize) {
m.insert(
{CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())});
}
for (int i = 0; i < kSize; ++i) {
auto nh = m.extract(CopyableMovableInstance(i));
EXPECT_EQ(m.size(), kSize - 1);
EXPECT_EQ(nh.key().value(), i);
EXPECT_EQ(nh.mapped().value(), i);
m.insert(std::move(nh));
EXPECT_EQ(m.size(), kSize);
auto it = m.find(CopyableMovableInstance(i));
nh = m.extract(it);
EXPECT_EQ(m.size(), kSize - 1);
EXPECT_EQ(nh.key().value(), i);
EXPECT_EQ(nh.mapped().value(), i);
m.insert(m.begin(), std::move(nh));
EXPECT_EQ(m.size(), kSize);
}
}
EXPECT_EQ(0, tracker.instances());
}
TEST(Btree, ExtractTracking) {
TestExtractWithTrackingForSet<absl::btree_set<MovableOnlyInstance>>();
TestExtractWithTrackingForSet<absl::btree_multiset<MovableOnlyInstance>>();
TestExtractWithTrackingForMap<
absl::btree_map<CopyableMovableInstance, MovableOnlyInstance>>();
TestExtractWithTrackingForMap<
absl::btree_multimap<CopyableMovableInstance, MovableOnlyInstance>>();
}
TEST(Btree, ExtractAndInsertNodeHandleMultiSet) {
absl::btree_multiset<int> src1 = {1, 2, 3, 3, 4, 5};
auto nh = src1.extract(src1.find(3));
EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5));
absl::btree_multiset<int> other;
auto res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(3));
EXPECT_EQ(res, other.find(3));
absl::btree_multiset<int> src2 = {3, 4};
nh = src2.extract(src2.find(3));
EXPECT_THAT(src2, ElementsAre(4));
res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(3, 3));
EXPECT_EQ(res, ++other.find(3));
}
TEST(Btree, ExtractAndInsertNodeHandleMap) {
absl::btree_map<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}};
auto nh = src1.extract(src1.find(3));
EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6)));
absl::btree_map<int, int> other;
absl::btree_map<int, int>::insert_return_type res =
other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
EXPECT_EQ(res.position, other.find(3));
EXPECT_TRUE(res.inserted);
EXPECT_TRUE(res.node.empty());
absl::btree_map<int, int> src2 = {{3, 6}};
nh = src2.extract(src2.find(3));
EXPECT_TRUE(src2.empty());
res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
EXPECT_EQ(res.position, other.find(3));
EXPECT_FALSE(res.inserted);
ASSERT_FALSE(res.node.empty());
EXPECT_EQ(res.node.key(), 3);
EXPECT_EQ(res.node.mapped(), 6);
}
TEST(Btree, ExtractAndInsertNodeHandleMultiMap) {
absl::btree_multimap<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}};
auto nh = src1.extract(src1.find(3));
EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6)));
absl::btree_multimap<int, int> other;
auto res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
EXPECT_EQ(res, other.find(3));
absl::btree_multimap<int, int> src2 = {{3, 6}};
nh = src2.extract(src2.find(3));
EXPECT_TRUE(src2.empty());
res = other.insert(std::move(nh));
EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6)));
EXPECT_EQ(res, ++other.begin());
}
TEST(Btree, ExtractMultiMapEquivalentKeys) {
absl::btree_multimap<std::string, int> map;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
map.insert({absl::StrCat(i), j});
}
}
for (int i = 0; i < 100; ++i) {
const std::string key = absl::StrCat(i);
auto node_handle = map.extract(key);
EXPECT_EQ(node_handle.key(), key);
EXPECT_EQ(node_handle.mapped(), 0) << i;
}
for (int i = 0; i < 100; ++i) {
const std::string key = absl::StrCat(i);
auto node_handle = map.extract(key);
EXPECT_EQ(node_handle.key(), key);
EXPECT_EQ(node_handle.mapped(), 1) << i;
}
}
TEST(Btree, ExtractAndGetNextSet) {
absl::btree_set<int> src = {1, 2, 3, 4, 5};
auto it = src.find(3);
auto extracted_and_next = src.extract_and_get_next(it);
EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
EXPECT_EQ(extracted_and_next.node.value(), 3);
EXPECT_EQ(*extracted_and_next.next, 4);
}
TEST(Btree, ExtractAndGetNextMultiSet) {
absl::btree_multiset<int> src = {1, 2, 3, 4, 5};
auto it = src.find(3);
auto extracted_and_next = src.extract_and_get_next(it);
EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
EXPECT_EQ(extracted_and_next.node.value(), 3);
EXPECT_EQ(*extracted_and_next.next, 4);
}
TEST(Btree, ExtractAndGetNextMap) {
absl::btree_map<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
auto it = src.find(3);
auto extracted_and_next = src.extract_and_get_next(it);
EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
EXPECT_EQ(extracted_and_next.node.key(), 3);
EXPECT_EQ(extracted_and_next.node.mapped(), 4);
EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
}
TEST(Btree, ExtractAndGetNextMultiMap) {
absl::btree_multimap<int, int> src = {{1, 2}, {3, 4}, {5, 6}};
auto it = src.find(3);
auto extracted_and_next = src.extract_and_get_next(it);
EXPECT_THAT(src, ElementsAre(Pair(1, 2), Pair(5, 6)));
EXPECT_EQ(extracted_and_next.node.key(), 3);
EXPECT_EQ(extracted_and_next.node.mapped(), 4);
EXPECT_THAT(*extracted_and_next.next, Pair(5, 6));
}
TEST(Btree, ExtractAndGetNextEndIter) {
absl::btree_set<int> src = {1, 2, 3, 4, 5};
auto it = src.find(5);
auto extracted_and_next = src.extract_and_get_next(it);
EXPECT_THAT(src, ElementsAre(1, 2, 3, 4));
EXPECT_EQ(extracted_and_next.node.value(), 5);
EXPECT_EQ(extracted_and_next.next, src.end());
}
TEST(Btree, ExtractDoesntCauseExtraMoves) {
#ifdef _MSC_VER
GTEST_SKIP() << "This test fails on MSVC.";
#endif
using Set = absl::btree_set<MovableOnlyInstance>;
std::array<std::function<void(Set &)>, 3> extracters = {
[](Set &s) { auto node = s.extract(s.begin()); },
[](Set &s) { auto ret = s.extract_and_get_next(s.begin()); },
[](Set &s) { auto node = s.extract(MovableOnlyInstance(0)); }};
InstanceTracker tracker;
for (int i = 0; i < 3; ++i) {
Set s;
s.insert(MovableOnlyInstance(0));
tracker.ResetCopiesMovesSwaps();
extracters[i](s);
EXPECT_EQ(tracker.copies(), 0) << i;
EXPECT_EQ(tracker.moves(), 1) << i;
EXPECT_EQ(tracker.swaps(), 0) << i;
}
}
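// `key` participates in comparisons but `not_key` does not, so hinted inserts
// of equivalent keys can be checked for their exact position among duplicates.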
struct InsertMultiHintData {
int key;
int not_key;
bool operator==(const InsertMultiHintData other) const {
return key == other.key && not_key == other.not_key;
}
};
struct InsertMultiHintDataKeyCompare {
using is_transparent = void;
bool operator()(const InsertMultiHintData a,
const InsertMultiHintData b) const {
return a.key < b.key;
}
bool operator()(const int a, const InsertMultiHintData b) const {
return a < b.key;
}
bool operator()(const InsertMultiHintData a, const int b) const {
return a.key < b;
}
};
TEST(Btree, InsertHintNodeHandle) {
{
absl::btree_set<int> src = {1, 2, 3, 4, 5};
auto nh = src.extract(src.find(3));
EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
absl::btree_set<int> other = {0, 100};
auto it = other.insert(other.lower_bound(3), std::move(nh));
EXPECT_THAT(other, ElementsAre(0, 3, 100));
EXPECT_EQ(it, other.find(3));
nh = src.extract(src.find(5));
it = other.insert(other.end(), std::move(nh));
EXPECT_THAT(other, ElementsAre(0, 3, 5, 100));
EXPECT_EQ(it, other.find(5));
}
absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare> src =
{{1, 2}, {3, 4}, {3, 5}};
auto nh = src.extract(src.lower_bound(3));
EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4}));
absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare>
other = {{3, 1}, {3, 2}, {3, 3}};
auto it = other.insert(--other.end(), std::move(nh));
EXPECT_THAT(
other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2},
InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3}));
EXPECT_EQ(it, --(--other.end()));
nh = src.extract(src.find(3));
EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5}));
it = other.insert(other.begin(), std::move(nh));
EXPECT_THAT(other,
ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1},
InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4},
InsertMultiHintData{3, 3}));
EXPECT_EQ(it, other.begin());
}
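// A three-way (compare-to) comparator; the merge tests below mix it with
// ordinary boolean comparators and with differing orderings.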
struct IntCompareToCmp {
absl::weak_ordering operator()(int a, int b) const {
if (a < b) return absl::weak_ordering::less;
if (a > b) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
};
TEST(Btree, MergeIntoUniqueContainers) {
absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
absl::btree_multiset<int> src2 = {3, 4, 4, 5};
absl::btree_set<int> dst;
dst.merge(src1);
EXPECT_TRUE(src1.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3));
dst.merge(src2);
EXPECT_THAT(src2, ElementsAre(3, 4));
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5));
}
TEST(Btree, MergeIntoUniqueContainersWithCompareTo) {
absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
absl::btree_multiset<int> src2 = {3, 4, 4, 5};
absl::btree_set<int, IntCompareToCmp> dst;
dst.merge(src1);
EXPECT_TRUE(src1.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3));
dst.merge(src2);
EXPECT_THAT(src2, ElementsAre(3, 4));
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5));
}
TEST(Btree, MergeIntoMultiContainers) {
absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
absl::btree_multiset<int> src2 = {3, 4, 4, 5};
absl::btree_multiset<int> dst;
dst.merge(src1);
EXPECT_TRUE(src1.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3));
dst.merge(src2);
EXPECT_TRUE(src2.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5));
}
TEST(Btree, MergeIntoMultiContainersWithCompareTo) {
absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
absl::btree_multiset<int> src2 = {3, 4, 4, 5};
absl::btree_multiset<int, IntCompareToCmp> dst;
dst.merge(src1);
EXPECT_TRUE(src1.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3));
dst.merge(src2);
EXPECT_TRUE(src2.empty());
EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5));
}
TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
absl::btree_map<int, int, IntCompareToCmp> src1 = {{1, 1}, {2, 2}, {3, 3}};
absl::btree_multimap<int, int, std::greater<int>> src2 = {
{5, 5}, {4, 1}, {4, 4}, {3, 2}};
absl::btree_multimap<int, int> dst;
dst.merge(src1);
EXPECT_TRUE(src1.empty());
EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3)));
dst.merge(src2);
EXPECT_TRUE(src2.empty());
EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2),
Pair(4, 1), Pair(4, 4), Pair(5, 5)));
}
TEST(Btree, MergeIntoSetMovableOnly) {
absl::btree_set<MovableOnlyInstance> src;
src.insert(MovableOnlyInstance(1));
absl::btree_multiset<MovableOnlyInstance> dst1;
dst1.insert(MovableOnlyInstance(2));
absl::btree_set<MovableOnlyInstance> dst2;
dst1.merge(src);
EXPECT_TRUE(src.empty());
ASSERT_THAT(dst1, SizeIs(2));
EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
dst2.merge(dst1);
EXPECT_TRUE(dst1.empty());
ASSERT_THAT(dst2, SizeIs(2));
EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
}
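// User-provided comparators may return absl::weak_ordering or
// absl::strong_ordering instead of bool.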
struct KeyCompareToWeakOrdering {
template <typename T>
absl::weak_ordering operator()(const T &a, const T &b) const {
return a < b ? absl::weak_ordering::less
: a == b ? absl::weak_ordering::equivalent
: absl::weak_ordering::greater;
}
};
struct KeyCompareToStrongOrdering {
template <typename T>
absl::strong_ordering operator()(const T &a, const T &b) const {
return a < b ? absl::strong_ordering::less
: a == b ? absl::strong_ordering::equal
: absl::strong_ordering::greater;
}
};
TEST(Btree, UserProvidedKeyCompareToComparators) {
absl::btree_set<int, KeyCompareToWeakOrdering> weak_set = {1, 2, 3};
EXPECT_TRUE(weak_set.contains(2));
EXPECT_FALSE(weak_set.contains(4));
absl::btree_set<int, KeyCompareToStrongOrdering> strong_set = {1, 2, 3};
EXPECT_TRUE(strong_set.contains(2));
EXPECT_FALSE(strong_set.contains(4));
}
TEST(Btree, TryEmplaceBasicTest) {
absl::btree_map<int, std::string> m;
m.try_emplace(1, "one");
EXPECT_EQ(1, m.size());
const int key(42);
m.try_emplace(key, 3, 'a');
m.try_emplace(2, std::string("two"));
EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, std::string>>{
{1, "one"}, {2, "two"}, {42, "aaa"}}));
}
TEST(Btree, TryEmplaceWithHintWorks) {
int calls = 0;
auto cmp = [&calls](int x, int y) {
++calls;
return x < y;
};
using Cmp = decltype(cmp);
absl::btree_map<int, int, CheckedCompareOptedOutCmp<Cmp>> m(cmp);
for (int i = 0; i < 128; ++i) {
m.emplace(i, i);
}
calls = 0;
m.emplace(127, 127);
EXPECT_GE(calls, 4);
calls = 0;
auto it = m.try_emplace(m.begin(), -1, -1);
EXPECT_EQ(129, m.size());
EXPECT_EQ(it, m.begin());
EXPECT_LE(calls, 2);
calls = 0;
std::pair<int, int> pair1024 = {1024, 1024};
it = m.try_emplace(m.end(), pair1024.first, pair1024.second);
EXPECT_EQ(130, m.size());
EXPECT_EQ(it, --m.end());
EXPECT_LE(calls, 2);
calls = 0;
it = m.try_emplace(m.end(), 16, 17);
EXPECT_EQ(130, m.size());
EXPECT_GE(calls, 4);
EXPECT_EQ(it, m.find(16));
calls = 0;
it = m.try_emplace(it, 16, 17);
EXPECT_EQ(130, m.size());
EXPECT_LE(calls, 2);
EXPECT_EQ(it, m.find(16));
m.erase(2);
EXPECT_EQ(129, m.size());
auto hint = m.find(3);
calls = 0;
m.try_emplace(hint, 2, 2);
EXPECT_EQ(130, m.size());
EXPECT_LE(calls, 2);
EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
}
TEST(Btree, TryEmplaceWithBadHint) {
absl::btree_map<int, int> m = {{1, 1}, {9, 9}};
auto it = m.try_emplace(m.begin(), 2, 2);
EXPECT_EQ(it, ++m.begin());
EXPECT_THAT(m, ElementsAreArray(
std::vector<std::pair<int, int>>{{1, 1}, {2, 2}, {9, 9}}));
it = m.try_emplace(++(++m.begin()), 0, 0);
EXPECT_EQ(it, m.begin());
EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, int>>{
{0, 0}, {1, 1}, {2, 2}, {9, 9}}));
}
TEST(Btree, TryEmplaceMaintainsSortedOrder) {
absl::btree_map<int, std::string> m;
std::pair<int, std::string> pair5 = {5, "five"};
m.try_emplace(10, "ten");
m.try_emplace(pair5.first, pair5.second);
EXPECT_EQ(2, m.size());
EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
int int100{100};
m.try_emplace(int100, "hundred");
m.try_emplace(1, "one");
EXPECT_EQ(4, m.size());
EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
}
TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) {
absl::btree_map<int, int> m;
m.try_emplace(m.end(), 1);
EXPECT_EQ(0, m[1]);
}
TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) {
absl::btree_map<int, std::string> m;
m.try_emplace(m.end(), 1, 10, 'a');
EXPECT_EQ(std::string(10, 'a'), m[1]);
}
template <typename Alloc>
using BtreeSetAlloc = absl::btree_set<int, std::less<int>, Alloc>;
TEST(Btree, AllocatorPropagation) {
TestAllocPropagation<BtreeSetAlloc>();
}
TEST(Btree, MinimumAlignmentAllocator) {
absl::btree_set<int8_t, std::less<int8_t>, MinimumAlignmentAlloc<int8_t>> set;
for (int8_t i = 0; i < 100; ++i) set.insert(i);
set.erase(set.find(50), set.end());
for (int8_t i = 51; i < 101; ++i) set.insert(i);
EXPECT_EQ(set.size(), 100);
}
TEST(Btree, EmptyTree) {
absl::btree_set<int> s;
EXPECT_TRUE(s.empty());
EXPECT_EQ(s.size(), 0);
EXPECT_GT(s.max_size(), 0);
}
bool IsEven(int k) { return k % 2 == 0; }
TEST(Btree, EraseIf) {
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3);
EXPECT_THAT(s, ElementsAre(1, 3));
}
{
absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3);
EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
}
{
absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
EXPECT_EQ(
erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; }),
2);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
}
{
absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
{6, 6}, {6, 7}, {100, 6}};
EXPECT_EQ(
erase_if(m,
[](std::pair<const int, int> kv) { return kv.second == 6; }),
3);
EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
}
{
absl::btree_set<int> s;
for (int i = 0; i < 1000; ++i) s.insert(2 * i);
EXPECT_EQ(erase_if(s, IsEven), 1000);
EXPECT_THAT(s, IsEmpty());
}
{
absl::btree_set<int> s = {1, 3, 5, 6, 100};
EXPECT_EQ(erase_if(s, &IsEven), 2);
EXPECT_THAT(s, ElementsAre(1, 3, 5));
}
{
absl::btree_set<int> s;
for (int i = 0; i < 1000; ++i) s.insert(i);
int pred_calls = 0;
EXPECT_EQ(erase_if(s,
[&pred_calls](int k) {
++pred_calls;
return k % 2;
}),
500);
EXPECT_THAT(s, SizeIs(500));
EXPECT_EQ(pred_calls, 1000);
}
}
TEST(Btree, InsertOrAssign) {
absl::btree_map<int, int> m = {{1, 1}, {3, 3}};
using value_type = typename decltype(m)::value_type;
auto ret = m.insert_or_assign(4, 4);
EXPECT_EQ(*ret.first, value_type(4, 4));
EXPECT_TRUE(ret.second);
ret = m.insert_or_assign(3, 100);
EXPECT_EQ(*ret.first, value_type(3, 100));
EXPECT_FALSE(ret.second);
auto hint_ret = m.insert_or_assign(ret.first, 3, 200);
EXPECT_EQ(*hint_ret, value_type(3, 200));
hint_ret = m.insert_or_assign(m.find(1), 0, 1);
EXPECT_EQ(*hint_ret, value_type(0, 1));
hint_ret = m.insert_or_assign(m.end(), -1, 1);
EXPECT_EQ(*hint_ret, value_type(-1, 1));
EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200),
Pair(4, 4)));
}
TEST(Btree, InsertOrAssignMovableOnly) {
absl::btree_map<int, MovableOnlyInstance> m;
using value_type = typename decltype(m)::value_type;
auto ret = m.insert_or_assign(4, MovableOnlyInstance(4));
EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4)));
EXPECT_TRUE(ret.second);
ret = m.insert_or_assign(4, MovableOnlyInstance(100));
EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100)));
EXPECT_FALSE(ret.second);
auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200));
EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200)));
EXPECT_EQ(m.size(), 2);
}
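// Bitfields can't bind to references, so these calls verify that the btree
// API accepts bitfield arguments by value.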
TEST(Btree, BitfieldArgument) {
union {
int n : 1;
};
n = 0;
absl::btree_map<int, int> m;
m.erase(n);
m.count(n);
m.find(n);
m.contains(n);
m.equal_range(n);
m.insert_or_assign(n, n);
m.insert_or_assign(m.end(), n, n);
m.try_emplace(n);
m.try_emplace(m.end(), n);
m.at(n);
m[n];
}
TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) {
const absl::string_view names[] = {"n1", "n2"};
absl::btree_set<std::string> name_set1{std::begin(names), std::end(names)};
EXPECT_THAT(name_set1, ElementsAreArray(names));
absl::btree_set<std::string> name_set2;
name_set2.insert(std::begin(names), std::end(names));
EXPECT_THAT(name_set2, ElementsAreArray(names));
}
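// Counts conversions from int, letting the tests verify that range
// construction and insertion through a transparent comparator construct each
// key at most once.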
struct ConstructorCounted {
explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
bool operator==(int other) const { return i == other; }
int i;
static int constructor_calls;
};
int ConstructorCounted::constructor_calls = 0;
struct ConstructorCountedCompare {
bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
bool operator()(const ConstructorCounted &a,
const ConstructorCounted &b) const {
return a.i < b.i;
}
using is_transparent = void;
};
TEST(Btree,
SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
const int i[] = {0, 1, 1};
ConstructorCounted::constructor_calls = 0;
absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
std::begin(i), std::end(i)};
EXPECT_THAT(set, ElementsAre(0, 1));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
set.insert(std::begin(i), std::end(i));
EXPECT_THAT(set, ElementsAre(0, 1));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
}
TEST(Btree,
SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
const int i[] = {0, 1};
absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
absl::btree_set<std::vector<void *>> s2;
s2.insert(std::begin(i), std::end(i));
EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
}
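// Guarded on libstdc++ release 7 or later: older releases reportedly cannot
// compile these explicit pair-key conversions.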
#if !defined(__GLIBCXX__) || \
(defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7)
TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) {
const std::pair<absl::string_view, int> names[] = {{"n1", 1}, {"n2", 2}};
absl::btree_map<std::string, int> name_map1{std::begin(names),
std::end(names)};
EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
absl::btree_map<std::string, int> name_map2;
name_map2.insert(std::begin(names), std::end(names));
EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
}
TEST(Btree,
MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
const std::pair<int, int> i[] = {{0, 1}, {1, 2}, {1, 3}};
ConstructorCounted::constructor_calls = 0;
absl::btree_map<ConstructorCounted, int, ConstructorCountedCompare> map{
std::begin(i), std::end(i)};
EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
map.insert(std::begin(i), std::end(i));
EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
}
TEST(Btree,
MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
const std::pair<int, int> i[] = {{0, 1}, {1, 2}};
absl::btree_map<std::vector<void *>, int> m1{std::begin(i), std::end(i)};
EXPECT_THAT(m1,
ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
absl::btree_map<std::vector<void *>, int> m2;
m2.insert(std::begin(i), std::end(i));
EXPECT_THAT(m2,
ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
}
TEST(Btree, HeterogeneousTryEmplace) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m.try_emplace(sv, 1);
EXPECT_EQ(m[s], 1);
m.try_emplace(m.end(), sv, 2);
EXPECT_EQ(m[s], 1);
}
TEST(Btree, HeterogeneousOperatorMapped) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m[sv] = 1;
EXPECT_EQ(m[s], 1);
m[sv] = 2;
EXPECT_EQ(m[s], 2);
}
TEST(Btree, HeterogeneousInsertOrAssign) {
absl::btree_map<std::string, int> m;
std::string s = "key";
absl::string_view sv = s;
m.insert_or_assign(sv, 1);
EXPECT_EQ(m[s], 1);
m.insert_or_assign(m.end(), sv, 2);
EXPECT_EQ(m[s], 2);
}
#endif
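// Mutable access to a node handle's key relies on std::launder, so this test
// is guarded on library support for it.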
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(Btree, NodeHandleMutableKeyAccess) {
{
absl::btree_map<std::string, std::string> map;
map["key1"] = "mapped";
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
}
{
absl::btree_multimap<std::string, std::string> map;
map.emplace("key1", "mapped");
auto nh = map.extract(map.begin());
nh.key().resize(3);
map.insert(std::move(nh));
EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
}
}
#endif
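// A two-part key whose transparent comparators compare only i1 against a bare
// int, so int-keyed lookups address whole groups of equivalent elements.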
struct MultiKey {
int i1;
int i2;
};
bool operator==(const MultiKey a, const MultiKey b) {
return a.i1 == b.i1 && a.i2 == b.i2;
}
struct MultiKeyComp {
using is_transparent = void;
bool operator()(const MultiKey a, const MultiKey b) const {
if (a.i1 != b.i1) return a.i1 < b.i1;
return a.i2 < b.i2;
}
bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
};
struct MultiKeyThreeWayComp {
using is_transparent = void;
absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const {
if (a.i1 < b.i1) return absl::weak_ordering::less;
if (a.i1 > b.i1) return absl::weak_ordering::greater;
if (a.i2 < b.i2) return absl::weak_ordering::less;
if (a.i2 > b.i2) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const int a, const MultiKey b) const {
if (a < b.i1) return absl::weak_ordering::less;
if (a > b.i1) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
absl::weak_ordering operator()(const MultiKey a, const int b) const {
if (a.i1 < b) return absl::weak_ordering::less;
if (a.i1 > b) return absl::weak_ordering::greater;
return absl::weak_ordering::equivalent;
}
};
template <typename Compare>
class BtreeMultiKeyTest : public ::testing::Test {};
using MultiKeyComps = ::testing::Types<MultiKeyComp, MultiKeyThreeWayComp>;
TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps);
TYPED_TEST(BtreeMultiKeyTest, EqualRange) {
absl::btree_set<MultiKey, TypeParam> set;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
set.insert({i, j});
}
}
for (int i = 0; i < 100; ++i) {
auto equal_range = set.equal_range(i);
EXPECT_EQ(equal_range.first->i1, i);
EXPECT_EQ(equal_range.first->i2, 0) << i;
EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
}
}
TYPED_TEST(BtreeMultiKeyTest, Extract) {
absl::btree_set<MultiKey, TypeParam> set;
for (int i = 0; i < 100; ++i) {
for (int j = 0; j < 100; ++j) {
set.insert({i, j});
}
}
for (int i = 0; i < 100; ++i) {
auto node_handle = set.extract(i);
EXPECT_EQ(node_handle.value().i1, i);
EXPECT_EQ(node_handle.value().i2, 0) << i;
}
for (int i = 0; i < 100; ++i) {
auto node_handle = set.extract(i);
EXPECT_EQ(node_handle.value().i1, i);
EXPECT_EQ(node_handle.value().i2, 1) << i;
}
}
TYPED_TEST(BtreeMultiKeyTest, Erase) {
absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.erase(2), 2);
EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
}
TYPED_TEST(BtreeMultiKeyTest, Count) {
const absl::btree_set<MultiKey, TypeParam> set = {
{1, 1}, {2, 1}, {2, 2}, {3, 1}};
EXPECT_EQ(set.count(2), 2);
}
TEST(Btree, SetIteratorsAreConst) {
using Set = absl::btree_set<int>;
EXPECT_TRUE(
(std::is_same<typename Set::iterator::reference, const int &>::value));
EXPECT_TRUE(
(std::is_same<typename Set::iterator::pointer, const int *>::value));
using MSet = absl::btree_multiset<int>;
EXPECT_TRUE(
(std::is_same<typename MSet::iterator::reference, const int &>::value));
EXPECT_TRUE(
(std::is_same<typename MSet::iterator::pointer, const int *>::value));
}
TEST(Btree, AllocConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set(alloc);
set.insert({1, 2, 3});
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocInitializerListConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set({1, 2, 3}, alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocRangeConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
std::vector<int> v = {1, 2, 3};
Set set(v.begin(), v.end(), alloc);
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used, set.size() * sizeof(int));
}
TEST(Btree, AllocCopyConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(set1, alloc2);
EXPECT_THAT(set1, ElementsAre(1, 2, 3));
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
EXPECT_EQ(bytes_used1, bytes_used2);
}
TEST(Btree, AllocMoveConstructor_SameAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used = 0;
Alloc alloc(&bytes_used);
Set set1(alloc);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
Set set2(std::move(set1), alloc);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_EQ(bytes_used, original_bytes_used);
}
TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
int64_t bytes_used1 = 0;
Alloc alloc1(&bytes_used1);
Set set1(alloc1);
set1.insert({1, 2, 3});
const int64_t original_bytes_used = bytes_used1;
EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
int64_t bytes_used2 = 0;
Alloc alloc2(&bytes_used2);
Set set2(std::move(set1), alloc2);
EXPECT_THAT(set2, ElementsAre(1, 2, 3));
EXPECT_EQ(bytes_used1, original_bytes_used);
EXPECT_EQ(bytes_used2, original_bytes_used);
}
bool IntCmp(const int a, const int b) { return a < b; }
TEST(Btree, SupportsFunctionPtrComparator) {
absl::btree_set<int, decltype(IntCmp) *> set(IntCmp);
set.insert({1, 2, 3});
EXPECT_THAT(set, ElementsAre(1, 2, 3));
EXPECT_TRUE(set.key_comp()(1, 2));
EXPECT_TRUE(set.value_comp()(1, 2));
absl::btree_map<int, int, decltype(IntCmp) *> map(&IntCmp);
map[1] = 1;
EXPECT_THAT(map, ElementsAre(Pair(1, 1)));
EXPECT_TRUE(map.key_comp()(1, 2));
EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2)));
}
template <typename Compare>
struct TransparentPassThroughComp {
using is_transparent = void;
template <typename T, typename U>
bool operator()(const T &lhs, const U &rhs) const {
return Compare()(lhs, rhs);
}
};
TEST(Btree,
SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) {
absl::btree_set<MultiKey, TransparentPassThroughComp<MultiKeyComp>> set;
set.insert(MultiKey{1, 2});
EXPECT_TRUE(set.contains(1));
}
TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
}
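// With assertions enabled, comparators that violate strict-weak-ordering
// requirements (self-equivalence, antisymmetry, transitivity) should be
// caught at insertion time via death tests.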
TEST(Btree, InvalidComparatorsCaught) {
if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
{
struct ZeroAlwaysLessCmp {
bool operator()(int lhs, int rhs) const {
if (lhs == 0) return true;
return lhs < rhs;
}
};
absl::btree_set<int, ZeroAlwaysLessCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
}
{
struct ThreeWayAlwaysLessCmp {
absl::weak_ordering operator()(int, int) const {
return absl::weak_ordering::less;
}
};
absl::btree_set<int, ThreeWayAlwaysLessCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent");
}
{
struct SumGreaterZeroCmp {
bool operator()(int lhs, int rhs) const {
if (lhs == rhs) return false;
return lhs + rhs > 0;
}
};
absl::btree_set<int, SumGreaterZeroCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}),
R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex");
}
{
struct ThreeWaySumGreaterZeroCmp {
absl::weak_ordering operator()(int lhs, int rhs) const {
if (lhs == rhs) return absl::weak_ordering::equivalent;
if (lhs + rhs > 0) return absl::weak_ordering::less;
if (lhs + rhs == 0) return absl::weak_ordering::equivalent;
return absl::weak_ordering::greater;
}
};
absl::btree_set<int, ThreeWaySumGreaterZeroCmp> set;
EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0");
}
struct ClockTime {
absl::optional<int> hour;
int minute;
};
ClockTime a = {absl::nullopt, 1};
ClockTime b = {2, 5};
ClockTime c = {6, 0};
{
struct NonTransitiveTimeCmp {
bool operator()(ClockTime lhs, ClockTime rhs) const {
if (lhs.hour.has_value() && rhs.hour.has_value() &&
*lhs.hour != *rhs.hour) {
return *lhs.hour < *rhs.hour;
}
return lhs.minute < rhs.minute;
}
};
NonTransitiveTimeCmp cmp;
ASSERT_TRUE(cmp(a, b) && cmp(b, c) && !cmp(a, c));
absl::btree_set<ClockTime, NonTransitiveTimeCmp> set;
EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
absl::btree_multiset<ClockTime, NonTransitiveTimeCmp> mset;
EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
}
{
struct ThreeWayNonTransitiveTimeCmp {
absl::weak_ordering operator()(ClockTime lhs, ClockTime rhs) const {
if (lhs.hour.has_value() && rhs.hour.has_value() &&
*lhs.hour != *rhs.hour) {
return *lhs.hour < *rhs.hour ? absl::weak_ordering::less
: absl::weak_ordering::greater;
}
return lhs.minute < rhs.minute ? absl::weak_ordering::less
: lhs.minute == rhs.minute ? absl::weak_ordering::equivalent
: absl::weak_ordering::greater;
}
};
ThreeWayNonTransitiveTimeCmp cmp;
ASSERT_TRUE(cmp(a, b) < 0 && cmp(b, c) < 0 && cmp(a, c) > 0);
absl::btree_set<ClockTime, ThreeWayNonTransitiveTimeCmp> set;
EXPECT_DEATH(set.insert({a, b, c}), "is_ordered_correctly");
absl::btree_multiset<ClockTime, ThreeWayNonTransitiveTimeCmp> mset;
EXPECT_DEATH(mset.insert({a, a, b, b, c, c}), "is_ordered_correctly");
}
}
TEST(Btree, MutatedKeysCaught) {
if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
struct IntPtrCmp {
bool operator()(int *lhs, int *rhs) const { return *lhs < *rhs; }
};
{
absl::btree_set<int *, IntPtrCmp> set;
int arr[] = {0, 1, 2};
set.insert({&arr[0], &arr[1], &arr[2]});
arr[0] = 100;
EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
}
{
absl::btree_multiset<int *, IntPtrCmp> set;
int arr[] = {0, 1, 2};
set.insert({&arr[0], &arr[0], &arr[1], &arr[1], &arr[2], &arr[2]});
arr[0] = 100;
EXPECT_DEATH(set.insert(&arr[0]), "is_ordered_correctly");
}
}
#ifndef _MSC_VER
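// Generation-based iterator validation: dereferencing, erasing through, or
// comparing an iterator after the container has been mutated should die.
// Not run on MSVC.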
TEST(Btree, InvalidIteratorUse) {
if (!BtreeGenerationsEnabled())
GTEST_SKIP() << "Generation validation for iterators is disabled.";
constexpr const char *kInvalidMemoryDeathMessage =
"use-after-free|invalidated iterator";
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.begin();
set.erase(it++);
EXPECT_DEATH(set.erase(it++), kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10000; ++i) set.insert(i);
auto it = set.find(5000);
ASSERT_NE(it, set.end());
set.erase(1);
EXPECT_DEATH(*it, kInvalidMemoryDeathMessage);
}
{
absl::btree_set<int> set;
for (int i = 0; i < 10; ++i) set.insert(i);
auto it = set.insert(20).first;
set.insert(30);
EXPECT_DEATH(void(it == set.begin()), kInvalidMemoryDeathMessage);
EXPECT_DEATH(void(set.begin() == it), kInvalidMemoryDeathMessage);
}
}
#endif
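// An element type whose only route to construction is the allocator's
// construct(), verifying that the btree builds elements through the
// allocator.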
class OnlyConstructibleByAllocator {
explicit OnlyConstructibleByAllocator(int i) : i_(i) {}
public:
OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other)
: i_(other.i_) {}
OnlyConstructibleByAllocator &operator=(
const OnlyConstructibleByAllocator &other) {
i_ = other.i_;
return *this;
}
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
private:
template <typename T>
friend class OnlyConstructibleAllocator;
int i_;
};
template <typename T = OnlyConstructibleByAllocator>
class OnlyConstructibleAllocator : public std::allocator<T> {
public:
OnlyConstructibleAllocator() = default;
template <class U>
explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator<U> &) {}
void construct(OnlyConstructibleByAllocator *p, int i) {
new (p) OnlyConstructibleByAllocator(i);
}
template <typename Pair>
void construct(Pair *p, const int i) {
OnlyConstructibleByAllocator only(i);
new (p) Pair(std::move(only), i);
}
template <class U>
struct rebind {
using other = OnlyConstructibleAllocator<U>;
};
};
struct OnlyConstructibleByAllocatorComp {
using is_transparent = void;
bool operator()(OnlyConstructibleByAllocator a,
OnlyConstructibleByAllocator b) const {
return a.Get() < b.Get();
}
bool operator()(int a, OnlyConstructibleByAllocator b) const {
return a < b.Get();
}
bool operator()(OnlyConstructibleByAllocator a, int b) const {
return a.Get() < b;
}
};
TEST(Btree, OnlyConstructibleByAllocatorType) {
const std::array<int, 2> arr = {3, 4};
{
absl::btree_set<OnlyConstructibleByAllocator,
OnlyConstructibleByAllocatorComp,
OnlyConstructibleAllocator<>>
set;
set.emplace(1);
set.emplace_hint(set.end(), 2);
set.insert(arr.begin(), arr.end());
EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
}
{
absl::btree_multiset<OnlyConstructibleByAllocator,
OnlyConstructibleByAllocatorComp,
OnlyConstructibleAllocator<>>
set;
set.emplace(1);
set.emplace_hint(set.end(), 2);
EXPECT_THAT(set, ElementsAre(1, 2));
}
{
absl::btree_map<OnlyConstructibleByAllocator, int,
OnlyConstructibleByAllocatorComp,
OnlyConstructibleAllocator<>>
map;
map.emplace(1);
map.emplace_hint(map.end(), 2);
map.insert(arr.begin(), arr.end());
EXPECT_THAT(map,
ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
}
{
absl::btree_multimap<OnlyConstructibleByAllocator, int,
OnlyConstructibleByAllocatorComp,
OnlyConstructibleAllocator<>>
map;
map.emplace(1);
map.emplace_hint(map.end(), 2);
EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2)));
}
}
class NotAssignable {
public:
explicit NotAssignable(int i) : i_(i) {}
NotAssignable(const NotAssignable &other) : i_(other.i_) {}
NotAssignable &operator=(NotAssignable &&other) = delete;
int Get() const { return i_; }
bool operator==(int i) const { return i_ == i; }
friend bool operator<(NotAssignable a, NotAssignable b) {
return a.i_ < b.i_;
}
private:
int i_;
};
TEST(Btree, NotAssignableType) {
{
absl::btree_set<NotAssignable> set;
set.emplace(1);
set.emplace_hint(set.end(), 2);
set.insert(NotAssignable(3));
set.insert(set.end(), NotAssignable(4));
EXPECT_THAT(set, ElementsAre(1, 2, 3, 4));
set.erase(set.begin());
EXPECT_THAT(set, ElementsAre(2, 3, 4));
}
{
absl::btree_multiset<NotAssignable> set;
set.emplace(1);
set.emplace_hint(set.end(), 2);
set.insert(NotAssignable(2));
set.insert(set.end(), NotAssignable(3));
EXPECT_THAT(set, ElementsAre(1, 2, 2, 3));
set.erase(set.begin());
EXPECT_THAT(set, ElementsAre(2, 2, 3));
}
{
absl::btree_map<NotAssignable, int> map;
map.emplace(NotAssignable(1), 1);
map.emplace_hint(map.end(), NotAssignable(2), 2);
map.insert({NotAssignable(3), 3});
map.insert(map.end(), {NotAssignable(4), 4});
EXPECT_THAT(map,
ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4)));
map.erase(map.begin());
EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4)));
}
{
absl::btree_multimap<NotAssignable, int> map;
map.emplace(NotAssignable(1), 1);
map.emplace_hint(map.end(), NotAssignable(2), 2);
map.insert({NotAssignable(2), 3});
map.insert(map.end(), {NotAssignable(3), 3});
EXPECT_THAT(map,
ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3)));
map.erase(map.begin());
EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3)));
}
}
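// A minimal arena-style allocator that hands back the most recently
// deallocated (and poisoned) block, checking that btree tolerates reuse of
// memory it previously released.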
struct ArenaLike {
void* recycled = nullptr;
size_t recycled_size = 0;
};
template <typename T>
class ArenaLikeAllocator : public std::allocator<T> {
public:
template <typename U>
struct rebind {
using other = ArenaLikeAllocator<U>;
};
explicit ArenaLikeAllocator(ArenaLike* arena) noexcept : arena_(arena) {}
~ArenaLikeAllocator() {
if (arena_->recycled != nullptr) {
delete [] static_cast<T*>(arena_->recycled);
arena_->recycled = nullptr;
}
}
template<typename U>
explicit ArenaLikeAllocator(const ArenaLikeAllocator<U>& other) noexcept
: arena_(other.arena_) {}
T* allocate(size_t num_objects, const void* = nullptr) {
size_t size = num_objects * sizeof(T);
if (arena_->recycled != nullptr && arena_->recycled_size == size) {
T* result = static_cast<T*>(arena_->recycled);
arena_->recycled = nullptr;
return result;
}
return new T[num_objects];
}
void deallocate(T* p, size_t num_objects) {
size_t size = num_objects * sizeof(T);
memset(p, 0xde, size);
if (arena_->recycled == nullptr) {
arena_->recycled = p;
arena_->recycled_size = size;
} else {
delete [] p;
}
}
ArenaLike* arena_;
};
TEST(Btree, ReusePoisonMemory) {
using Alloc = ArenaLikeAllocator<int64_t>;
using Set = absl::btree_set<int64_t, std::less<int64_t>, Alloc>;
ArenaLike arena;
Alloc alloc(&arena);
Set set(alloc);
set.insert(0);
set.erase(0);
set.insert(0);
}
TEST(Btree, IteratorSubtraction) {
absl::BitGen bitgen;
std::vector<int> vec;
for (int i = 0; i < 1000000; ++i) vec.push_back(i);
absl::c_shuffle(vec, bitgen);
absl::btree_set<int> set;
for (int i : vec) set.insert(i);
for (int i = 0; i < 1000; ++i) {
size_t begin = absl::Uniform(bitgen, 0u, set.size());
size_t end = absl::Uniform(bitgen, begin, set.size());
ASSERT_EQ(end - begin, set.find(end) - set.find(begin))
<< begin << " " << end;
}
}
TEST(Btree, DereferencingEndIterator) {
if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
absl::btree_set<int> set;
for (int i = 0; i < 1000; ++i) set.insert(i);
EXPECT_DEATH(*set.end(), R"regex(Dereferencing end\(\) iterator)regex");
}
TEST(Btree, InvalidIteratorComparison) {
if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
absl::btree_set<int> set1, set2;
for (int i = 0; i < 1000; ++i) {
set1.insert(i);
set2.insert(i);
}
constexpr const char *kValueInitDeathMessage =
"Comparing default-constructed iterator with .*non-default-constructed "
"iterator";
typename absl::btree_set<int>::iterator iter1, iter2;
EXPECT_EQ(iter1, iter2);
EXPECT_DEATH(void(set1.begin() == iter1), kValueInitDeathMessage);
EXPECT_DEATH(void(iter1 == set1.begin()), kValueInitDeathMessage);
constexpr const char *kDifferentContainerDeathMessage =
"Comparing iterators from different containers";
iter1 = set1.begin();
iter2 = set2.begin();
EXPECT_DEATH(void(iter1 == iter2), kDifferentContainerDeathMessage);
EXPECT_DEATH(void(iter2 == iter1), kDifferentContainerDeathMessage);
}
TEST(Btree, InvalidPointerUse) {
if (!kAsan)
GTEST_SKIP() << "We only detect invalid pointer use in ASan mode.";
absl::btree_set<int> set;
set.insert(0);
const int *ptr = &*set.begin();
set.insert(1);
EXPECT_DEATH(std::cout << *ptr, "use-after-free");
size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode<decltype(set)>();
for (int i = 2; i < slots_per_node - 1; ++i) set.insert(i);
ptr = &*set.begin();
set.insert(static_cast<int>(slots_per_node));
EXPECT_DEATH(std::cout << *ptr, "use-after-free");
}
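// Smoke-tests insert/erase/find on a set instantiation; used below with
// overaligned keys and with nodes whose field_type equals the slot type.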
template<typename Set>
void TestBasicFunctionality(Set set) {
using value_type = typename Set::value_type;
for (int i = 0; i < 100; ++i) { set.insert(value_type(i)); }
for (int i = 50; i < 100; ++i) { set.erase(value_type(i)); }
auto it = set.begin();
for (int i = 0; i < 50; ++i, ++it) {
ASSERT_EQ(set.find(value_type(i)), it) << i;
}
}
template<size_t align>
struct alignas(align) OveralignedKey {
explicit OveralignedKey(int i) : key(i) {}
bool operator<(const OveralignedKey &other) const { return key < other.key; }
int key = 0;
};
TEST(Btree, OveralignedKey) {
TestBasicFunctionality(
SizedBtreeSet<OveralignedKey<16>, 8>());
TestBasicFunctionality(
SizedBtreeSet<OveralignedKey<16>, 9>());
}
TEST(Btree, FieldTypeEqualsSlotType) {
using set_type = absl::btree_set<uint8_t>;
static_assert(BtreeNodePeer::FieldTypeEqualsSlotType<set_type>(), "");
TestBasicFunctionality(set_type());
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/btree.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/btree_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
f5bec509-da94-4e49-bc7e-4fa9a774cd5b | cpp | google/googletest | sample4 | googletest/samples/sample4.cc | googletest/samples/sample4_unittest.cc | #include "sample4.h"
#include <stdio.h>
int Counter::Increment() { return counter_++; }
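// Returns the pre-decrement value; the counter never drops below zero.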
int Counter::Decrement() {
if (counter_ == 0) {
return counter_;
} else {
return counter_--;
}
}
void Counter::Print() const { printf("%d", counter_); } | #include "sample4.h"
#include "gtest/gtest.h"
namespace {
TEST(Counter, Increment) {
Counter c;
EXPECT_EQ(0, c.Decrement());
EXPECT_EQ(0, c.Increment());
EXPECT_EQ(1, c.Increment());
EXPECT_EQ(2, c.Increment());
EXPECT_EQ(3, c.Decrement());
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/samples/sample4.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/samples/sample4_unittest.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
55bf1df7-0188-4aae-92bb-2d657ff431ce | cpp | tensorflow/tensorflow | google_auth_provider | third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider_test.cc | #include "tsl/platform/cloud/google_auth_provider.h"
#ifndef _WIN32
#include <pwd.h>
#include <unistd.h>
#else
#include <sys/types.h>
#endif
#include <fstream>
#include <utility>
#include "absl/strings/match.h"
#include "json/json.h"
#include "tsl/platform/base64.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/retrying_utils.h"
namespace tsl {
namespace {
constexpr char kGoogleApplicationCredentials[] =
"GOOGLE_APPLICATION_CREDENTIALS";
constexpr char kGoogleAuthTokenForTesting[] = "GOOGLE_AUTH_TOKEN_FOR_TESTING";
constexpr char kCloudSdkConfig[] = "CLOUDSDK_CONFIG";
constexpr char kNoGceCheck[] = "NO_GCE_CHECK";
constexpr char kGCloudConfigFolder[] = ".config/gcloud/";
constexpr char kWellKnownCredentialsFile[] =
"application_default_credentials.json";
constexpr int kExpirationTimeMarginSec = 60;
constexpr char kOAuthV3Url[] = "https://www.googleapis.com/oauth2/v3/token";
constexpr char kOAuthV4Url[] = "https://www.googleapis.com/oauth2/v4/token";
constexpr char kGceTokenPath[] = "instance/service-accounts/default/token";
constexpr char kOAuthScope[] = "https://www.googleapis.com/auth/cloud-platform";
bool IsFile(const string& filename) {
std::ifstream fstream(filename.c_str());
return fstream.good();
}
absl::Status GetEnvironmentVariableFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
const char* result = std::getenv(kGoogleApplicationCredentials);
if (!result || !IsFile(result)) {
return errors::NotFound(strings::StrCat("$", kGoogleApplicationCredentials,
" is not set or corrupt."));
}
*filename = result;
return absl::OkStatus();
}
absl::Status GetWellKnownFileName(string* filename) {
if (!filename) {
return errors::FailedPrecondition("'filename' cannot be nullptr.");
}
string config_dir;
const char* config_dir_override = std::getenv(kCloudSdkConfig);
if (config_dir_override) {
config_dir = config_dir_override;
} else {
const char* home_dir = std::getenv("HOME");
if (!home_dir) {
return errors::FailedPrecondition("Could not read $HOME.");
}
config_dir = io::JoinPath(home_dir, kGCloudConfigFolder);
}
auto result = io::JoinPath(config_dir, kWellKnownCredentialsFile);
if (!IsFile(result)) {
return errors::NotFound(
"Could not find the credentials file in the standard gcloud location.");
}
*filename = result;
return absl::OkStatus();
}
}
GoogleAuthProvider::GoogleAuthProvider(
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client)
: GoogleAuthProvider(std::unique_ptr<OAuthClient>(new OAuthClient()),
std::move(compute_engine_metadata_client),
Env::Default()) {}
GoogleAuthProvider::GoogleAuthProvider(
std::unique_ptr<OAuthClient> oauth_client,
std::shared_ptr<ComputeEngineMetadataClient> compute_engine_metadata_client,
Env* env)
: oauth_client_(std::move(oauth_client)),
compute_engine_metadata_client_(
std::move(compute_engine_metadata_client)),
env_(env) {}
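// Returns the cached token while it is still fresh; otherwise tries, in
// order, the testing override, credentials files, and (unless NO_GCE_CHECK
// is "true") the GCE metadata server, falling back to an empty token.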
absl::Status GoogleAuthProvider::GetToken(string* t) {
mutex_lock lock(mu_);
const uint64 now_sec = env_->NowSeconds();
if (now_sec + kExpirationTimeMarginSec < expiration_timestamp_sec_) {
*t = current_token_;
return absl::OkStatus();
}
if (GetTokenForTesting().ok()) {
*t = current_token_;
return absl::OkStatus();
}
auto token_from_files_status = GetTokenFromFiles();
if (token_from_files_status.ok()) {
*t = current_token_;
return absl::OkStatus();
}
char* no_gce_check_var = std::getenv(kNoGceCheck);
bool skip_gce_check = no_gce_check_var != nullptr &&
absl::EqualsIgnoreCase(no_gce_check_var, "true");
absl::Status token_from_gce_status;
if (skip_gce_check) {
token_from_gce_status =
absl::Status(absl::StatusCode::kCancelled,
strings::StrCat("GCE check skipped due to presence of $",
kNoGceCheck, " environment variable."));
} else {
token_from_gce_status = GetTokenFromGce();
}
if (token_from_gce_status.ok()) {
*t = current_token_;
return absl::OkStatus();
}
if (skip_gce_check) {
LOG(INFO)
<< "Attempting an empty bearer token since no token was retrieved "
<< "from files, and GCE metadata check was skipped.";
} else {
LOG(WARNING)
<< "All attempts to get a Google authentication bearer token failed, "
<< "returning an empty token. Retrieving token from files failed with "
"\""
<< token_from_files_status.ToString() << "\"."
<< " Retrieving token from GCE failed with \""
<< token_from_gce_status.ToString() << "\".";
}
*t = "";
if (skip_gce_check) {
expiration_timestamp_sec_ = 0;
} else {
expiration_timestamp_sec_ = UINT64_MAX;
}
current_token_ = "";
return absl::OkStatus();
}
absl::Status GoogleAuthProvider::GetTokenFromFiles() {
string credentials_filename;
if (!GetEnvironmentVariableFileName(&credentials_filename).ok() &&
!GetWellKnownFileName(&credentials_filename).ok()) {
return errors::NotFound("Could not locate the credentials file.");
}
Json::Value json;
Json::Reader reader;
std::ifstream credentials_fstream(credentials_filename);
if (!reader.parse(credentials_fstream, json)) {
return errors::FailedPrecondition(
"Couldn't parse the JSON credentials file.");
}
if (json.isMember("refresh_token")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromRefreshTokenJson(
json, kOAuthV3Url, ¤t_token_, &expiration_timestamp_sec_));
} else if (json.isMember("private_key")) {
TF_RETURN_IF_ERROR(oauth_client_->GetTokenFromServiceAccountJson(
json, kOAuthV4Url, kOAuthScope, ¤t_token_,
&expiration_timestamp_sec_));
} else {
return errors::FailedPrecondition(
"Unexpected content of the JSON credentials file.");
}
return absl::OkStatus();
}
absl::Status GoogleAuthProvider::GetTokenFromGce() {
std::vector<char> response_buffer;
const uint64 request_timestamp_sec = env_->NowSeconds();
TF_RETURN_IF_ERROR(compute_engine_metadata_client_->GetMetadata(
kGceTokenPath, &response_buffer));
absl::string_view response =
absl::string_view(&response_buffer[0], response_buffer.size());
TF_RETURN_IF_ERROR(oauth_client_->ParseOAuthResponse(
response, request_timestamp_sec, ¤t_token_,
&expiration_timestamp_sec_));
return absl::OkStatus();
}
absl::Status GoogleAuthProvider::GetTokenForTesting() {
const char* token = std::getenv(kGoogleAuthTokenForTesting);
if (!token) {
return errors::NotFound("The env variable for testing was not set.");
}
expiration_timestamp_sec_ = UINT64_MAX;
current_token_ = token;
return absl::OkStatus();
}
} | #include "tsl/platform/cloud/google_auth_provider.h"
#include <stdlib.h>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
string TestData() {
return io::JoinPath(testing::TslSrcRoot(), "platform", "cloud", "testdata");
}
class FakeEnv : public EnvWrapper {
public:
FakeEnv() : EnvWrapper(Env::Default()) {}
uint64 NowSeconds() const override { return now; }
uint64 now = 10000;
};
class FakeOAuthClient : public OAuthClient {
public:
absl::Status GetTokenFromServiceAccountJson(
Json::Value json, absl::string_view oauth_server_uri,
absl::string_view scope, string* token,
uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return absl::OkStatus();
}
absl::Status GetTokenFromRefreshTokenJson(
Json::Value json, absl::string_view oauth_server_uri, string* token,
uint64* expiration_timestamp_sec) override {
provided_credentials_json = json;
*token = return_token;
*expiration_timestamp_sec = return_expiration_timestamp;
return absl::OkStatus();
}
string return_token;
uint64 return_expiration_timestamp;
Json::Value provided_credentials_json;
};
}
class GoogleAuthProviderTest : public ::testing::Test {
protected:
void SetUp() override { ClearEnvVars(); }
void TearDown() override { ClearEnvVars(); }
void ClearEnvVars() {
unsetenv("CLOUDSDK_CONFIG");
unsetenv("GOOGLE_APPLICATION_CREDENTIALS");
unsetenv("GOOGLE_AUTH_TOKEN_FOR_TESTING");
unsetenv("NO_GCE_CHECK");
}
};
TEST_F(GoogleAuthProviderTest, EnvironmentVariable_Caching) {
setenv("GOOGLE_APPLICATION_CREDENTIALS",
io::JoinPath(TestData(), "service_account_credentials.json").c_str(),
1);
setenv("CLOUDSDK_CONFIG", TestData().c_str(),
1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake_key_id",
oauth_client->provided_credentials_json.get("private_key_id", "")
.asString());
oauth_client->return_token = "new-fake-token";
env.now += 3000;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-token", token);
}
TEST_F(GoogleAuthProviderTest, GCloudRefreshToken) {
setenv("CLOUDSDK_CONFIG", TestData().c_str(), 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
oauth_client->return_token = "fake-token";
oauth_client->return_expiration_timestamp = env.NowSeconds() + 3600;
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-token", token);
EXPECT_EQ("fake-refresh-token",
oauth_client->provided_credentials_json.get("refresh_token", "")
.asString());
}
TEST_F(GoogleAuthProviderTest, RunningOnGCE) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests(
{new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})"),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::Unavailable("503"), 503),
new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
R"(
{
"access_token":"new-fake-gce-token",
"expires_in": 3920,
"token_type":"Bearer"
})")});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 3700;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("fake-gce-token", token);
env.now += 598;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("new-fake-gce-token", token);
}
TEST_F(GoogleAuthProviderTest, OverrideForTesting) {
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "tokenForTesting", 1);
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> empty_requests;
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&empty_requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("tokenForTesting", token);
}
TEST_F(GoogleAuthProviderTest, NothingAvailable) {
auto oauth_client = new FakeOAuthClient;
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"/service-accounts/default/token\n"
"Header Metadata-Flavor: Google\n",
"", errors::NotFound("404"), 404)});
FakeEnv env;
std::shared_ptr<HttpRequest::Factory> fakeHttpRequestFactory =
std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadataClient = std::make_shared<ComputeEngineMetadataClient>(
      fakeHttpRequestFactory, RetryConfig(0 /* init_delay_time_us */));
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
metadataClient, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
}
TEST_F(GoogleAuthProviderTest, NoGceCheckEnvironmentVariable) {
setenv("NO_GCE_CHECK", "True", 1);
auto oauth_client = new FakeOAuthClient;
FakeEnv env;
GoogleAuthProvider provider(std::unique_ptr<OAuthClient>(oauth_client),
nullptr, &env);
string token;
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("NO_GCE_CHECK", "true", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("", token);
setenv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "newToken", 1);
TF_EXPECT_OK(provider.GetToken(&token));
EXPECT_EQ("newToken", token);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/google_auth_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f713565-a2b4-48a7-aeea-db9993e84384 | cpp | abseil/abseil-cpp | common_policy_traits | absl/container/internal/common_policy_traits.h | absl/container/internal/common_policy_traits_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
#define ABSL_CONTAINER_INTERNAL_COMMON_POLICY_TRAITS_H_
#include <cstddef>
#include <cstring>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
template <class Policy, class = void>
struct common_policy_traits {
using slot_type = typename Policy::slot_type;
using reference = decltype(Policy::element(std::declval<slot_type*>()));
using value_type = typename std::remove_reference<reference>::type;
template <class Alloc, class... Args>
static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
Policy::construct(alloc, slot, std::forward<Args>(args)...);
}
template <class Alloc>
static auto destroy(Alloc* alloc, slot_type* slot) {
return Policy::destroy(alloc, slot);
}
template <class Alloc>
static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
transfer_impl(alloc, new_slot, old_slot, Rank2{});
}
template <class P = Policy>
static auto element(absl::remove_const_t<slot_type>* slot)
-> decltype(P::element(slot)) {
return P::element(slot);
}
template <class P = Policy>
static auto element(const slot_type* slot) -> decltype(P::element(slot)) {
return P::element(slot);
}
static constexpr bool transfer_uses_memcpy() {
return std::is_same<decltype(transfer_impl<std::allocator<char>>(
nullptr, nullptr, nullptr, Rank2{})),
std::true_type>::value;
}
template <class Alloc>
static constexpr bool destroy_is_trivial() {
return std::is_same<decltype(destroy<Alloc>(nullptr, nullptr)),
std::true_type>::value;
}
private:
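  // Tag types that rank the transfer_impl overloads: Rank2 prefers a
  // policy-provided transfer, Rank1 a memcpy of trivially relocatable
  // values, and Rank0 the construct/destroy fallback.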
struct Rank0 {};
struct Rank1 : Rank0 {};
struct Rank2 : Rank1 {};
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot,
Rank2) -> decltype(P::transfer(alloc, new_slot,
old_slot)) {
return P::transfer(alloc, new_slot, old_slot);
}
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
template <class Alloc,
typename = std::enable_if_t<absl::is_trivially_relocatable<
std::conditional_t<false, Alloc, value_type>>::value>>
static std::true_type transfer_impl(Alloc*, slot_type* new_slot,
slot_type* old_slot, Rank1) {
std::memcpy(
static_cast<void*>(std::launder(
const_cast<std::remove_const_t<value_type>*>(&element(new_slot)))),
static_cast<const void*>(&element(old_slot)), sizeof(value_type));
return {};
}
#endif
template <class Alloc>
static void transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, Rank0) {
construct(alloc, new_slot, std::move(element(old_slot)));
destroy(alloc, old_slot);
}
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/common_policy_traits.h"
#include <functional>
#include <memory>
#include <type_traits>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::testing::MockFunction;
using ::testing::AnyNumber;
using ::testing::ReturnRef;
using Slot = int;
struct PolicyWithoutOptionalOps {
using slot_type = Slot;
using key_type = Slot;
using init_type = Slot;
struct PolicyFunctions {
std::function<void(void*, Slot*, Slot)> construct;
std::function<void(void*, Slot*)> destroy;
std::function<Slot&(Slot*)> element;
};
static PolicyFunctions* functions() {
static PolicyFunctions* functions = new PolicyFunctions();
return functions;
}
static void construct(void* a, Slot* b, Slot c) {
functions()->construct(a, b, c);
}
static void destroy(void* a, Slot* b) { functions()->destroy(a, b); }
static Slot& element(Slot* b) { return functions()->element(b); }
};
struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
struct TransferFunctions {
std::function<void(void*, Slot*, Slot*)> transfer;
};
static TransferFunctions* transfer_fn() {
static TransferFunctions* transfer_fn = new TransferFunctions();
return transfer_fn;
}
static void transfer(void* a, Slot* b, Slot* c) {
transfer_fn()->transfer(a, b, c);
}
};
struct PolicyWithMemcpyTransferAndTrivialDestroy : PolicyWithoutOptionalOps {
static std::true_type transfer(void*, Slot*, Slot*) { return {}; }
static std::true_type destroy(void*, Slot*) { return {}; }
};
struct Test : ::testing::Test {
Test() {
PolicyWithoutOptionalOps::functions()->construct = [&](void* a1, Slot* a2,
Slot a3) {
construct.Call(a1, a2, std::move(a3));
};
PolicyWithoutOptionalOps::functions()->destroy = [&](void* a1, Slot* a2) {
destroy.Call(a1, a2);
};
PolicyWithoutOptionalOps::functions()->element = [&](Slot* a1) -> Slot& {
return element.Call(a1);
};
PolicyWithOptionalOps::transfer_fn()->transfer =
[&](void* a1, Slot* a2, Slot* a3) { return transfer.Call(a1, a2, a3); };
}
std::allocator<Slot> alloc;
int a = 53;
MockFunction<void(void*, Slot*, Slot)> construct;
MockFunction<void(void*, Slot*)> destroy;
MockFunction<Slot&(Slot*)> element;
MockFunction<void(void*, Slot*, Slot*)> transfer;
};
TEST_F(Test, construct) {
EXPECT_CALL(construct, Call(&alloc, &a, 53));
common_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
}
TEST_F(Test, destroy) {
EXPECT_CALL(destroy, Call(&alloc, &a));
common_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
}
TEST_F(Test, element) {
int b = 0;
EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
EXPECT_EQ(&b, &common_policy_traits<PolicyWithoutOptionalOps>::element(&a));
}
TEST_F(Test, without_transfer) {
int b = 42;
EXPECT_CALL(element, Call(&a)).Times(AnyNumber()).WillOnce(ReturnRef(a));
EXPECT_CALL(element, Call(&b)).WillOnce(ReturnRef(b));
EXPECT_CALL(construct, Call(&alloc, &a, b)).Times(AnyNumber());
EXPECT_CALL(destroy, Call(&alloc, &b)).Times(AnyNumber());
common_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
}
TEST_F(Test, with_transfer) {
int b = 42;
EXPECT_CALL(transfer, Call(&alloc, &a, &b));
common_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
}
TEST(TransferUsesMemcpy, Basic) {
EXPECT_FALSE(
common_policy_traits<PolicyWithOptionalOps>::transfer_uses_memcpy());
EXPECT_TRUE(
common_policy_traits<
PolicyWithMemcpyTransferAndTrivialDestroy>::transfer_uses_memcpy());
}
TEST(DestroyIsTrivial, Basic) {
EXPECT_FALSE(common_policy_traits<PolicyWithOptionalOps>::destroy_is_trivial<
std::allocator<char>>());
EXPECT_TRUE(common_policy_traits<PolicyWithMemcpyTransferAndTrivialDestroy>::
destroy_is_trivial<std::allocator<char>>());
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/common_policy_traits.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/common_policy_traits_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ac223d64-35cb-4f4c-b7a5-4be97a0a0e39 | cpp | google/tensorstore | status_testutil | tensorstore/util/status_testutil.cc | tensorstore/util/status_testutil_test.cc | #include "tensorstore/util/status_testutil.h"
#include <ostream>
#include <regex>
#include <string>
#include <system_error>
#include <gmock/gmock.h>
#include "absl/status/status.h"
namespace tensorstore {
namespace internal_status {
namespace {
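// gMock matcher interface that requires the status message to fully match a
// std::regex pattern.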
template <typename StringType>
class RegexMatchImpl : public ::testing::MatcherInterface<StringType> {
public:
RegexMatchImpl(const std::string& message_pattern)
: message_pattern_(message_pattern) {}
void DescribeTo(std::ostream* os) const override {
*os << "message matches pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
void DescribeNegationTo(std::ostream* os) const override {
*os << "message doesn't match pattern ";
::testing::internal::UniversalPrint(message_pattern_, os);
}
bool MatchAndExplain(
StringType message,
::testing::MatchResultListener* result_listener) const override {
return std::regex_match(message, std::regex(message_pattern_));
}
private:
const std::string message_pattern_;
};
}
}
internal_status::StatusIsMatcher MatchesStatus(
absl::StatusCode status_code, const std::string& message_pattern) {
return internal_status::StatusIsMatcher(
status_code, ::testing::Matcher<const std::string&>(
new internal_status::RegexMatchImpl<const std::string&>(
message_pattern)));
}
} | #include "tensorstore/util/status_testutil.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
namespace {
using ::tensorstore::Future;
using ::tensorstore::Result;
template <typename MatcherType, typename Value>
std::string Explain(const MatcherType& m, const Value& x) {
testing::StringMatchResultListener listener;
ExplainMatchResult(m, x, &listener);
return listener.str();
}
TEST(StatusTestutilTest, IsOk) {
EXPECT_THAT([]() -> Future<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT([]() -> Result<void> { return absl::OkStatus(); }(),
::tensorstore::IsOk());
EXPECT_THAT(absl::OkStatus(), ::tensorstore::IsOk());
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOk());
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOk());
EXPECT_THAT(absl::InternalError(""), ::testing::Not(::tensorstore::IsOk()));
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::InternalError("")),
testing::IsEmpty());
EXPECT_THAT(Explain(::tensorstore::IsOk(), absl::OkStatus()),
testing::IsEmpty());
TENSORSTORE_EXPECT_OK(absl::OkStatus());
TENSORSTORE_ASSERT_OK(absl::OkStatus());
TENSORSTORE_EXPECT_OK([]() -> Future<void> { return absl::OkStatus(); }());
TENSORSTORE_ASSERT_OK([]() -> Result<void> { return absl::OkStatus(); }());
}
TEST(StatusTestutilTest, Optional) {
EXPECT_THAT(Result<int>{1}, ::testing::Optional(1));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(Result<int>{1}, ::testing::Optional(::testing::_));
EXPECT_THAT(Result<int>{2}, ::testing::Optional(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::testing::Optional(1)));
EXPECT_THAT(
Explain(::testing::Optional(1), Result<int>(absl::InternalError(""))),
testing::HasSubstr("which is not engaged"));
EXPECT_THAT(Explain(::testing::Optional(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, IsOkAndHolds) {
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(1));
EXPECT_THAT(Future<int>{2}, ::tensorstore::IsOkAndHolds(2));
EXPECT_THAT(Result<int>{1}, ::tensorstore::IsOkAndHolds(::testing::_));
EXPECT_THAT(Result<int>{2}, ::tensorstore::IsOkAndHolds(::testing::Not(1)));
EXPECT_THAT(Result<int>{absl::InternalError("")},
::testing::Not(::tensorstore::IsOkAndHolds(1)));
int result;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(result, []() -> Result<int> { return 2; }());
EXPECT_EQ(2, result);
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1),
Result<int>(absl::InternalError(""))),
testing::HasSubstr("whose status code is INTERNAL"));
EXPECT_THAT(Explain(::tensorstore::IsOkAndHolds(1), Result<int>(2)),
testing::HasSubstr("whose value 2 doesn't match"));
}
TEST(StatusTestutilTest, StatusIs) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::StatusIs(absl::StatusCode::kInternal));
EXPECT_THAT(
absl::OkStatus(),
::testing::Not(::tensorstore::StatusIs(absl::StatusCode::kInternal)));
EXPECT_THAT(absl::OkStatus(), ::tensorstore::StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(Explain(::tensorstore::StatusIs(absl::StatusCode::kOk),
absl::InternalError("")),
testing::HasSubstr("whose status code INTERNAL doesn't match"));
}
TEST(StatusTestutilTest, StatusIs_WithMessage) {
EXPECT_THAT(
Result<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
Future<void>{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(
absl::InternalError("strongbad"),
::tensorstore::StatusIs(::testing::_, ::testing::HasSubstr("bad")));
EXPECT_THAT(absl::InternalError("strongbad"),
::tensorstore::StatusIs(
::testing::_, ::testing::Not(::testing::HasSubstr("good"))));
EXPECT_THAT(
absl::Status{absl::InternalError("strongbad")},
::tensorstore::StatusIs(::testing::Not(absl::StatusCode::kAborted),
::testing::Not(::testing::HasSubstr("good"))));
}
TEST(StatusTestutilTest, MatchesStatus) {
EXPECT_THAT(Result<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(Future<void>{absl::InternalError("")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::InternalError(""),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal));
EXPECT_THAT(absl::OkStatus(),
::tensorstore::MatchesStatus(absl::StatusCode::kOk));
}
TEST(StatusTestutilTest, MatchesStatus_Pattern) {
EXPECT_THAT(Result<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(Future<void>{absl::InternalError("a")},
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::tensorstore::MatchesStatus(absl::StatusCode::kInternal, "a"));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kInternal, "b")));
EXPECT_THAT(absl::InternalError("a"),
::testing::Not(::tensorstore::MatchesStatus(
absl::StatusCode::kCancelled, "a")));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/status_testutil.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/status_testutil_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
578d4096-9bda-4a7a-be74-8cdd6b64c2cb | cpp | google/cel-cpp | mutable_list_impl | runtime/internal/mutable_list_impl.cc | runtime/internal/mutable_list_impl_test.cc | #include "runtime/internal/mutable_list_impl.h"
#include <memory>
#include <string>
#include <utility>
#include "common/native_type.h"
#include "common/type.h"
#include "common/value.h"
namespace cel::runtime_internal {
using ::cel::NativeTypeId;
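// MutableListValue exposes a ListValueBuilder through the opaque value
// interface so elements can be appended in place and the final immutable
// ListValue produced by Build().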
MutableListValue::MutableListValue(
cel::Unique<cel::ListValueBuilder> list_builder)
: cel::OpaqueValueInterface(), list_builder_(std::move(list_builder)) {}
absl::Status MutableListValue::Append(cel::Value element) {
return list_builder_->Add(std::move(element));
}
absl::StatusOr<cel::ListValue> MutableListValue::Build() && {
return std::move(*list_builder_).Build();
}
std::string MutableListValue::DebugString() const {
return kMutableListTypeName;
}
NativeTypeId MutableListValue::GetNativeTypeId() const {
return cel::NativeTypeId::For<MutableListValue>();
}
} | #include "runtime/internal/mutable_list_impl.h"
#include "base/type_provider.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_factory.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "internal/testing.h"
namespace cel::runtime_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
TEST(MutableListImplValue, Creation) {
common_internal::LegacyValueManager value_factory(
MemoryManagerRef::ReferenceCounting(), TypeProvider::Builtin());
ASSERT_OK_AND_ASSIGN(auto builder, value_factory.NewListValueBuilder(
value_factory.GetDynListType()));
auto mutable_list_value =
value_factory.GetMemoryManager().MakeShared<MutableListValue>(
std::move(builder));
OpaqueValue opaque_handle = mutable_list_value;
EXPECT_EQ(NativeTypeId::Of(opaque_handle),
NativeTypeId::For<MutableListValue>());
EXPECT_EQ(opaque_handle.operator->(), mutable_list_value.operator->());
}
TEST(MutableListImplValue, ListBuilding) {
common_internal::LegacyValueManager value_factory(
MemoryManagerRef::ReferenceCounting(), TypeProvider::Builtin());
ASSERT_OK_AND_ASSIGN(auto builder, value_factory.NewListValueBuilder(
value_factory.GetDynListType()));
auto mutable_list_value =
value_factory.GetMemoryManager().MakeShared<MutableListValue>(
std::move(builder));
MutableListValue& mutable_ref =
const_cast<MutableListValue&>(*mutable_list_value);
ASSERT_OK(mutable_ref.Append(value_factory.CreateIntValue(1)));
ASSERT_OK_AND_ASSIGN(ListValue list_value, std::move(mutable_ref).Build());
EXPECT_THAT(list_value.Size(), IsOkAndHolds(1));
ASSERT_OK_AND_ASSIGN(auto element, list_value.Get(value_factory, 0));
ASSERT_TRUE(InstanceOf<IntValue>(element));
EXPECT_EQ(Cast<IntValue>(element).NativeValue(), 1);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/internal/mutable_list_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/internal/mutable_list_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
25324229-a464-4608-8206-4630461737fa | cpp | tensorflow/tensorflow | convolution_4d_expander | third_party/xla/xla/service/convolution_4d_expander.cc | third_party/xla/xla/service/convolution_4d_expander_test.cc | #include "xla/service/convolution_4d_expander.h"
#include <algorithm>
#include <functional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
bool Convolution4DExpander::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kConvolution) {
return false;
}
const ConvolutionDimensionNumbers& dim_nums =
instruction->convolution_dimension_numbers();
if (dim_nums.input_spatial_dimensions().size() != 4) {
return false;
}
Shape input = instruction->operand(0)->shape();
for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
int64_t spatial_dim = dim_nums.input_spatial_dimensions(i);
if (input.dimensions(spatial_dim) == 1 &&
instruction->window().dimensions(i).padding_low() == 0 &&
instruction->window().dimensions(i).padding_high() == 0) {
return true;
}
}
return false;
}
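// Drops the size-1, unpadded spatial dimensions found above, reshaping the
// operands and output so the convolution runs at a lower rank.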
absl::StatusOr<HloInstruction*> Convolution4DExpander::ExpandInstruction(
HloInstruction* instruction) {
HloComputation* computation = instruction->parent();
ConvolutionDimensionNumbers dim_nums =
instruction->convolution_dimension_numbers();
ConvolutionDimensionNumbers new_dim_nums = dim_nums;
std::vector<int64_t> removed_input_dimensions;
std::vector<int64_t> removed_kernel_dimensions;
std::vector<int64_t> removed_output_dimensions;
new_dim_nums.clear_input_spatial_dimensions();
new_dim_nums.clear_output_spatial_dimensions();
new_dim_nums.clear_kernel_spatial_dimensions();
Window new_window;
HloInstruction* input = instruction->mutable_operand(0);
for (int64_t i = 0; i < dim_nums.input_spatial_dimensions().size(); ++i) {
int64_t input_spatial_dim = dim_nums.input_spatial_dimensions(i);
int64_t output_spatial_dim = dim_nums.output_spatial_dimensions(i);
int64_t kernel_spatial_dim = dim_nums.kernel_spatial_dimensions(i);
if (input->shape().dimensions(input_spatial_dim) == 1 &&
instruction->window().dimensions(i).padding_low() == 0 &&
instruction->window().dimensions(i).padding_high() == 0) {
removed_input_dimensions.push_back(input_spatial_dim);
removed_output_dimensions.push_back(output_spatial_dim);
removed_kernel_dimensions.push_back(kernel_spatial_dim);
} else {
*new_window.add_dimensions() = instruction->window().dimensions(i);
new_dim_nums.add_input_spatial_dimensions(input_spatial_dim);
new_dim_nums.add_output_spatial_dimensions(output_spatial_dim);
new_dim_nums.add_kernel_spatial_dimensions(kernel_spatial_dim);
}
}
std::sort(removed_input_dimensions.begin(), removed_input_dimensions.end(),
std::greater<>());
std::sort(removed_output_dimensions.begin(), removed_output_dimensions.end(),
std::greater<>());
std::sort(removed_kernel_dimensions.begin(), removed_kernel_dimensions.end(),
std::greater<>());
Shape new_input_shape = input->shape();
for (int64_t dim : removed_input_dimensions) {
new_input_shape.DeleteDimension(dim);
}
HloInstruction* kernel = instruction->mutable_operand(1);
Shape new_kernel_shape = kernel->shape();
for (int64_t dim : removed_kernel_dimensions) {
new_kernel_shape.DeleteDimension(dim);
}
Shape new_output_shape = instruction->shape();
for (int64_t dim : removed_output_dimensions) {
new_output_shape.DeleteDimension(dim);
}
auto compute_new_dimension =
[](const std::vector<int64_t>& removed_dimensions,
int64_t old_dimension) {
int64_t num_smaller = absl::c_count_if(
removed_dimensions, [old_dimension](int64_t removed_dimension) {
return removed_dimension < old_dimension;
});
return old_dimension - num_smaller;
};
new_dim_nums.set_input_batch_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_batch_dimension()));
new_dim_nums.set_input_feature_dimension(compute_new_dimension(
removed_input_dimensions, new_dim_nums.input_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.input_spatial_dimensions().size(); ++i) {
new_dim_nums.set_input_spatial_dimensions(
i, compute_new_dimension(removed_input_dimensions,
new_dim_nums.input_spatial_dimensions(i)));
}
new_dim_nums.set_output_batch_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_batch_dimension()));
new_dim_nums.set_output_feature_dimension(compute_new_dimension(
removed_output_dimensions, new_dim_nums.output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.output_spatial_dimensions().size();
++i) {
new_dim_nums.set_output_spatial_dimensions(
i, compute_new_dimension(removed_output_dimensions,
new_dim_nums.output_spatial_dimensions(i)));
}
new_dim_nums.set_kernel_input_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_input_feature_dimension()));
new_dim_nums.set_kernel_output_feature_dimension(
compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_output_feature_dimension()));
for (int64_t i = 0; i < new_dim_nums.kernel_spatial_dimensions().size();
++i) {
new_dim_nums.set_kernel_spatial_dimensions(
i, compute_new_dimension(removed_kernel_dimensions,
new_dim_nums.kernel_spatial_dimensions(i)));
}
HloInstruction* reshaped_input = computation->AddInstruction(
HloInstruction::CreateReshape(new_input_shape, input));
HloInstruction* reshaped_kernel = computation->AddInstruction(
HloInstruction::CreateReshape(new_kernel_shape, kernel));
instruction->set_convolution_dimension_numbers(new_dim_nums);
instruction->set_window(new_window);
HloInstruction* new_convolution =
computation->AddInstruction(instruction->CloneWithNewOperands(
new_output_shape, {reshaped_input, reshaped_kernel}));
return computation->AddInstruction(
HloInstruction::CreateReshape(instruction->shape(), new_convolution));
}
} | #include "xla/service/convolution_4d_expander.h"
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using Convolution4DExpanderTest = HloTestBase;
TEST_F(Convolution4DExpanderTest, ConvertTo2DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 2);
}
TEST_F(Convolution4DExpanderTest, ConvertTo3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,2,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 pad=0_0x0_0x1_0x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 3);
}
TEST_F(Convolution4DExpanderTest, ConvertTo0DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,1,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,1,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,1,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_TRUE(expander_pass.Run(module.get()).value());
root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kReshape);
const HloInstruction* new_convolution = root->operand(0);
EXPECT_EQ(new_convolution->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(new_convolution->window().dimensions_size(), 0);
}
TEST_F(Convolution4DExpanderTest, DontConvert3DConvolution) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,1,1,5,20]{4,3,2,1,0} parameter(0)
kernel = f32[20,1,1,1,15]{4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,1,1,5]{4,3,2,1,0} convolution(input, kernel), dim_labels=012bf_i012o->f012b, window={size=1x1x1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 3);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
TEST_F(Convolution4DExpanderTest, DontConvertIfNoTrivialDimensionAvailable) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[2,10,2,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,2,2,2,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=2x2x2x4}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
TEST_F(Convolution4DExpanderTest, DontConvertIfPaddingIsNonzero) {
std::string hlo_string = R"(HloModule convolution_4d_fp32
ENTRY convolution_computation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4 stride=2x1x2x1 pad=1_0x0_0x0_1x0_0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
HloInstruction* root = computation->root_instruction();
EXPECT_EQ(root->opcode(), HloOpcode::kConvolution);
EXPECT_EQ(root->window().dimensions_size(), 4);
Convolution4DExpander expander_pass;
ASSERT_FALSE(expander_pass.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_4d_expander.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convolution_4d_expander_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f81a9f1c-f14a-46bb-95cb-3879e36651fc | cpp | tensorflow/tensorflow | renderer | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.cc | tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_test.cc | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/stringpiece.h"
namespace tensorflow {
namespace generator {
namespace cpp {
Renderer::Renderer(RendererContext context) : context_(context) {}
Renderer& Renderer::BlankLine() {
context_.code.AddLineWithoutIndent("");
return *this;
}
Renderer& Renderer::CodeLine(const string& text) {
context_.code.AddLineWithoutIndent(text);
return *this;
}
Renderer& Renderer::CodeLines(const string& text) {
StringPiece trimmed_text(text);
str_util::RemoveWhitespaceContext(&trimmed_text);
for (const string& line : str_util::Split(trimmed_text, '\n')) {
context_.code.AddLineWithoutIndent(line);
}
return *this;
}
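// Appends a ';' terminator; text that already ends in ';' is kept as-is but
// logged as a warning.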
Renderer& Renderer::Statement(const string& text) {
if (absl::EndsWith(text, ";")) {
LOG(WARNING) << "Superfluous terminating ';' in '" << text << "'";
context_.code.AddLineWithIndent(text);
} else {
context_.code.AddLineWithIndent(absl::StrCat(text, ";"));
}
return *this;
}
Renderer& Renderer::TFStatement(const string& text) {
return Statement(absl::Substitute("TF_RETURN_IF_ERROR($0)", text));
}
Renderer& Renderer::CommentLine(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat("
return *this;
}
Renderer& Renderer::BlockOpen(const string& text) {
context_.code.AddLineWithIndent(absl::StrCat(text, " {"));
context_.code.IncreaseIndent();
return *this;
}
Renderer& Renderer::BlockClose(const string& text) {
context_.code.DecreaseIndent();
context_.code.AddLineWithIndent(absl::StrCat("}", text));
return *this;
}
}
}
} | #include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.h"
#include "tensorflow/c/experimental/ops/gen/common/path_config.h"
#include "tensorflow/c/experimental/ops/gen/common/source_code.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/cpp_config.h"
#include "tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_context.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace generator {
namespace cpp {
namespace {
TEST(Renderer, typical_usage) {
class TestRenderer : Renderer {
public:
explicit TestRenderer(SourceCode& code)
: Renderer(
{RendererContext::kSource, code, CppConfig(), PathConfig()}) {}
void Render() {
CommentLine("File level comment.");
CodeLine("#include \"header.h\"");
BlankLine();
BlockOpen("void TestFunction()");
{
Statement("int i = 1");
BlankLine();
BlockOpen("while (i == 1)");
{
CommentLine("Do nothing, really....");
CodeLine("#if 0");
Statement("call()");
CodeLine("#endif");
BlockClose();
}
BlockClose("
}
}
};
SourceCode code;
TestRenderer(code).Render();
string expected = R"(
#include "header.h"
void TestFunction() {
int i = 1;
while (i == 1) {
#if 0
call();
#endif
}
}
)";
code.SetSpacesPerIndent(3);
EXPECT_EQ(expected, code.Render());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/renderers/renderer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/ops/gen/cpp/renderers/renderer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |